import os

import numpy as np
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Flatten, Activation, concatenate
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Add
from keras.optimizers import SGD
from keras.utils import to_categorical
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, f1_score
import seaborn as sns
# Hide the warning messages that appear on macOS (shown when running on the CPU because there is no GPU)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Initialize the parameters
classes = [
"normal cells",
"blasts",
"blasts_highSSC_granulocytes",
"blasts_highSSC_middle_ugly",
"blasts_highSSC_upper_dead",
]
num_classes = len(classes)
image_size = 66
# Load the data
imagefiles = np.load("imagefiles_supplementary.npz")
X_train = imagefiles['X_train']
X_test = imagefiles['X_test']
y_train = imagefiles['y_train']
y_test = imagefiles['y_test']
# Reshape the grayscale images into the 4-D tensor shape the CNN expects
X_train = X_train.reshape((-1, image_size, image_size, 1))
X_test = X_test.reshape((-1, image_size, image_size, 1))
# Cast the data to float32
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
# One-hot encode the labels (a 1 is placed at the index of the correct class)
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
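# e.g. to_categorical([0, 2], num_classes=3) -> [[1., 0., 0.], [0., 0., 1.]]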
def _build(_input, *nodes):
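    """Apply `nodes` to `_input` in order: a callable is applied to the
    current tensor, a tuple is expanded as a sequential sub-chain, and a
    list creates parallel branches whose outputs are returned as a list
    (typically consumed by a following merge layer such as Add())."""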
x = _input
for node in nodes:
if callable(node):
x = node(x)
elif isinstance(node, list):
x = [_build(x, branch) for branch in node]
elif isinstance(node, tuple):
x = _build(x, *node)
else:
x = node
return x
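

# Minimal demonstration of the _build mini-DSL (illustrative sizes, not part
# of the classifier): plain callables chain sequentially, a tuple groups a
# sequential sub-chain, and a list fans out into parallel branches whose
# outputs the next node receives as a list (e.g. for Add() to merge).
demo_in = Input((8, 8, 4))
demo_out = _build(
    demo_in,
    [(Conv2D(4, (3, 3), padding='same'), Activation('relu')),  # branch 1
     Conv2D(4, (3, 3), padding='same')],                       # branch 2
    Add(),      # merges the two (8, 8, 4) branch outputs
    Flatten(),
    Dense(2, activation='softmax'),
)
demo_model = Model(demo_in, demo_out)
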
_input = Input(X_train.shape[1:])
output = _build(
    _input,
    # Reduction dual-path modules (x3)
    # ---------------------------
    # Branch 1: convolution (96 filters) -> batch normalization -> ReLU
    # ---------------------------
    # Branch 2: max pooling
    # ---------------------------
    # Reduction dual-path module 1
    [(Conv2D(96, (3, 3), strides=(2, 2)),
      BatchNormalization(),  # the explicit arguments in the original were all Keras defaults
      Activation('relu')),
     MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],
    # Reduction dual-path module 2
    Add(),
    [(Conv2D(96, (3, 3), strides=(2, 2)),
      BatchNormalization(),
      Activation('relu')),
     MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],
    # Reduction dual-path module 3
    Add(),
    [(Conv2D(96, (3, 3), strides=(2, 2)),
      BatchNormalization(),
      Activation('relu')),
     MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],
    # Dual-path modules (x10; modules 2-10 are disabled below)
    # ---------------------------
    # Branch 1: convolution (112 filters, 1x1) -> batch normalization -> ReLU
    # ---------------------------
    # Branch 2: convolution (48 filters, 3x3) -> batch normalization -> ReLU
    # ---------------------------
    # Dual-path module 1
    Add(),
    [(Conv2D(112, (1, 1), strides=(1, 1)),
      BatchNormalization(),
      Activation('relu')),
     # 'same' padding keeps this branch at the spatial size of the 1x1 branch
     # so the two branch outputs can be merged downstream
     (Conv2D(48, (3, 3), strides=(1, 1), padding='same'),
      BatchNormalization(),
      Activation('relu'))],
    # Dual-path modules 2-10 (disabled): each repeats the same pattern:
    # Add(),
    # [(Conv2D(112, (1, 1), strides=(1, 1)), BatchNormalization(), Activation('relu')),
    #  (Conv2D(48, (3, 3), strides=(1, 1), padding='same'), BatchNormalization(), Activation('relu'))],
    # Fully connected head
    # The two dual-path branches are merged with concatenate rather than
    # Add(): their channel counts differ (112 vs 48), which Add() cannot
    # broadcast.
    concatenate,
    # A tuple (not a list) so that _build applies these layers in sequence
    # rather than as parallel branches.
    (MaxPooling2D(pool_size=(2, 2)),
     Flatten(),
     Dense(256, activation='relu'),
     Dropout(0.5),
     Dense(num_classes, activation='softmax'))
)
model = Model(_input, output)
model.summary()
# # Configure the optimizer and loss function
# opt = SGD(learning_rate=0.01, momentum=0.0, nesterov=False)
# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
#
# # Run the training
# print("start training")
# hist = model.fit(X_train, y_train, batch_size=32, epochs=30, validation_data=(X_test, y_test))
# # Evaluate
# print("start eval")
# score = model.evaluate(X_test, y_test, batch_size=32, verbose=1)  # verbose: show progress
# print('Test Loss: ', score[0])
# print('Test Accuracy: ', score[1])
#
# model.save('leukemia_cnn_supplementary.h5')
#
# # Plot the training history
# # Plot the accuracy curve
# fig = plt.figure()
# plt.plot(hist.history['accuracy'])
# plt.plot(hist.history['val_accuracy'])
# plt.title('Accuracy')
# plt.legend(['train', 'test'], loc='upper left')
# fig.savefig('result/cnn_supplementary/cnn_accuracy_supplementary.png')
# plt.close()
# # Plot the loss curve
# fig = plt.figure()
# plt.plot(hist.history['loss'])
# plt.plot(hist.history['val_loss'])
# plt.title('Loss')
# plt.legend(['train', 'test'], loc='upper left')
# fig.savefig('result/cnn_supplementary/cnn_loss_supplementary.png')
# plt.close()
# # Build the confusion matrix
# plt.figure()
# y_pred = model.predict(X_test)
# y_test = imagefiles['y_test']  # reload the integer labels (y_test above was one-hot encoded)
# cm = confusion_matrix(y_test, np.argmax(y_pred, axis=1))
# ticklabels = ["blasts_highSSC_granulocytes",
# "blasts_highSSC_middle_ugly",
# "blasts",
# "normal cells",
# "blasts_highSSC_upper_dead"]
# sns.heatmap(cm, annot=True, cmap='Blues', yticklabels=ticklabels, xticklabels=ticklabels)
# plt.ylabel("Correct")
# plt.xlabel("Prediction")
# plt.tight_layout()
# plt.savefig('result/cnn_supplementary/confusion_matrix_cnn_supplementary.png')
# plt.close()
#
# # F1 micro/macro
# f1_macro = f1_score(y_test, np.argmax(y_pred, axis=1), average="macro")
# f1_micro = f1_score(y_test, np.argmax(y_pred, axis=1), average="micro")
# print(f"f1_macro:{f1_macro}")
# print(f"f1_miro:{f1_micro}")
# -*- encoding: utf-8 -*-
import requests
import time
import random
STATS = True
INFINITE = True
VOTING_ENDPOINT = 'http://www.adressa.no/poll/vote.do'
# These are the required fields from the voting form
payload = {
"vote": "svar4",
"mentometerId": "10790638",
"publicationId": "167",
"redirectTo": "http://www.adressa.no/nyheter/trondheim/article10789480.ece?service=poll&pollId=10790638",
}
while INFINITE:
    response = requests.post(VOTING_ENDPOINT, params=payload)

    data = response.json()
    data['options'].sort(key=lambda x: x['votes'], reverse=True)

    if STATS:
        for o in data['options']:
            print(f"{o['label']}: {o['percentage']} ({o['votes']})")
        print("-------------------------------------------")

    time.sleep(1.0 + (random.random() * 5))
forbidden = ['Key.esc', 'Key.cmd', 'Key.cmd_r', 'Key.menu', 'Key.pause',
'Key.scroll_lock', 'Key.print_screen', 'Key.enter', 'Key.space',
'Key.backspace', 'Key.ctrl_l', 'Key.ctrl_r', 'Key.alt_l', 'Key.alt_gr',
'Key.caps_lock', 'Key.num_lock', 'Key.tab', 'Key.shift', 'Key.shift_r',
'Key.insert', 'Key.delete', 'Key.home', 'Key.end', 'Key.page_up',
'Key.page_down', '/']
dict_ = {' ': ' ', 'Key.f1': 'F1', 'Key.f2': 'F2', 'Key.f3': 'F3', 'Key.f4':
'F4', 'Key.f5': 'F5', 'Key.f6': 'F6', 'Key.f7': 'F7', 'Key.f8': 'F8',
'Key.f9': 'F9', 'Key.f10': 'F10', 'Key.f11': 'F11', 'Key.f12': 'F12',
'<96>': 'Num 0', '<97>': 'Num 1', '<98>': 'Num 2', '<99>': 'Num 3',
'<100>': 'Num 4', '<101>': 'Num 5', '<102>': 'Num 6', '<103>': 'Num 7',
'<104>': 'Num 8', '<105>': 'Num 9', '<110>': 'Num .', 'Key.up': 'Up',
'Key.down': 'Down', 'Key.left': 'Left', 'Key.right': 'Right', '\\\\': '\\'}
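
# Minimal usage sketch (assumed context: translating pynput key strings for
# display). `normalize_key` is a hypothetical helper, not part of the
# original snippet.
def normalize_key(key_str):
    if key_str in forbidden:
        return None  # drop control/navigation keys entirely
    # map special keys (F-keys, numpad, arrows, ...) to display names;
    # otherwise strip the quotes pynput places around printable characters
    return dict_.get(key_str, key_str.strip("'"))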
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mkrandom.settings')
import django
django.setup()
from main.models import Character, Vehicle, Tire, Glider
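
# Hypothetical sketch (the field names `name` and `image_url` are assumptions,
# not taken from the original models) of how the name/URL lists below could be
# persisted:
#
#     for name, url in zip(char_names, char_urls):
#         Character.objects.update_or_create(name=name, defaults={'image_url': url})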
char_names = [
'Mario',
'Luigi',
'Peach',
'Daisy',
'Rosalina',
'Mario Tanooki',
'Peach cat',
'Yoshi',
'Yoshi (LBlue)',
'Yoshi (Black)',
'Yoshi (Rose)',
'Yoshi (Yellow)',
'Yoshi (White)',
'Yoshi (Blue)',
'Yoshi (Rose)',
'Yoshi (Orange)',
'Toad',
'Koopa',
'Shyguy',
'Shyguy (LB)',
'Shyguy (Black)',
'Shyguy (Rose)',
'Shyguy (Yellow)',
'Shyguy (White)',
'Shyguy (Blue)',
'Shyguy (Rose)',
'Shyguy (Orange)',
'Lakitu',
'Toadette',
'Boo',
'Baby Mario',
'Baby Luigi',
'Baby Peach',
'Baby Daisy',
'Baby Rosalina',
'Metal Mario',
'Golden Mario',
'Golden Peach',
'Wario',
'Waluigi',
'Donkey Kong',
'Bowser',
'Skelerex',
'Bowser Jr',
'Dry Bowser',
'Lemmy',
'Larry',
'Wendy',
'Ludwig',
'Iggy',
'Roy',
'Morton',
'Inkling (G)',
'Inkling (B)',
'Link (SSBU)',
'Link (BOTW)',
'Villager (B)',
'Villager(G)',
'Mary',
]
char_urls = [
'https://static.wikia.nocookie.net/heros/images/9/94/Mario_and_Sonic_Tokyo_2020_Mario_artwork.png/revision/latest?cb=20210410003745&path-prefix=fr',
'https://freepngimg.com/thumb/categories/462.png',
'https://static.wikia.nocookie.net/smashbros/images/0/06/Peach_SMP.png/revision/latest?cb=20190420130956&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/6/6c/Artwork_Daisy_MP10.png/revision/latest?cb=20171021130941&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/1/17/Harmonie_The_Top_100.png/revision/latest?cb=20171021123917&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/3/33/Mario_tanuki_-_SM3DL.png/revision/latest/scale-to-width-down/250?cb=20190409114830&path-prefix=fr',
'https://i.pinimg.com/originals/7d/5d/d8/7d5dd803a6eaad9e7491ed59f184eb39.png',
'https://www.seekpng.com/png/full/15-156558_ground-pound-yoshi-super-mario-yoshi-png.png',
'https://static.wikia.nocookie.net/hello-yoshi/images/f/fb/ACL_MK8_Light_Blue_Yoshi.png/revision/latest?cb=20180325192809',
'https://www.123-stickers.com/5731-6069-large/Array.jpg',
'https://static.wikia.nocookie.net/supermariorun/images/3/32/Yoshi_rouge.PNG/revision/latest?cb=20190427132857&path-prefix=fr',
'https://static.wikia.nocookie.net/supermariorun/images/9/94/Yoshi_jaune.PNG/revision/latest?cb=20190427132253&path-prefix=fr',
'https://static.wikia.nocookie.net/yoshi/images/b/b9/Yoshi_blanc.png/revision/latest?cb=20181128092526&path-prefix=fr',
'https://mario.wiki.gallery/images/thumb/9/9a/MKT_Artwork_BlueYoshi.png/129px-MKT_Artwork_BlueYoshi.png',
'https://e7.pngegg.com/pngimages/860/699/png-clipart-mario-yoshi-yoshi-s-story-super-mario-world-2-yoshi-s-island-yoshi-s-woolly-world-yoshi-s-new-island-yoshi-nintendo-computer-wallpaper.png',
'https://static.wikia.nocookie.net/yoshi/images/a/a4/Orange-yoshi-yoshi-29007923-415-479.png/revision/latest?cb=20201026191941&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/e/e4/SMRToad.png/revision/latest?cb=20161123170829&path-prefix=fr',
'https://static.wikia.nocookie.net/smashbros/images/e/ed/Art_Koopa_NSMB.png/revision/latest?cb=20131223214127&path-prefix=fr',
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/d585815f-9fc0-440f-9949-a4a9c06bb713/db7whvu-94fc7f0d-1dea-47aa-922d-428a26ed8480.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7InBhdGgiOiJcL2ZcL2Q1ODU4MTVmLTlmYzAtNDQwZi05OTQ5LWE0YTljMDZiYjcxM1wvZGI3d2h2dS05NGZjN2YwZC0xZGVhLTQ3YWEtOTIyZC00MjhhMjZlZDg0ODAucG5nIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmZpbGUuZG93bmxvYWQiXX0.iNMsbFuXa43xVer7q_c2UB65P2wAVONONt-wrMHozjo',
'https://i.pinimg.com/originals/58/69/c3/5869c3396ea69ca97c76f0b725099aa9.png',
'https://static.wikia.nocookie.net/supermarioexploration/images/8/8e/18B83E32-0819-4994-A3F8-E90CC35AB8AC.png/revision/latest/scale-to-width-down/872?cb=20180607214102',
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz4dw0-1d608b14-5aba-43f7-b4a8-e855207824c1.png/v1/fill/w_600,h_815,strp/super_mario__green_shy_guy_2d_by_joshuat1306_dcz4dw0-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o0ZHcwLTFkNjA4YjE0LTVhYmEtNDNmNy1iNGE4LWU4NTUyMDc4MjRjMS5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.RxuED4zTRqJT-3TAQ8iHGS6zpoDw4O4DIKFQ8cKWpSM',
'https://static.miraheze.org/drmarioworldwiki/thumb/9/9a/Cha_sub_shyguyYellow.png/144px-Cha_sub_shyguyYellow.png',
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz564x-7c505016-32d8-4268-b44e-358edcb1b10d.png/v1/fill/w_600,h_815,strp/super_mario__white_shy_guy_2d_by_joshuat1306_dcz564x-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o1NjR4LTdjNTA1MDE2LTMyZDgtNDI2OC1iNDRlLTM1OGVkY2IxYjEwZC5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.gLfujNRPJ5nNiOq-siQUD6ifo28x0oQHEB4PrpNHqFk',
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz4dqq-95483c93-ee74-4ca0-a820-3287359457a3.png/v1/fill/w_600,h_815,strp/super_mario__blue_shy_guy_2d_by_joshuat1306_dcz4dqq-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o0ZHFxLTk1NDgzYzkzLWVlNzQtNGNhMC1hODIwLTMyODczNTk0NTdhMy5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.w1w6wZOiQ0oxfwNTiiuFy2Ph6yO6mN99-U_HYKZdZyQ',
'https://static.wikia.nocookie.net/paper-shin-aka-keroro-gunsou/images/f/f0/Pink_Shy_Guy_dance.png/revision/latest/scale-to-width-down/250?cb=20210525165708',
'https://static.wikia.nocookie.net/fantendo/images/f/ff/ShyGuyn_s._Png/revision/latest/scale-to-width-down/250?cb=20121222235649',
'https://static.wikia.nocookie.net/fantendo/images/e/eb/Cloudless_Lakitu.png/revision/latest/scale-to-width-down/250?cb=20120809192910',
'https://static.wikia.nocookie.net/mario/images/b/b2/ToadetteMP10.png/revision/latest?cb=20190609122040&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/a/a1/Boo_CTTT.png/revision/latest?cb=20210504081014',
'https://static.wikia.nocookie.net/videogames-fanon/images/d/d9/BabySit.png/revision/latest?cb=20120930205222',
'https://i.pinimg.com/originals/c8/4d/1f/c84d1f11741ee80b7bbda79a449917ab.png',
'https://www.pngkit.com/png/full/436-4365611_download-zip-archive-baby-peach-mario-bros.png',
'https://static.wikia.nocookie.net/fantendo/images/b/be/Baby_Daisy.png/revision/latest?cb=20210119015117',
'https://mario.wiki.gallery/images/3/33/MKT_Artwork_BabyRosalina.png',
'https://static.wikia.nocookie.net/mario/images/7/7e/Metal_Mario_Artwork_2_-_Mario_Kart_7.png/revision/latest?cb=20120513171323',
'https://static.wikia.nocookie.net/mario/images/1/10/MGWT_Gold_Mario.png/revision/latest?cb=20190317040405',
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/0e738c17-7f3c-422e-8225-f8c782b08626/deg7wos-27ff3182-82ba-43ab-b5c0-f05cbec329f2.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7InBhdGgiOiJcL2ZcLzBlNzM4YzE3LTdmM2MtNDIyZS04MjI1LWY4Yzc4MmIwODYyNlwvZGVnN3dvcy0yN2ZmMzE4Mi04MmJhLTQzYWItYjVjMC1mMDVjYmVjMzI5ZjIucG5nIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmZpbGUuZG93bmxvYWQiXX0.bK3J5_NJrKn-JHsqIxEUCjBiXqM4dMnBho-b2lJ6sK8',
'https://www.smashbros.com/assets_v2/img/fighter/wario/main2.png',
'https://static.wikia.nocookie.net/wario/images/8/8a/Waluigi%28SMP%290.png/revision/latest?cb=20180929091141',
'https://static.wikia.nocookie.net/heroes-fr/images/5/5c/Donkey_Kong.png/revision/latest?cb=20201122110342&path-prefix=fr',
'https://static.wikia.nocookie.net/epicpixelbattles/images/0/0b/Bowser-png-clipart-removebg-preview.png/revision/latest?cb=20201013093525',
'https://static.wikia.nocookie.net/mario/images/1/12/MPSRSkelerex.png/revision/latest/scale-to-width-down/2000?cb=20161015183419&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/0/07/Art_Bowser_Jr_SPM.png/revision/latest?cb=20181112222531&path-prefix=fr',
'https://mario.wiki.gallery/images/thumb/9/9d/Dry_Bowser_Artwork.png/250px-Dry_Bowser_Artwork.png',
'https://www.pngkey.com/png/full/563-5634904_super-mario-odyssey-lemmy-mario-kart-8-deluxe.png',
'https://static.wikia.nocookie.net/mariokart/images/4/42/LarryKoopa.png/revision/latest?cb=20140313170129',
'https://mario.wiki.gallery/images/thumb/9/95/NSMBW_Wendy_Artwork.png/1200px-NSMBW_Wendy_Artwork.png',
'https://static.wikia.nocookie.net/mario-fr/images/f/f6/1-1571859148.png/revision/latest?cb=20191023193229&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/4/4c/Iggy_NSMBU.png/revision/latest?cb=20171208215237&path-prefix=fr',
'https://static.wikia.nocookie.net/mario-fr/images/f/fb/2.png/revision/latest?cb=20191023191713&path-prefix=fr',
'https://static.wikia.nocookie.net/fantendo/images/4/4f/Morton_Koopa_Jr_3D.png/revision/latest?cb=20110403192112',
'https://static.wikia.nocookie.net/mario/images/2/2e/Inkling_SSBU.png/revision/latest?cb=20200216081405',
'https://i.pinimg.com/originals/7c/ce/f8/7ccef872fcee2e11945c6799ce2985cc.png',
'https://www.seekpng.com/png/full/7-73001_link-zelda-png-super-smash-bros-for-wii.png',
'https://static.wikia.nocookie.net/versus-compendium/images/0/00/Link_BotW.png/revision/latest?cb=20181128185543',
'https://static.wikia.nocookie.net/nintendo/images/1/1d/Villager-Boy-1.png/revision/latest?cb=20150419125930&path-prefix=en',
'https://i.pinimg.com/originals/bb/ca/f7/bbcaf749d9dc2d1b1259e8fe5cb49769.png',
'https://static.wikia.nocookie.net/nintendo-univers/images/a/a9/Marie_ACAF_3.png/revision/latest?cb=20161221163100&path-prefix=fr',
]
car_names = [
'Standard Kart',
'Pipe Frame',
'Mach 8',
'Steel Driver',
'Cat Cruiser',
'Circuit Special',
'Tri-Speeder',
'Badwagon',
'Prancer',
'Biddybuggy',
'Landship',
'Sneeker',
'Sports Coupe',
'Gold Standard',
'GLA',
'W 25 Silver Arrow',
'300 SL Roadster',
'Blue Falcon',
'Tanooki Kart',
'B Dasher',
'Streetle',
'P-Wing',
'Koopa Clown',
'Standard Bike',
'Comet',
'Sport Bike',
'The Duke',
'Flame Rider',
'Varmint',
'Mr. Scooty',
'Jet Bike',
'Yoshi Bike',
'Master Cycle',
'Master Cycle Zero',
'City Tripper',
'Standard ATV',
'Wild Wiggler',
'Teddy Buggy',
'Bone Rattler',
'Splat Buggy',
'Inkstriker',
]
car_urls = [
'https://static.wikia.nocookie.net/mariokart/images/0/05/StandardKartBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20140715154926',
'https://static.wikia.nocookie.net/mariokart/images/d/d1/PipeFrameBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102122932',
'https://static.wikia.nocookie.net/mariokart/images/d/df/Mach8BodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102122956',
'https://static.wikia.nocookie.net/mariokart/images/9/94/Steel_Driver.png/revision/latest/scale-to-width-down/100?cb=20200925190921',
'https://static.wikia.nocookie.net/mariokart/images/f/f4/CatCruiserBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123132',
'https://static.wikia.nocookie.net/mariokart/images/6/6c/CircuitSpecialBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123237',
'https://static.wikia.nocookie.net/mariokart/images/5/56/TrispeederBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123217',
'https://static.wikia.nocookie.net/mariokart/images/c/c2/BadwagonBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123350',
'https://static.wikia.nocookie.net/mariokart/images/f/ff/PrancerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123333',
'https://static.wikia.nocookie.net/mariokart/images/4/45/BiddybuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123322',
'https://static.wikia.nocookie.net/mariokart/images/6/6d/LandshipBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123656',
'https://static.wikia.nocookie.net/mariokart/images/4/47/SneakerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123617',
'https://static.wikia.nocookie.net/mariokart/images/f/f8/SportsCoupeMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123625',
'https://static.wikia.nocookie.net/mariokart/images/3/31/MK8Gold_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102123637',
'https://static.wikia.nocookie.net/mariokart/images/c/c2/GLA-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140333',
'https://static.wikia.nocookie.net/mariokart/images/2/25/W25SilverArrow-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140332',
'https://static.wikia.nocookie.net/mariokart/images/1/17/300SLRoadster-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140332',
'https://static.wikia.nocookie.net/mariokart/images/e/ed/MK8_BlueFalcon.png/revision/latest/scale-to-width-down/100?cb=20150331235059',
'https://static.wikia.nocookie.net/mariokart/images/d/d7/MK8_TanookiBuggy.png/revision/latest/scale-to-width-down/100?cb=20150331235545',
'https://static.wikia.nocookie.net/mariokart/images/3/32/MK8_BDasher.png/revision/latest/scale-to-width-down/100?cb=20150401000836',
'https://static.wikia.nocookie.net/mariokart/images/c/cf/MK8Streetle.png/revision/latest/scale-to-width-down/100?cb=20150426174005',
'https://static.wikia.nocookie.net/mariokart/images/c/cd/MK8PWing.png/revision/latest/scale-to-width-down/100?cb=20150426174107',
'https://static.wikia.nocookie.net/mariokart/images/7/70/MK8DX_Koopa_Clown.png/revision/latest/scale-to-width-down/100?cb=20170704061052',
'https://static.wikia.nocookie.net/mariokart/images/8/84/StandardBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123849',
'https://static.wikia.nocookie.net/mariokart/images/0/0e/CometBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124024',
'https://static.wikia.nocookie.net/mariokart/images/f/fe/SportBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123857',
'https://static.wikia.nocookie.net/mariokart/images/8/8a/TheDukeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925174819',
'https://static.wikia.nocookie.net/mariokart/images/3/31/FlameRiderBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123942',
'https://static.wikia.nocookie.net/mariokart/images/d/d0/VarmintBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123951',
'https://static.wikia.nocookie.net/mariokart/images/1/18/MrScootyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123925',
'https://static.wikia.nocookie.net/mariokart/images/1/12/JetBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123928',
'https://static.wikia.nocookie.net/mariokart/images/6/62/YoshiBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925193256',
'https://static.wikia.nocookie.net/mariokart/images/5/52/MK8_MasterCycle.png/revision/latest/scale-to-width-down/100?cb=20150331231734',
'https://static.wikia.nocookie.net/mariokart/images/3/3e/150px-MK8D_Master_Cycle_Zero.png/revision/latest/scale-to-width-down/111?cb=20200726154936',
'https://static.wikia.nocookie.net/mariokart/images/9/90/MK8CityTripper.png/revision/latest/scale-to-width-down/100?cb=20150426175601',
'https://static.wikia.nocookie.net/mariokart/images/2/23/StandardATVBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124111',
'https://static.wikia.nocookie.net/mariokart/images/a/aa/WildWigglerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925175122',
'https://static.wikia.nocookie.net/mariokart/images/f/fa/TeddyBuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124120',
'https://static.wikia.nocookie.net/mariokart/images/0/0a/MK8BoneRattler.png/revision/latest/scale-to-width-down/100?cb=20150426180108',
'https://static.wikia.nocookie.net/mariokart/images/6/63/MK8DX_Splat_Buggy.png/revision/latest/scale-to-width-down/100?cb=20170706064814',
'https://static.wikia.nocookie.net/mariokart/images/e/eb/MK8DX_Inkstriker.png/revision/latest/scale-to-width-down/100?cb=20170706065507',
]
tire_names = [
'Standard',
'Monster',
'Roller',
'Slim',
'Slick',
'Metal',
'Button',
'Off-Road',
'Sponge',
'Wood',
'Cushion',
'Blue Standard',
'Hot Monster',
'Azure Roller',
'Crimson Slim',
'Cyber Slick',
'Retro Off-Road',
'Gold Tires',
'GLA Tires',
'Triforce Tires',
'Ancient Tyres',
'Leaf Tires',
]
tire_urls = [
'https://static.wikia.nocookie.net/mariokart/images/a/a8/StandardTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125545',
'https://static.wikia.nocookie.net/mariokart/images/2/29/MonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125541',
'https://static.wikia.nocookie.net/mariokart/images/7/76/RollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125539',
'https://static.wikia.nocookie.net/mariokart/images/f/f8/SlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125536',
'https://static.wikia.nocookie.net/mariokart/images/d/dd/SlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125542',
'https://static.wikia.nocookie.net/mariokart/images/9/96/MetalTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124533',
'https://static.wikia.nocookie.net/mariokart/images/0/07/ButtonTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124541',
'https://static.wikia.nocookie.net/mariokart/images/2/25/Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102124559',
'https://static.wikia.nocookie.net/mariokart/images/4/4c/SpongeTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124549',
'https://static.wikia.nocookie.net/mariokart/images/0/03/WoodTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124724',
'https://static.wikia.nocookie.net/mariokart/images/9/92/CushionTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124817',
'https://static.wikia.nocookie.net/mariokart/images/d/db/Blue_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102124836',
'https://static.wikia.nocookie.net/mariokart/images/d/d1/HotMonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124834',
'https://static.wikia.nocookie.net/mariokart/images/f/fe/AzureRollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20200726154338',
'https://static.wikia.nocookie.net/mariokart/images/7/71/CrimsonSlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125627',
'https://static.wikia.nocookie.net/mariokart/images/2/29/CyberSlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125626',
'https://static.wikia.nocookie.net/mariokart/images/4/48/Retro_Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102125629',
'https://static.wikia.nocookie.net/mariokart/images/5/52/Gold_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20141102125630',
'https://static.wikia.nocookie.net/mariokart/images/b/ba/GLATires-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180539',
'https://static.wikia.nocookie.net/mariokart/images/0/09/MK8_TriforceTires.png/revision/latest/scale-to-width-down/100?cb=20150331233357',
'https://static.wikia.nocookie.net/mariokart/images/d/d5/MK8D_Ancient_Tires.png/revision/latest/scale-to-width-down/100?cb=20200726154442',
'https://static.wikia.nocookie.net/mariokart/images/f/f9/Leaf_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180810',
]
glider_names = [
'Super Glider',
'Cloud Glider',
'Wario Wing',
'Waddle Wing',
'Peach Parasol',
'Parachute',
'Parafoil',
'Flower Glider',
'Bowser Kite',
'Plane Glider',
'MKTV Parafoil',
'Gold Glider',
'Hylian Kite',
'Paraglider',
'Paper Glider',
]
glider_urls = [
'https://static.wikia.nocookie.net/mariokart/images/a/a8/SuperGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125815',
'https://static.wikia.nocookie.net/mariokart/images/8/84/Cloud_Glider.png/revision/latest/scale-to-width-down/100?cb=20141102125838',
'https://static.wikia.nocookie.net/mariokart/images/a/ae/WarioWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125853',
'https://static.wikia.nocookie.net/mariokart/images/e/ef/WaddleWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125901',
'https://static.wikia.nocookie.net/mariokart/images/6/6e/PeachParasolGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125940',
'https://static.wikia.nocookie.net/mariokart/images/d/dd/ParachuteGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125823',
'https://static.wikia.nocookie.net/mariokart/images/c/c4/ParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125830',
'https://static.wikia.nocookie.net/mariokart/images/b/b3/FlowerGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125846',
'https://static.wikia.nocookie.net/mariokart/images/f/f7/BowserKiteMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125909',
'https://static.wikia.nocookie.net/mariokart/images/c/ca/PlaneGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125930',
'https://static.wikia.nocookie.net/mariokart/images/9/96/MKTVParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125947',
'https://static.wikia.nocookie.net/mariokart/images/1/18/GoldGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125956',
'https://static.wikia.nocookie.net/mariokart/images/6/62/MK8_HylianKite.png/revision/latest/scale-to-width-down/100?cb=20150331232731',
'https://static.wikia.nocookie.net/mariokart/images/3/39/MK8D_Paraglider.png/revision/latest/scale-to-width-down/117?cb=20200726155246',
'https://static.wikia.nocookie.net/mariokart/images/0/0e/PaperGliderIcon-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426181313',
]
# Seed characters. Colour and gender variants ("Yoshi (...)", "Shyguy (...)",
# "(G)") get index=None so they don't consume a roster slot of their own;
# `skipped` keeps the remaining characters' 1-based indices contiguous.
skipped = 0
for x, name in enumerate(char_names):
    index = x - skipped + 1
    if "Yoshi (" in name or "Shyguy (" in name or "(G)" in name:
        skipped += 1
        index = None
    Character(name=name, image_url=char_urls[x], index=index).save()
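# Worked example of the numbering above: Mario through Yoshi occupy indices
# 1-8; the eight Yoshi colour variants (x = 8..15) all get index None, so
# Toad at x = 16 lands at index 16 - 8 + 1 = 9.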
# Seed tires, vehicles, and gliders with simple 1-based indices.
for x, name in enumerate(tire_names):
    Tire(name=name, image_url=tire_urls[x], index=x + 1).save()

for x, name in enumerate(car_names):
    Vehicle(name=name, image_url=car_urls[x], index=x + 1).save()

for x, name in enumerate(glider_names):
    Glider(name=name, image_url=glider_urls[x], index=x + 1).save()
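# A minimal idempotent alternative (a sketch, not wired in above): re-running
# this script appends duplicate rows, so for the simple lists one could key
# each row on its name via Django's update_or_create instead. This assumes
# names are unique within a list (true for the tire, vehicle, and glider
# data; char_names repeats e.g. 'Yoshi (Rose)', so it would need another key).
def seed_idempotent(model, names, urls):
    for x, name in enumerate(names):
        model.objects.update_or_create(
            name=name,
            defaults={'image_url': urls[x], 'index': x + 1},
        )

# Example: seed_idempotent(Tire, tire_names, tire_urls)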
'https://static.wikia.nocookie.net/mariokart/images/e/ed/MK8_BlueFalcon.png/revision/latest/scale-to-width-down/100?cb=20150331235059',\n 'https://static.wikia.nocookie.net/mariokart/images/d/d7/MK8_TanookiBuggy.png/revision/latest/scale-to-width-down/100?cb=20150331235545',\n 'https://static.wikia.nocookie.net/mariokart/images/3/32/MK8_BDasher.png/revision/latest/scale-to-width-down/100?cb=20150401000836',\n 'https://static.wikia.nocookie.net/mariokart/images/c/cf/MK8Streetle.png/revision/latest/scale-to-width-down/100?cb=20150426174005',\n 'https://static.wikia.nocookie.net/mariokart/images/c/cd/MK8PWing.png/revision/latest/scale-to-width-down/100?cb=20150426174107',\n 'https://static.wikia.nocookie.net/mariokart/images/7/70/MK8DX_Koopa_Clown.png/revision/latest/scale-to-width-down/100?cb=20170704061052',\n 'https://static.wikia.nocookie.net/mariokart/images/8/84/StandardBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123849',\n 'https://static.wikia.nocookie.net/mariokart/images/0/0e/CometBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124024',\n 'https://static.wikia.nocookie.net/mariokart/images/f/fe/SportBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123857',\n 'https://static.wikia.nocookie.net/mariokart/images/8/8a/TheDukeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925174819',\n 'https://static.wikia.nocookie.net/mariokart/images/3/31/FlameRiderBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123942',\n 'https://static.wikia.nocookie.net/mariokart/images/d/d0/VarmintBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123951',\n 'https://static.wikia.nocookie.net/mariokart/images/1/18/MrScootyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123925',\n 'https://static.wikia.nocookie.net/mariokart/images/1/12/JetBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123928',\n 'https://static.wikia.nocookie.net/mariokart/images/6/62/YoshiBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925193256',\n 'https://static.wikia.nocookie.net/mariokart/images/5/52/MK8_MasterCycle.png/revision/latest/scale-to-width-down/100?cb=20150331231734',\n 'https://static.wikia.nocookie.net/mariokart/images/3/3e/150px-MK8D_Master_Cycle_Zero.png/revision/latest/scale-to-width-down/111?cb=20200726154936',\n 'https://static.wikia.nocookie.net/mariokart/images/9/90/MK8CityTripper.png/revision/latest/scale-to-width-down/100?cb=20150426175601',\n 'https://static.wikia.nocookie.net/mariokart/images/2/23/StandardATVBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124111',\n 'https://static.wikia.nocookie.net/mariokart/images/a/aa/WildWigglerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925175122',\n 'https://static.wikia.nocookie.net/mariokart/images/f/fa/TeddyBuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124120',\n 'https://static.wikia.nocookie.net/mariokart/images/0/0a/MK8BoneRattler.png/revision/latest/scale-to-width-down/100?cb=20150426180108',\n 'https://static.wikia.nocookie.net/mariokart/images/6/63/MK8DX_Splat_Buggy.png/revision/latest/scale-to-width-down/100?cb=20170706064814',\n 'https://static.wikia.nocookie.net/mariokart/images/e/eb/MK8DX_Inkstriker.png/revision/latest/scale-to-width-down/100?cb=20170706065507',\n]\n\ntire_names = [\n 'Standard',\n 'Monster',\n 'Roller',\n 'Slim',\n 'Slick',\n 'Metal',\n 'Button',\n 'Off-Road',\n 'Sponge',\n 'Wood',\n 'Cushion',\n 'Blue Standard',\n 'Hot Monster',\n 'Azure Roller',\n 
'Crimson Slim',\n 'Cyber Slick',\n 'Retro Off-Road',\n 'Gold Tires',\n 'GLA Tires',\n 'Triforce Tires',\n 'Ancient Tyres',\n 'Leaf Tires',\n]\n\ntire_urls = [\n 'https://static.wikia.nocookie.net/mariokart/images/a/a8/StandardTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125545',\n 'https://static.wikia.nocookie.net/mariokart/images/2/29/MonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125541',\n 'https://static.wikia.nocookie.net/mariokart/images/7/76/RollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125539',\n 'https://static.wikia.nocookie.net/mariokart/images/f/f8/SlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125536',\n 'https://static.wikia.nocookie.net/mariokart/images/d/dd/SlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125542',\n 'https://static.wikia.nocookie.net/mariokart/images/9/96/MetalTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124533',\n 'https://static.wikia.nocookie.net/mariokart/images/0/07/ButtonTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124541',\n 'https://static.wikia.nocookie.net/mariokart/images/2/25/Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102124559',\n 'https://static.wikia.nocookie.net/mariokart/images/4/4c/SpongeTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124549',\n 'https://static.wikia.nocookie.net/mariokart/images/0/03/WoodTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124724',\n 'https://static.wikia.nocookie.net/mariokart/images/9/92/CushionTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124817',\n 'https://static.wikia.nocookie.net/mariokart/images/d/db/Blue_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102124836',\n 'https://static.wikia.nocookie.net/mariokart/images/d/d1/HotMonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124834',\n 'https://static.wikia.nocookie.net/mariokart/images/f/fe/AzureRollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20200726154338',\n 'https://static.wikia.nocookie.net/mariokart/images/7/71/CrimsonSlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125627',\n 'https://static.wikia.nocookie.net/mariokart/images/2/29/CyberSlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125626',\n 'https://static.wikia.nocookie.net/mariokart/images/4/48/Retro_Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102125629',\n 'https://static.wikia.nocookie.net/mariokart/images/5/52/Gold_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20141102125630',\n 'https://static.wikia.nocookie.net/mariokart/images/b/ba/GLATires-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180539',\n 'https://static.wikia.nocookie.net/mariokart/images/0/09/MK8_TriforceTires.png/revision/latest/scale-to-width-down/100?cb=20150331233357',\n 'https://static.wikia.nocookie.net/mariokart/images/d/d5/MK8D_Ancient_Tires.png/revision/latest/scale-to-width-down/100?cb=20200726154442',\n 'https://static.wikia.nocookie.net/mariokart/images/f/f9/Leaf_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180810',\n]\n\nglider_names = [\n 'Super Glider',\n 'Cloud Glider',\n 'Wario Wing',\n 'Waddle Wing',\n 'Peach Parasol',\n 'Parachute',\n 'Parafoil',\n 'Flower Glider',\n 'Bowser Kite',\n 'Plane Glider',\n 'MKTV Parafoil',\n 'Gold Glider',\n 'Hylian Kite',\n 'Paraglider',\n 'Paper Glider',\n]\n\nglider_urls = [\n 
'https://static.wikia.nocookie.net/mariokart/images/a/a8/SuperGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125815',\n 'https://static.wikia.nocookie.net/mariokart/images/8/84/Cloud_Glider.png/revision/latest/scale-to-width-down/100?cb=20141102125838',\n 'https://static.wikia.nocookie.net/mariokart/images/a/ae/WarioWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125853',\n 'https://static.wikia.nocookie.net/mariokart/images/e/ef/WaddleWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125901',\n 'https://static.wikia.nocookie.net/mariokart/images/6/6e/PeachParasolGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125940',\n 'https://static.wikia.nocookie.net/mariokart/images/d/dd/ParachuteGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125823',\n 'https://static.wikia.nocookie.net/mariokart/images/c/c4/ParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125830',\n 'https://static.wikia.nocookie.net/mariokart/images/b/b3/FlowerGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125846',\n 'https://static.wikia.nocookie.net/mariokart/images/f/f7/BowserKiteMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125909',\n 'https://static.wikia.nocookie.net/mariokart/images/c/ca/PlaneGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125930',\n 'https://static.wikia.nocookie.net/mariokart/images/9/96/MKTVParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125947',\n 'https://static.wikia.nocookie.net/mariokart/images/1/18/GoldGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125956',\n 'https://static.wikia.nocookie.net/mariokart/images/6/62/MK8_HylianKite.png/revision/latest/scale-to-width-down/100?cb=20150331232731',\n 'https://static.wikia.nocookie.net/mariokart/images/3/39/MK8D_Paraglider.png/revision/latest/scale-to-width-down/117?cb=20200726155246',\n 'https://static.wikia.nocookie.net/mariokart/images/0/0e/PaperGliderIcon-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426181313',\n]\n\n\nx=0\ny=0\nfor char in char_names:\n index=x-y+1\n name = char_names[x]\n if \"Yoshi (\" in name or \"Shyguy (\" in name or \"(G)\" in name:\n y+=1\n index=None\n new_char = Character(name=char_names[x],image_url=char_urls[x],index=index)\n new_char.save()\n x+=1\n\nx=0\nfor tire in tire_names:\n index=x+1\n new_tire = Tire(name=tire_names[x],image_url=tire_urls[x],index=index)\n new_tire.save()\n x+=1\nx=0\nfor car in car_names:\n index=x+1\n new_car = Vehicle(name=car_names[x],image_url=car_urls[x],index=index)\n new_car.save()\n x+=1\nx=0\nfor glider in glider_names:\n index=x+1\n new_glider = Glider(name=glider_names[x],image_url=glider_urls[x],index=index)\n new_glider.save()\n x+=1\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
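A minimal sketch of the seeding loops in the record above, folded into one reusable helper. It assumes the same mkrandom Django project and main.models classes from the record; get_or_create (an addition, not in the original) makes re-runs idempotent, and is_variant mirrors the x/y bookkeeping that gives colour reskins index=None. The short sample lists and example.invalid URLs are placeholders for the record's full name/url lists.

import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mkrandom.settings')
import django
django.setup()
from main.models import Character, Glider

def seed(model, names, urls, is_variant=lambda name: False):
    # Variant rows get index=None and do not advance the running index,
    # mirroring the x/y counters in the original loop.
    offset = 0
    for pos, (name, url) in enumerate(zip(names, urls)):
        if is_variant(name):
            offset += 1
            index = None
        else:
            index = pos - offset + 1
        model.objects.get_or_create(name=name, image_url=url,
                                    defaults={'index': index})

# In practice the full char_names/char_urls etc. lists from the record are
# passed here; two-element samples keep the sketch short.
seed(Character, ['Mario', 'Yoshi (Blue)'],
     ['https://example.invalid/mario.png', 'https://example.invalid/yoshi.png'],
     is_variant=lambda n: 'Yoshi (' in n or 'Shyguy (' in n or '(G)' in n)
seed(Glider, ['Super Glider'], ['https://example.invalid/glider.png'])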
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def watch():
print('시청하다')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def watch():
print('시청하다')
watch()
print('tv.py의 module 이름은', __name__)
<|reserved_special_token_1|>
def watch():
print("시청하다")
watch()
print("tv.py의 module 이름은",__name__) #name은 __main__으로 나옴
|
flexible
|
{
"blob_id": "b9622bede471c76ae36d3f59130d2be113310d4c",
"index": 7045,
"step-1": "<mask token>\n",
"step-2": "def watch():\n print('시청하다')\n\n\n<mask token>\n",
"step-3": "def watch():\n print('시청하다')\n\n\nwatch()\nprint('tv.py의 module 이름은', __name__)\n",
"step-4": "def watch():\n print(\"시청하다\")\nwatch()\n\nprint(\"tv.py의 module 이름은\",__name__) #name은 __main__으로 나옴",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
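A hedged companion sketch to the record above: the same module with the standard __main__ guard, so the calls run only under `python tv.py` and not under `import tv`. English strings are used here; the record prints the Korean '시청하다' ("to watch").

def watch():
    print('watching')

if __name__ == '__main__':
    # entered only when the file is executed directly, never on import
    watch()
    print('module name of tv.py:', __name__)  # "__main__" when run directly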
#!/usr/bin/python
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
occl_frac = 0.445188
result = [1-occl_frac, occl_frac, 0]
#Reading res_data.txt
mnfa = [0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9] #min NN factor array
nna = [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,24,26,28,30] #NN array
fraction_data=[[[0.0 for i in range(len(mnfa))] for j in range(len(nna))] for k in range(3)]
df = open("res_data.txt", "r")
while(True):
try:
x = df.readline()
y = map(float,x.split())
if y==[]:
break
mnfa_index = mnfa.index(y[0])
nna_index = nna.index(y[1])
df.readline() #Get no. of points in each
df.readline() #Get blown up fractions
frdata = map(float,df.readline().split()) #get actual fractions
assert(len(frdata) == 3)
for i in range(3):
fraction_data[i][nna_index][mnfa_index] = frdata[i]
except(IOError):
print "What?"
break
print "Read all data from file, plotting stuff..."
fig = plt.figure()
for i in range(3):
    ax = fig.add_subplot(1,3,i+1, projection='3d') # subplot numbering is 1-based; index 0 raises an error
X = mnfa
xlen = len(X)
Y = nna
ylen = len(Y)
X, Y = np.meshgrid(X, Y)
Z = fraction_data[i]
colortuple = ('r', 'b')
colors = np.empty(X.shape, dtype=str)
for y in range(ylen):
for x in range(xlen):
colors[y, x] = colortuple[(x+y) % len(colortuple)]
ax.plot_surface(X,Y,Z,rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0)
plt.show()
|
normal
|
{
"blob_id": "1c8b843174521f1056e2bac472c87d0b5ec9603e",
"index": 3370,
"step-1": "#!/usr/bin/python\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\noccl_frac = 0.445188\nresult = [1-occl_frac, occl_frac, 0]\n\n#Reading res_data.txt\nmnfa = [0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9] #min NN factor array\nnna = [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,24,26,28,30] #NN Aray\n\nfraction_data=[[[0.0 for i in range(len(mnfa))] for j in range(len(nna))] for k in range(3)]\n\ndf = open(\"res_data.txt\", \"r\")\nwhile(True):\n try:\n x = df.readline()\n y = map(float,x.split())\n if y==[]:\n break\n mnfa_index = mnfa.index(y[0])\n nna_index = nna.index(y[1])\n df.readline() #Get no. of points in each\n df.readline() #Get blown up fractions\n frdata = map(float,df.readline().split()) #get actual fractions\n assert(len(frdata) == 3)\n for i in range(3):\n fraction_data[i][nna_index][mnfa_index] = frdata[i]\n except(IOError):\n print \"What?\"\n break\nprint \"Read all data from file, plotting stuff...\"\n\nfig = plt.figure()\n\nfor i in range(3):\n ax = fig.add_subplot(1,3,i, projection='3d')\n X = mnfa\n xlen = len(X)\n Y = nna\n ylen = len(Y)\n X, Y = np.meshgrid(X, Y)\n Z = fraction_data[i]\n colortuple = ('r', 'b')\n colors = np.empty(X.shape, dtype=str)\n for y in range(ylen):\n for x in range(xlen):\n colors[y, x] = colortuple[(x+y) % len(colortuple)]\n ax.plot_surface(X,Y,Z,rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0)\nplt.show()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
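A hedged Python 3 port of the plotting script above (the original is Python 2: bare print statements, map() returning a list). The layout of res_data.txt is assumed from the parser in the record: a parameter line, two skipped lines, then three whitespace-separated fractions per block.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (required on older matplotlib)

mnfa = [i / 10 for i in range(10)]              # min NN factor grid
nna = list(range(2, 23)) + [24, 26, 28, 30]     # NN grid
fraction_data = np.zeros((3, len(nna), len(mnfa)))

with open('res_data.txt') as df:
    while True:
        fields = df.readline().split()
        if not fields:                          # EOF: readline() returns ''
            break
        mnfa_index = mnfa.index(float(fields[0]))
        nna_index = nna.index(int(float(fields[1])))
        df.readline()                           # no. of points per bin (skipped)
        df.readline()                           # blown-up fractions (skipped)
        fraction_data[:, nna_index, mnfa_index] = [float(v) for v in df.readline().split()]

X, Y = np.meshgrid(mnfa, nna)
fig = plt.figure()
for i in range(3):
    ax = fig.add_subplot(1, 3, i + 1, projection='3d')  # 1-based subplot index
    ax.plot_surface(X, Y, fraction_data[i], rstride=1, cstride=1,
                    cmap=cm.coolwarm, linewidth=0)
plt.show()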
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser.add_argument('-u', '--user')
parser.add_argument('-c', '--color')
<|reserved_special_token_0|>
print(combined['color'])
print(combined['user'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
defaults = {'color': 'red', 'user': 'guest'}
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user')
parser.add_argument('-c', '--color')
namespace = parser.parse_args()
command_line_args = {k: v for k, v in vars(namespace).items() if v is not None}
combined = collections.ChainMap(command_line_args, os.environ, defaults)
print(combined['color'])
print(combined['user'])
<|reserved_special_token_1|>
import os, argparse, collections
defaults = {'color': 'red', 'user': 'guest'}
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user')
parser.add_argument('-c', '--color')
namespace = parser.parse_args()
command_line_args = {k: v for k, v in vars(namespace).items() if v is not None}
combined = collections.ChainMap(command_line_args, os.environ, defaults)
print(combined['color'])
print(combined['user'])
<|reserved_special_token_1|>
import os, argparse,collections
defaults ={'color':'red','user':'guest'}
parser=argparse.ArgumentParser()
parser.add_argument('-u','--user')
parser.add_argument('-c','--color')
#a simple Namespace object will be built up from attributes parsed out of the command line
namespace= parser.parse_args()
command_line_args= {k: v for k , v in vars(namespace).items()if v is not None}
combined= collections.ChainMap(command_line_args,os.environ,defaults)
print(combined['color'])
print(combined['user'])
|
flexible
|
{
"blob_id": "3c31e3f2a6f320bc5ae33f0ba1d234a089371899",
"index": 9199,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('-u', '--user')\nparser.add_argument('-c', '--color')\n<mask token>\nprint(combined['color'])\nprint(combined['user'])\n",
"step-3": "<mask token>\ndefaults = {'color': 'red', 'user': 'guest'}\nparser = argparse.ArgumentParser()\nparser.add_argument('-u', '--user')\nparser.add_argument('-c', '--color')\nnamespace = parser.parse_args()\ncommand_line_args = {k: v for k, v in vars(namespace).items() if v is not None}\ncombined = collections.ChainMap(command_line_args, os.environ, defaults)\nprint(combined['color'])\nprint(combined['user'])\n",
"step-4": "import os, argparse, collections\ndefaults = {'color': 'red', 'user': 'guest'}\nparser = argparse.ArgumentParser()\nparser.add_argument('-u', '--user')\nparser.add_argument('-c', '--color')\nnamespace = parser.parse_args()\ncommand_line_args = {k: v for k, v in vars(namespace).items() if v is not None}\ncombined = collections.ChainMap(command_line_args, os.environ, defaults)\nprint(combined['color'])\nprint(combined['user'])\n",
"step-5": "import os, argparse,collections\n\ndefaults ={'color':'red','user':'guest'}\nparser=argparse.ArgumentParser()\nparser.add_argument('-u','--user')\nparser.add_argument('-c','--color')\n\n#a simple Namespace object will be built up from attributes parsed out of the command lin\n\nnamespace= parser.parse_args()\ncommand_line_args= {k: v for k , v in vars(namespace).items()if v is not None}\n\ncombined= collections.ChainMap(command_line_args,os.environ,defaults)\n\nprint(combined['color'])\nprint(combined['user'])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
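A quick demonstration of the lookup order ChainMap gives the script above: parsed command-line values shadow environment variables, which shadow the hard-coded defaults. Stub dicts stand in for the parsed Namespace and os.environ here so the sketch runs without arguments.

import collections

defaults = {'color': 'red', 'user': 'guest'}
fake_environ = {'user': 'env_user'}      # stand-in for os.environ
fake_cli = {'color': 'blue'}             # stand-in for the parsed arguments

combined = collections.ChainMap(fake_cli, fake_environ, defaults)
print(combined['color'])   # blue      (found in the command-line layer)
print(combined['user'])    # env_user  (falls through to the environment)
print(combined.maps)       # the three dicts, highest precedence first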
<|reserved_special_token_0|>
class Solution:
def sortElemsByFrequency(self, arr):
if arr:
x = []
res = []
mydict = {}
for k, v in enumerate(arr):
mydict[v] = mydict.get(v, 0) + 1
for k, v in mydict.items():
heapq.heappush(x, (v, k))
while x:
res.insert(0, heapq.heappop(x)[1])
return res
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def sortElemsByFrequency(self, arr):
if arr:
x = []
res = []
mydict = {}
for k, v in enumerate(arr):
mydict[v] = mydict.get(v, 0) + 1
for k, v in mydict.items():
heapq.heappush(x, (v, k))
while x:
res.insert(0, heapq.heappop(x)[1])
return res
<|reserved_special_token_0|>
print(res)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def sortElemsByFrequency(self, arr):
if arr:
x = []
res = []
mydict = {}
for k, v in enumerate(arr):
mydict[v] = mydict.get(v, 0) + 1
for k, v in mydict.items():
heapq.heappush(x, (v, k))
while x:
res.insert(0, heapq.heappop(x)[1])
return res
sol = Solution()
res = sol.sortElemsByFrequency([2, 5, 2, 8, 5, 6, 8, 8])
print(res)
<|reserved_special_token_1|>
import heapq
class Solution:
def sortElemsByFrequency(self, arr):
if arr:
x = []
res = []
mydict = {}
for k, v in enumerate(arr):
mydict[v] = mydict.get(v, 0) + 1
for k, v in mydict.items():
heapq.heappush(x, (v, k))
while x:
res.insert(0, heapq.heappop(x)[1])
return res
sol = Solution()
res = sol.sortElemsByFrequency([2, 5, 2, 8, 5, 6, 8, 8])
print(res)
<|reserved_special_token_1|>
import heapq
class Solution: #priority queue
# def sortElemsByFrequency(self, arr):
# if arr:
# mydict = {}
# for k,v in enumerate(arr):
# mydict[v] = mydict.get(v, 0) + 1
# sorted_dict = sorted(mydict.items(), key = lambda x:x[1])
# return sorted_dict
def sortElemsByFrequency(self, arr):
if arr:
x = []
res = []
mydict = {}
for k,v in enumerate(arr):
mydict[v] = mydict.get(v, 0) + 1
for k,v in mydict.items():
heapq.heappush(x, (v,k))
while x:
res.insert(0, heapq.heappop(x)[1])
return res
sol = Solution()
res = sol.sortElemsByFrequency([2, 5, 2, 8, 5, 6, 8, 8])
print(res)
|
flexible
|
{
"blob_id": "dcb12e282962c63f8e7de5d29c4c81ad177a387e",
"index": 7775,
"step-1": "<mask token>\n\n\nclass Solution:\n\n def sortElemsByFrequency(self, arr):\n if arr:\n x = []\n res = []\n mydict = {}\n for k, v in enumerate(arr):\n mydict[v] = mydict.get(v, 0) + 1\n for k, v in mydict.items():\n heapq.heappush(x, (v, k))\n while x:\n res.insert(0, heapq.heappop(x)[1])\n return res\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def sortElemsByFrequency(self, arr):\n if arr:\n x = []\n res = []\n mydict = {}\n for k, v in enumerate(arr):\n mydict[v] = mydict.get(v, 0) + 1\n for k, v in mydict.items():\n heapq.heappush(x, (v, k))\n while x:\n res.insert(0, heapq.heappop(x)[1])\n return res\n\n\n<mask token>\nprint(res)\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def sortElemsByFrequency(self, arr):\n if arr:\n x = []\n res = []\n mydict = {}\n for k, v in enumerate(arr):\n mydict[v] = mydict.get(v, 0) + 1\n for k, v in mydict.items():\n heapq.heappush(x, (v, k))\n while x:\n res.insert(0, heapq.heappop(x)[1])\n return res\n\n\nsol = Solution()\nres = sol.sortElemsByFrequency([2, 5, 2, 8, 5, 6, 8, 8])\nprint(res)\n",
"step-4": "import heapq\n\n\nclass Solution:\n\n def sortElemsByFrequency(self, arr):\n if arr:\n x = []\n res = []\n mydict = {}\n for k, v in enumerate(arr):\n mydict[v] = mydict.get(v, 0) + 1\n for k, v in mydict.items():\n heapq.heappush(x, (v, k))\n while x:\n res.insert(0, heapq.heappop(x)[1])\n return res\n\n\nsol = Solution()\nres = sol.sortElemsByFrequency([2, 5, 2, 8, 5, 6, 8, 8])\nprint(res)\n",
"step-5": "import heapq\nclass Solution: #priority queue\n # def sortElemsByFrequency(self, arr):\n # if arr:\n # mydict = {}\n # for k,v in enumerate(arr):\n # mydict[v] = mydict.get(v, 0) + 1\n # sorted_dict = sorted(mydict.items(), key = lambda x:x[1])\n # return sorted_dict\n\n def sortElemsByFrequency(self, arr):\n if arr:\n x = []\n res = []\n mydict = {}\n for k,v in enumerate(arr):\n mydict[v] = mydict.get(v, 0) + 1\n for k,v in mydict.items():\n heapq.heappush(x, (v,k))\n while x:\n res.insert(0, heapq.heappop(x)[1])\n return res\n\nsol = Solution()\nres = sol.sortElemsByFrequency([2, 5, 2, 8, 5, 6, 8, 8])\nprint(res)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
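The heap in the record yields most-frequent-first because each popped (count, value) pair is prepended to the result. collections.Counter expresses the same ordering directly; a short sketch (tie order among equal counts may differ slightly from the heap version):

from collections import Counter

def sort_elems_by_frequency(arr):
    # most_common() returns (value, count) pairs sorted by descending count
    return [value for value, _ in Counter(arr).most_common()]

print(sort_elems_by_frequency([2, 5, 2, 8, 5, 6, 8, 8]))  # [8, 2, 5, 6]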
#from tinyTensor.Node import Node
import tinyTensor
import plotly.plotly as py
from graphviz import render
#from tinyTensor.Operation import Operation
def init():
global _default_graph
_default_graph = None
def postOrder(node):
nodes_postorder = []
def recurse(node):
if isinstance(node, tinyTensor.Node.Node):
for input_node in node.inputNodes:
recurse(input_node)
nodes_postorder.append(node)
recurse(node)
return nodes_postorder
class Graph():
def __init__(self):
self.nodes = []
self.placeholderNames = []
def appendNode(self,node):
if(node.name in self.placeholderNames and node.isPlaceholder):
raise Exception("Placeholder name \"{}\" is already in use in current graph".format(node.name))
elif(node.isPlaceholder):
self.placeholderNames.append(node.name)
self.nodes.append(node)
def set_default(self):
init()
global _default_graph
_default_graph = self
def visualize_nodes(self, node):
# generating the .gv file
gv_file = "graph \"\" \n{\n"
global nodeCounter
nodeCounter = 0
def recurse(nodes,gv_file,parent_node_str = None):
global nodeCounter
nodes_list = []
if(isinstance(nodes,list)):
nodes_list.extend(nodes)
else:
nodes_list.append(nodes)
for node in nodes_list:
# node should add itself to the list
current_node_str = "n" + str(nodeCounter)
nodeCounter += 1
''' operation might contain non-node constants, hence need to make sure that they are converted to node'''
if(type(node) in (int,float)):
node = tinyTensor.Node.Node.variable(node) # creating a variable node
'''creating the node labels'''
if(isinstance(node,tinyTensor.Operation.Operation)):
gv_file += current_node_str + " [label=\"{} ({})\"] ;\n".format(node.operator,node.value)
elif(node.isPlaceholder):
gv_file += current_node_str + " [label=\"{}({})\"] ;\n".format(node.name,node.value)
else:
gv_file += current_node_str + " [label=\"{}({})\"] ;\n".format(node.name,node.value)
# now creating connection line to parent(s) TODO: make it possible to have many parents, (nodes should have output nodes list)
if(parent_node_str != None):
gv_file += parent_node_str + " -- " + current_node_str + "; \n"
# applying the same to the children of this node
if(len(node.inputNodes) > 0):
gv_file = recurse(node.inputNodes,gv_file,current_node_str)
return gv_file
gv_file = recurse(node,gv_file)
gv_file += "}\n"
with open("network.gv","w+") as file:
file.writelines(gv_file)
#render('dot','png','network.gv')
print(gv_file)
def visualize_layers(self,layer_list):
neuron_dict = {}
#generating dict of neurons
gv_file = "graph \"\" \n{\n"
#dealing with input nodes
for node in layer_list[0].inputList:
neuron_dict[node] = node.name
gv_file += neuron_dict[node] + " [label=\"{}({})\"] ;\n".format(node.name,node.value)
# creating dict for neurons
for layer in layer_list:
for neuron in layer.neuronList:
neuron_dict[neuron] = "{}".format(neuron.name)
gv_file += neuron_dict[neuron] + " [label=\"{}({})\"] ;\n".format(neuron.name,neuron.value)
# drawing links between neurons
for layer in layer_list:
for neuron in layer.neuronList:
for input_neuron in neuron.inputNeurons:
gv_file += neuron_dict[neuron] + " -- " + neuron_dict[input_neuron] + "; \n"
gv_file += "}\n"
with open("network.gv","w+") as file:
file.writelines(gv_file)
print(gv_file)
|
normal
|
{
"blob_id": "7bd2a29bff1e435cf813dd54109d7f4e17612425",
"index": 474,
"step-1": "<mask token>\n\n\nclass Graph:\n <mask token>\n\n def appendNode(self, node):\n if node.name in self.placeholderNames and node.isPlaceholder:\n raise Exception(\n 'Placeholder name \"{}\" is already in use in current graph'.\n format(node.name))\n elif node.isPlaceholder:\n self.placeholderNames.append(node.name)\n self.nodes.append(node)\n\n def set_default(self):\n init()\n global _default_graph\n _default_graph = self\n\n def visualize_nodes(self, node):\n gv_file = 'graph \"\" \\n{\\n'\n global nodeCounter\n nodeCounter = 0\n\n def recurse(nodes, gv_file, parent_node_str=None):\n global nodeCounter\n nodes_list = []\n if isinstance(nodes, list):\n nodes_list.extend(nodes)\n else:\n nodes_list.append(nodes)\n for node in nodes_list:\n current_node_str = 'n' + str(nodeCounter)\n nodeCounter += 1\n \"\"\" operation might contain non-node constants, hence need to make sure that they are converted to node\"\"\"\n if type(node) in (int, float):\n node = tinyTensor.Node.Node.variable(node)\n \"\"\"creating the node labels\"\"\"\n if isinstance(node, tinyTensor.Operation.Operation):\n gv_file += (current_node_str + ' [label=\"{} ({})\"] ;\\n'\n .format(node.operator, node.value))\n elif node.isPlaceholder:\n gv_file += (current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n else:\n gv_file += (current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n if parent_node_str != None:\n gv_file += (parent_node_str + ' -- ' + current_node_str +\n '; \\n')\n if len(node.inputNodes) > 0:\n gv_file = recurse(node.inputNodes, gv_file,\n current_node_str)\n return gv_file\n gv_file = recurse(node, gv_file)\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n\n def visualize_layers(self, layer_list):\n neuron_dict = {}\n gv_file = 'graph \"\" \\n{\\n'\n for node in layer_list[0].inputList:\n neuron_dict[node] = node.name\n gv_file += neuron_dict[node] + ' [label=\"{}({})\"] ;\\n'.format(node\n .name, node.value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n neuron_dict[neuron] = '{}'.format(neuron.name)\n gv_file += neuron_dict[neuron\n ] + ' [label=\"{}({})\"] ;\\n'.format(neuron.name, neuron.\n value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n for input_neuron in neuron.inputNeurons:\n gv_file += neuron_dict[neuron] + ' -- ' + neuron_dict[\n input_neuron] + '; \\n'\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n",
"step-2": "<mask token>\n\n\ndef postOrder(node):\n nodes_postorder = []\n\n def recurse(node):\n if isinstance(node, tinyTensor.Node.Node):\n for input_node in node.inputNodes:\n recurse(input_node)\n nodes_postorder.append(node)\n recurse(node)\n return nodes_postorder\n\n\nclass Graph:\n\n def __init__(self):\n self.nodes = []\n self.placeholderNames = []\n\n def appendNode(self, node):\n if node.name in self.placeholderNames and node.isPlaceholder:\n raise Exception(\n 'Placeholder name \"{}\" is already in use in current graph'.\n format(node.name))\n elif node.isPlaceholder:\n self.placeholderNames.append(node.name)\n self.nodes.append(node)\n\n def set_default(self):\n init()\n global _default_graph\n _default_graph = self\n\n def visualize_nodes(self, node):\n gv_file = 'graph \"\" \\n{\\n'\n global nodeCounter\n nodeCounter = 0\n\n def recurse(nodes, gv_file, parent_node_str=None):\n global nodeCounter\n nodes_list = []\n if isinstance(nodes, list):\n nodes_list.extend(nodes)\n else:\n nodes_list.append(nodes)\n for node in nodes_list:\n current_node_str = 'n' + str(nodeCounter)\n nodeCounter += 1\n \"\"\" operation might contain non-node constants, hence need to make sure that they are converted to node\"\"\"\n if type(node) in (int, float):\n node = tinyTensor.Node.Node.variable(node)\n \"\"\"creating the node labels\"\"\"\n if isinstance(node, tinyTensor.Operation.Operation):\n gv_file += (current_node_str + ' [label=\"{} ({})\"] ;\\n'\n .format(node.operator, node.value))\n elif node.isPlaceholder:\n gv_file += (current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n else:\n gv_file += (current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n if parent_node_str != None:\n gv_file += (parent_node_str + ' -- ' + current_node_str +\n '; \\n')\n if len(node.inputNodes) > 0:\n gv_file = recurse(node.inputNodes, gv_file,\n current_node_str)\n return gv_file\n gv_file = recurse(node, gv_file)\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n\n def visualize_layers(self, layer_list):\n neuron_dict = {}\n gv_file = 'graph \"\" \\n{\\n'\n for node in layer_list[0].inputList:\n neuron_dict[node] = node.name\n gv_file += neuron_dict[node] + ' [label=\"{}({})\"] ;\\n'.format(node\n .name, node.value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n neuron_dict[neuron] = '{}'.format(neuron.name)\n gv_file += neuron_dict[neuron\n ] + ' [label=\"{}({})\"] ;\\n'.format(neuron.name, neuron.\n value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n for input_neuron in neuron.inputNeurons:\n gv_file += neuron_dict[neuron] + ' -- ' + neuron_dict[\n input_neuron] + '; \\n'\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n",
"step-3": "<mask token>\n\n\ndef init():\n global _default_graph\n _default_graph = None\n\n\ndef postOrder(node):\n nodes_postorder = []\n\n def recurse(node):\n if isinstance(node, tinyTensor.Node.Node):\n for input_node in node.inputNodes:\n recurse(input_node)\n nodes_postorder.append(node)\n recurse(node)\n return nodes_postorder\n\n\nclass Graph:\n\n def __init__(self):\n self.nodes = []\n self.placeholderNames = []\n\n def appendNode(self, node):\n if node.name in self.placeholderNames and node.isPlaceholder:\n raise Exception(\n 'Placeholder name \"{}\" is already in use in current graph'.\n format(node.name))\n elif node.isPlaceholder:\n self.placeholderNames.append(node.name)\n self.nodes.append(node)\n\n def set_default(self):\n init()\n global _default_graph\n _default_graph = self\n\n def visualize_nodes(self, node):\n gv_file = 'graph \"\" \\n{\\n'\n global nodeCounter\n nodeCounter = 0\n\n def recurse(nodes, gv_file, parent_node_str=None):\n global nodeCounter\n nodes_list = []\n if isinstance(nodes, list):\n nodes_list.extend(nodes)\n else:\n nodes_list.append(nodes)\n for node in nodes_list:\n current_node_str = 'n' + str(nodeCounter)\n nodeCounter += 1\n \"\"\" operation might contain non-node constants, hence need to make sure that they are converted to node\"\"\"\n if type(node) in (int, float):\n node = tinyTensor.Node.Node.variable(node)\n \"\"\"creating the node labels\"\"\"\n if isinstance(node, tinyTensor.Operation.Operation):\n gv_file += (current_node_str + ' [label=\"{} ({})\"] ;\\n'\n .format(node.operator, node.value))\n elif node.isPlaceholder:\n gv_file += (current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n else:\n gv_file += (current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n if parent_node_str != None:\n gv_file += (parent_node_str + ' -- ' + current_node_str +\n '; \\n')\n if len(node.inputNodes) > 0:\n gv_file = recurse(node.inputNodes, gv_file,\n current_node_str)\n return gv_file\n gv_file = recurse(node, gv_file)\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n\n def visualize_layers(self, layer_list):\n neuron_dict = {}\n gv_file = 'graph \"\" \\n{\\n'\n for node in layer_list[0].inputList:\n neuron_dict[node] = node.name\n gv_file += neuron_dict[node] + ' [label=\"{}({})\"] ;\\n'.format(node\n .name, node.value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n neuron_dict[neuron] = '{}'.format(neuron.name)\n gv_file += neuron_dict[neuron\n ] + ' [label=\"{}({})\"] ;\\n'.format(neuron.name, neuron.\n value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n for input_neuron in neuron.inputNeurons:\n gv_file += neuron_dict[neuron] + ' -- ' + neuron_dict[\n input_neuron] + '; \\n'\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n",
"step-4": "import tinyTensor\nimport plotly.plotly as py\nfrom graphviz import render\n\n\ndef init():\n global _default_graph\n _default_graph = None\n\n\ndef postOrder(node):\n nodes_postorder = []\n\n def recurse(node):\n if isinstance(node, tinyTensor.Node.Node):\n for input_node in node.inputNodes:\n recurse(input_node)\n nodes_postorder.append(node)\n recurse(node)\n return nodes_postorder\n\n\nclass Graph:\n\n def __init__(self):\n self.nodes = []\n self.placeholderNames = []\n\n def appendNode(self, node):\n if node.name in self.placeholderNames and node.isPlaceholder:\n raise Exception(\n 'Placeholder name \"{}\" is already in use in current graph'.\n format(node.name))\n elif node.isPlaceholder:\n self.placeholderNames.append(node.name)\n self.nodes.append(node)\n\n def set_default(self):\n init()\n global _default_graph\n _default_graph = self\n\n def visualize_nodes(self, node):\n gv_file = 'graph \"\" \\n{\\n'\n global nodeCounter\n nodeCounter = 0\n\n def recurse(nodes, gv_file, parent_node_str=None):\n global nodeCounter\n nodes_list = []\n if isinstance(nodes, list):\n nodes_list.extend(nodes)\n else:\n nodes_list.append(nodes)\n for node in nodes_list:\n current_node_str = 'n' + str(nodeCounter)\n nodeCounter += 1\n \"\"\" operation might contain non-node constants, hence need to make sure that they are converted to node\"\"\"\n if type(node) in (int, float):\n node = tinyTensor.Node.Node.variable(node)\n \"\"\"creating the node labels\"\"\"\n if isinstance(node, tinyTensor.Operation.Operation):\n gv_file += (current_node_str + ' [label=\"{} ({})\"] ;\\n'\n .format(node.operator, node.value))\n elif node.isPlaceholder:\n gv_file += (current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n else:\n gv_file += (current_node_str + ' [label=\"{}({})\"] ;\\n'.\n format(node.name, node.value))\n if parent_node_str != None:\n gv_file += (parent_node_str + ' -- ' + current_node_str +\n '; \\n')\n if len(node.inputNodes) > 0:\n gv_file = recurse(node.inputNodes, gv_file,\n current_node_str)\n return gv_file\n gv_file = recurse(node, gv_file)\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n\n def visualize_layers(self, layer_list):\n neuron_dict = {}\n gv_file = 'graph \"\" \\n{\\n'\n for node in layer_list[0].inputList:\n neuron_dict[node] = node.name\n gv_file += neuron_dict[node] + ' [label=\"{}({})\"] ;\\n'.format(node\n .name, node.value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n neuron_dict[neuron] = '{}'.format(neuron.name)\n gv_file += neuron_dict[neuron\n ] + ' [label=\"{}({})\"] ;\\n'.format(neuron.name, neuron.\n value)\n for layer in layer_list:\n for neuron in layer.neuronList:\n for input_neuron in neuron.inputNeurons:\n gv_file += neuron_dict[neuron] + ' -- ' + neuron_dict[\n input_neuron] + '; \\n'\n gv_file += '}\\n'\n with open('network.gv', 'w+') as file:\n file.writelines(gv_file)\n print(gv_file)\n",
"step-5": "#from tinyTensor.Node import Node\r\nimport tinyTensor\r\nimport plotly.plotly as py\r\nfrom graphviz import render\r\n#from tinyTensor.Operation import Operation\r\n\r\n\r\ndef init():\r\n global _default_graph\r\n _default_graph = None\r\n\r\ndef postOrder(node):\r\n nodes_postorder = []\r\n def recurse(node):\r\n if isinstance(node, tinyTensor.Node.Node):\r\n for input_node in node.inputNodes:\r\n recurse(input_node)\r\n nodes_postorder.append(node)\r\n recurse(node)\r\n return nodes_postorder\r\n\r\nclass Graph():\r\n\r\n def __init__(self):\r\n self.nodes = []\r\n self.placeholderNames = []\r\n\r\n def appendNode(self,node):\r\n if(node.name in self.placeholderNames and node.isPlaceholder):\r\n raise Exception(\"Placeholder name \\\"{}\\\" is already in use in current graph\".format(node.name))\r\n elif(node.isPlaceholder):\r\n self.placeholderNames.append(node.name)\r\n self.nodes.append(node)\r\n\r\n def set_default(self):\r\n init()\r\n global _default_graph\r\n _default_graph = self\r\n\r\n def visualize_nodes(self, node):\r\n # generating the .gv file\r\n gv_file = \"graph \\\"\\\" \\n{\\n\"\r\n global nodeCounter\r\n nodeCounter = 0\r\n def recurse(nodes,gv_file,parent_node_str = None):\r\n global nodeCounter\r\n nodes_list = []\r\n if(isinstance(nodes,list)):\r\n nodes_list.extend(nodes)\r\n else:\r\n nodes_list.append(nodes)\r\n for node in nodes_list:\r\n # node should add itself to the list\r\n current_node_str = \"n\" + str(nodeCounter)\r\n nodeCounter += 1\r\n ''' operation might contain non-node constants, hence need to make sure that they are converted to node'''\r\n if(type(node) in (int,float)):\r\n node = tinyTensor.Node.Node.variable(node) # creating a variable node\r\n '''creating the node labels'''\r\n if(isinstance(node,tinyTensor.Operation.Operation)):\r\n gv_file += current_node_str + \" [label=\\\"{} ({})\\\"] ;\\n\".format(node.operator,node.value)\r\n elif(node.isPlaceholder):\r\n gv_file += current_node_str + \" [label=\\\"{}({})\\\"] ;\\n\".format(node.name,node.value)\r\n else:\r\n gv_file += current_node_str + \" [label=\\\"{}({})\\\"] ;\\n\".format(node.name,node.value)\r\n # now creating connection line to parent(s) TODO: make it possible to have many parents, (nodes should have output nodes list)\r\n if(parent_node_str != None):\r\n gv_file += parent_node_str + \" -- \" + current_node_str + \"; \\n\"\r\n # applying the same to the children of this node\r\n if(len(node.inputNodes) > 0):\r\n gv_file = recurse(node.inputNodes,gv_file,current_node_str)\r\n return gv_file\r\n gv_file = recurse(node,gv_file)\r\n gv_file += \"}\\n\"\r\n with open(\"network.gv\",\"w+\") as file:\r\n file.writelines(gv_file)\r\n #render('dot','png','network.gv')\r\n print(gv_file)\r\n\r\n def visualize_layers(self,layer_list):\r\n neuron_dict = {}\r\n #generating dict of neurons\r\n gv_file = \"graph \\\"\\\" \\n{\\n\"\r\n #dealing with input nodes\r\n for node in layer_list[0].inputList:\r\n neuron_dict[node] = node.name\r\n gv_file += neuron_dict[node] + \" [label=\\\"{}({})\\\"] ;\\n\".format(node.name,node.value)\r\n # creating dict for neurons\r\n for layer in layer_list:\r\n for neuron in layer.neuronList:\r\n neuron_dict[neuron] = \"{}\".format(neuron.name)\r\n gv_file += neuron_dict[neuron] + \" [label=\\\"{}({})\\\"] ;\\n\".format(neuron.name,neuron.value)\r\n # drawing links between neurons\r\n for layer in layer_list:\r\n for neuron in layer.neuronList:\r\n for input_neuron in neuron.inputNeurons:\r\n gv_file += neuron_dict[neuron] + \" -- \" + 
neuron_dict[input_neuron] + \"; \\n\"\r\n gv_file += \"}\\n\"\r\n with open(\"network.gv\",\"w+\") as file:\r\n file.writelines(gv_file)\r\n print(gv_file)\r\n\r\n\r\n\r\n\r\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
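postOrder() in the record is a dependency-ordered walk: a node's inputs are emitted before the node that consumes them, which is why evaluating the returned list front to back works. A self-contained sketch with a stub node class (tinyTensor's Node type is assumed in the original, so a stand-in is used here):

class StubNode:
    def __init__(self, name, input_nodes=()):
        self.name = name
        self.inputNodes = list(input_nodes)

def post_order(node):
    ordered = []
    def recurse(n):
        for input_node in n.inputNodes:
            recurse(input_node)
        ordered.append(n)
    recurse(node)
    return ordered

a, b = StubNode('a'), StubNode('b')
add = StubNode('add', [a, b])
print([n.name for n in post_order(add)])  # ['a', 'b', 'add']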
from ttkwidgets import CheckboxTreeview
from tkinter import *
from tkinter.ttk import *
from tkinter import messagebox
import json
import os
from DbDataloader import *
class DataLoader():
def __init__(self,master):
self.anne={}
self.master=Toplevel(master)
master.wait_visibility(self.master)
self.master.grab_set()
self.master.minsize(900,680)
self.master.resizable(width=False,height=True)
self.tree = CheckboxTreeview(self.master,height=25)
os.chdir("DonneJson")
with open("DonneUtile.json","r") as rf:
self.anne.update(json.load(rf))
rf.close()
os.chdir("..")
self.Nomfichier=os.listdir("DonneJson")
self.Nomfichier.sort(reverse=True)
self.modeTransaction=False
self.db=DbDataloader(self.modeTransaction,self.master)
self.main()
    # Function to read the JSON files already produced by the DataAcquisition module
def main(self):
choice= messagebox.askyesno("askquestion","Cliquer sur Oui pour charger les données en mode Trasactionnel")
if choice :
self.modeTransaction=True
self.db.conn.start_transaction()
self.master.title("Data Loader : Mode Transanction=OUI")
self.create_widgets()
else:
self.modeTransaction=False
self.master.title("Data Loader : Mode Transanction=NON")
self.create_widgets("Non Transanction")
def lireFichier(self):
label_welcome1 = Label(self.master,text="Prévisualiser les données",
borderwidth = 7,
width = 40,
relief="groove"
)
label_welcome1.grid(row = 1, column = 0, padx = 50)
label_welcome2 = Label(self.master,text="Selectionner le fichier pour la lecture")
label_welcome2.grid(row = 2, column = 0, )
listbox = Listbox(self.master, width=40, height=20,selectmode=SINGLE)
i=0
for fichier in self.Nomfichier:
if "2" in fichier:
listbox.insert(i, fichier)
i=i+1
def afficherObjet(Obj):
try:
os.chdir("DonneJson")
textFichier={}
with open(Obj,"r") as rf:
textFichier.update(json.load(rf))
rf.close()
if textFichier:
texte="{\n"
for key,val in textFichier.items():
b ="\t{\n"
c="\t"+str(key)+" :\n"
d=""
for key1,val1 in val.items():
d=str(d)+"\t\t"+str(key1)+" :"+" "+str(val1)+"\n"
e="\t},\n"
texte=texte+b+c+d+e
texte=texte+"}\n"
texte=texte+"\n\n\t"+str(len(textFichier))+" Objets eenregistrer dans le fichier "+Obj
os.chdir("..")
return texte
except Exception as e:
print(e)
messagebox.showerror(title="Erreur !!!", message="Fichier "+Obj+" introuvable")
def selected_item():
try:
if listbox.get(listbox.curselection()):
textes=afficherObjet(listbox.get(listbox.curselection()))
if textes:
fil = Toplevel(self.master)
                        # modal window: prevents opening duplicate windows
self.master.wait_visibility(fil)
fil.grab_set()
                        # end modal window
fil.geometry("600x600")
fil.title("Fichier :"+listbox.get(listbox.curselection()))
yscroll = Scrollbar(fil)
yscroll.pack(side=RIGHT, fill=Y)
xscroll = Scrollbar(fil, orient=HORIZONTAL)
xscroll.pack(side=BOTTOM, fill=X)
text1 = Text(fil,wrap=NONE,height=30, width=100,yscrollcommand=yscroll.set,
xscrollcommand=xscroll.set)
text1.config(state="normal")
text1.insert("1.0",textes)
text1.pack(side=LEFT)
yscroll.config(command=text1.yview)
xscroll.config(command=text1.xview)
fil.mainloop()
fil.quit()
except :
messagebox.showerror(title="Erreur !!!", message="Vous selectionner un fichier d`abord")
listbox.grid(row = 3, column = 0, pady =20 )
btn = Button(self.master, text='Lire Le Fichier', command=selected_item)
btn.grid(row = 3, column = 1, pady =6 )
    # Function to check the dates, then save them to the database
def CaseCocher(self,mode="Transanction"):
style = Style()
style.configure('W.TButton', font =
('calibri', 15, 'bold', 'underline'),
foreground = 'red')
style.configure('G.TButton', font =
('calibri', 15, 'bold','underline'),
foreground = 'green')
        # collect the checked rows from the tree
def getCheckDict(obj):
selectDate={}
for t in obj:
try:
selectDate[t[:7]].append(t)
except:
selectDate[t[:7]]=[]
selectDate[t[:7]].append(t)
return selectDate
def valider():
if self.tree.get_checked():
                #if the user chose Yes (transactional load)
choice= messagebox.askyesno("Askquestion!!!","Vous etes sur pour la validation")
if choice==True:
self.db.Alldayselected =getCheckDict(self.tree.get_checked())
if self.modeTransaction == False:
                        #non-transactional mode
self.db.insertCommunique()
else:
                        #transaction mode
self.db.insertCommunique()
else:
messagebox.showerror(title="Erreur !!!", message="Cocher une case au moins !!!")
def commit():
choice= messagebox.askyesno("Askquestion!!!","Vouliez-vouz faire un commit?")
if choice==True:
messagebox.showinfo("Info","Mode Commit en cours")
self.db.conn.commit()
self.db.conn.start_transaction()
def rollback():
choice= messagebox.askyesno("Askquestion!!!","Vouliez-vouz faire un rollback?")
if choice==True:
messagebox.showinfo("Info","Mode rollback en cours ")
self.db.conn.rollback()
self.db.conn.start_transaction()
label_welcomec = Label(self.master,
text="La liste des fichiers json obtenus avec leur arborescence",
borderwidth = 7,
relief="groove")
label_welcomec.grid(row = 1, column = 3, pady = 8)
vsb = Scrollbar(self.master, orient="vertical", command=self.tree.yview)
vsb.place(relx=0.978, rely=0.175, relheight=0.713, relwidth=0.020)
self.tree.configure(yscrollcommand=vsb.set)
self.tree.insert("", "end", "ALL", text="SELECT ALL")
for key,val in self.anne.items():
self.tree.insert("ALL", "end", key, text=key)
for i in val:
self.tree.insert(key,"end", i, text=i)
self.tree.grid(row = 3, column = 3, pady = 2)
button_name=Button(self.master,text="Valider",command=valider)
button_name.grid(row = 3, column = 4, pady = 2)
if mode=="Transanction":
commit_buttoon_name=Button(self.master,text="COMMIT",command=commit,style="G.TButton"
)
commit_buttoon_name.grid(row = 4, column = 3, pady = 2)
rollback_buttoon_name=Button(self.master, text = 'ROLLBACK !',
style = 'W.TButton',command=rollback)
rollback_buttoon_name.grid(row = 4, column = 4, pady = 2)
def create_widgets(self,mode="Transanction"):
self.lireFichier()
self.CaseCocher(mode)
def mains(self,obj):
obj.master.mainloop()
obj.db.conn.rollback()
|
normal
|
{
"blob_id": "a70dae504a4dfa3997a11e4c605accfab0024318",
"index": 8796,
"step-1": "<mask token>\n\n\nclass DataLoader:\n <mask token>\n\n def main(self):\n choice = messagebox.askyesno('askquestion',\n 'Cliquer sur Oui pour charger les données en mode Trasactionnel')\n if choice:\n self.modeTransaction = True\n self.db.conn.start_transaction()\n self.master.title('Data Loader : Mode Transanction=\\xadOUI')\n self.create_widgets()\n else:\n self.modeTransaction = False\n self.master.title('Data Loader : Mode Transanction=\\xadNON')\n self.create_widgets('Non Transanction')\n\n def lireFichier(self):\n label_welcome1 = Label(self.master, text=\n 'Prévisualiser les données', borderwidth=7, width=40, relief=\n 'groove')\n label_welcome1.grid(row=1, column=0, padx=50)\n label_welcome2 = Label(self.master, text=\n 'Selectionner le fichier pour la lecture')\n label_welcome2.grid(row=2, column=0)\n listbox = Listbox(self.master, width=40, height=20, selectmode=SINGLE)\n i = 0\n for fichier in self.Nomfichier:\n if '2' in fichier:\n listbox.insert(i, fichier)\n i = i + 1\n\n def afficherObjet(Obj):\n try:\n os.chdir('DonneJson')\n textFichier = {}\n with open(Obj, 'r') as rf:\n textFichier.update(json.load(rf))\n rf.close()\n if textFichier:\n texte = '{\\n'\n for key, val in textFichier.items():\n b = '\\t{\\n'\n c = '\\t' + str(key) + ' :\\n'\n d = ''\n for key1, val1 in val.items():\n d = str(d) + '\\t\\t' + str(key1) + ' :' + ' ' + str(\n val1) + '\\n'\n e = '\\t},\\n'\n texte = texte + b + c + d + e\n texte = texte + '}\\n'\n texte = texte + '\\n\\n\\t' + str(len(textFichier)\n ) + ' Objets eenregistrer dans le fichier ' + Obj\n os.chdir('..')\n return texte\n except Exception as e:\n print(e)\n messagebox.showerror(title='Erreur !!!', message='Fichier ' +\n Obj + ' introuvable')\n\n def selected_item():\n try:\n if listbox.get(listbox.curselection()):\n textes = afficherObjet(listbox.get(listbox.curselection()))\n if textes:\n fil = Toplevel(self.master)\n self.master.wait_visibility(fil)\n fil.grab_set()\n fil.geometry('600x600')\n fil.title('Fichier :' + listbox.get(listbox.\n curselection()))\n yscroll = Scrollbar(fil)\n yscroll.pack(side=RIGHT, fill=Y)\n xscroll = Scrollbar(fil, orient=HORIZONTAL)\n xscroll.pack(side=BOTTOM, fill=X)\n text1 = Text(fil, wrap=NONE, height=30, width=100,\n yscrollcommand=yscroll.set, xscrollcommand=\n xscroll.set)\n text1.config(state='normal')\n text1.insert('1.0', textes)\n text1.pack(side=LEFT)\n yscroll.config(command=text1.yview)\n xscroll.config(command=text1.xview)\n fil.mainloop()\n fil.quit()\n except:\n messagebox.showerror(title='Erreur !!!', message=\n 'Vous selectionner un fichier d`abord')\n listbox.grid(row=3, column=0, pady=20)\n btn = Button(self.master, text='Lire Le Fichier', command=selected_item\n )\n btn.grid(row=3, column=1, pady=6)\n <mask token>\n\n def create_widgets(self, mode='Transanction'):\n self.lireFichier()\n self.CaseCocher(mode)\n\n def mains(self, obj):\n obj.master.mainloop()\n obj.db.conn.rollback()\n",
"step-2": "<mask token>\n\n\nclass DataLoader:\n\n def __init__(self, master):\n self.anne = {}\n self.master = Toplevel(master)\n master.wait_visibility(self.master)\n self.master.grab_set()\n self.master.minsize(900, 680)\n self.master.resizable(width=False, height=True)\n self.tree = CheckboxTreeview(self.master, height=25)\n os.chdir('DonneJson')\n with open('DonneUtile.json', 'r') as rf:\n self.anne.update(json.load(rf))\n rf.close()\n os.chdir('..')\n self.Nomfichier = os.listdir('DonneJson')\n self.Nomfichier.sort(reverse=True)\n self.modeTransaction = False\n self.db = DbDataloader(self.modeTransaction, self.master)\n self.main()\n\n def main(self):\n choice = messagebox.askyesno('askquestion',\n 'Cliquer sur Oui pour charger les données en mode Trasactionnel')\n if choice:\n self.modeTransaction = True\n self.db.conn.start_transaction()\n self.master.title('Data Loader : Mode Transanction=\\xadOUI')\n self.create_widgets()\n else:\n self.modeTransaction = False\n self.master.title('Data Loader : Mode Transanction=\\xadNON')\n self.create_widgets('Non Transanction')\n\n def lireFichier(self):\n label_welcome1 = Label(self.master, text=\n 'Prévisualiser les données', borderwidth=7, width=40, relief=\n 'groove')\n label_welcome1.grid(row=1, column=0, padx=50)\n label_welcome2 = Label(self.master, text=\n 'Selectionner le fichier pour la lecture')\n label_welcome2.grid(row=2, column=0)\n listbox = Listbox(self.master, width=40, height=20, selectmode=SINGLE)\n i = 0\n for fichier in self.Nomfichier:\n if '2' in fichier:\n listbox.insert(i, fichier)\n i = i + 1\n\n def afficherObjet(Obj):\n try:\n os.chdir('DonneJson')\n textFichier = {}\n with open(Obj, 'r') as rf:\n textFichier.update(json.load(rf))\n rf.close()\n if textFichier:\n texte = '{\\n'\n for key, val in textFichier.items():\n b = '\\t{\\n'\n c = '\\t' + str(key) + ' :\\n'\n d = ''\n for key1, val1 in val.items():\n d = str(d) + '\\t\\t' + str(key1) + ' :' + ' ' + str(\n val1) + '\\n'\n e = '\\t},\\n'\n texte = texte + b + c + d + e\n texte = texte + '}\\n'\n texte = texte + '\\n\\n\\t' + str(len(textFichier)\n ) + ' Objets eenregistrer dans le fichier ' + Obj\n os.chdir('..')\n return texte\n except Exception as e:\n print(e)\n messagebox.showerror(title='Erreur !!!', message='Fichier ' +\n Obj + ' introuvable')\n\n def selected_item():\n try:\n if listbox.get(listbox.curselection()):\n textes = afficherObjet(listbox.get(listbox.curselection()))\n if textes:\n fil = Toplevel(self.master)\n self.master.wait_visibility(fil)\n fil.grab_set()\n fil.geometry('600x600')\n fil.title('Fichier :' + listbox.get(listbox.\n curselection()))\n yscroll = Scrollbar(fil)\n yscroll.pack(side=RIGHT, fill=Y)\n xscroll = Scrollbar(fil, orient=HORIZONTAL)\n xscroll.pack(side=BOTTOM, fill=X)\n text1 = Text(fil, wrap=NONE, height=30, width=100,\n yscrollcommand=yscroll.set, xscrollcommand=\n xscroll.set)\n text1.config(state='normal')\n text1.insert('1.0', textes)\n text1.pack(side=LEFT)\n yscroll.config(command=text1.yview)\n xscroll.config(command=text1.xview)\n fil.mainloop()\n fil.quit()\n except:\n messagebox.showerror(title='Erreur !!!', message=\n 'Vous selectionner un fichier d`abord')\n listbox.grid(row=3, column=0, pady=20)\n btn = Button(self.master, text='Lire Le Fichier', command=selected_item\n )\n btn.grid(row=3, column=1, pady=6)\n <mask token>\n\n def create_widgets(self, mode='Transanction'):\n self.lireFichier()\n self.CaseCocher(mode)\n\n def mains(self, obj):\n obj.master.mainloop()\n obj.db.conn.rollback()\n",
"step-3": "<mask token>\n\n\nclass DataLoader:\n\n def __init__(self, master):\n self.anne = {}\n self.master = Toplevel(master)\n master.wait_visibility(self.master)\n self.master.grab_set()\n self.master.minsize(900, 680)\n self.master.resizable(width=False, height=True)\n self.tree = CheckboxTreeview(self.master, height=25)\n os.chdir('DonneJson')\n with open('DonneUtile.json', 'r') as rf:\n self.anne.update(json.load(rf))\n rf.close()\n os.chdir('..')\n self.Nomfichier = os.listdir('DonneJson')\n self.Nomfichier.sort(reverse=True)\n self.modeTransaction = False\n self.db = DbDataloader(self.modeTransaction, self.master)\n self.main()\n\n def main(self):\n choice = messagebox.askyesno('askquestion',\n 'Cliquer sur Oui pour charger les données en mode Trasactionnel')\n if choice:\n self.modeTransaction = True\n self.db.conn.start_transaction()\n self.master.title('Data Loader : Mode Transanction=\\xadOUI')\n self.create_widgets()\n else:\n self.modeTransaction = False\n self.master.title('Data Loader : Mode Transanction=\\xadNON')\n self.create_widgets('Non Transanction')\n\n def lireFichier(self):\n label_welcome1 = Label(self.master, text=\n 'Prévisualiser les données', borderwidth=7, width=40, relief=\n 'groove')\n label_welcome1.grid(row=1, column=0, padx=50)\n label_welcome2 = Label(self.master, text=\n 'Selectionner le fichier pour la lecture')\n label_welcome2.grid(row=2, column=0)\n listbox = Listbox(self.master, width=40, height=20, selectmode=SINGLE)\n i = 0\n for fichier in self.Nomfichier:\n if '2' in fichier:\n listbox.insert(i, fichier)\n i = i + 1\n\n def afficherObjet(Obj):\n try:\n os.chdir('DonneJson')\n textFichier = {}\n with open(Obj, 'r') as rf:\n textFichier.update(json.load(rf))\n rf.close()\n if textFichier:\n texte = '{\\n'\n for key, val in textFichier.items():\n b = '\\t{\\n'\n c = '\\t' + str(key) + ' :\\n'\n d = ''\n for key1, val1 in val.items():\n d = str(d) + '\\t\\t' + str(key1) + ' :' + ' ' + str(\n val1) + '\\n'\n e = '\\t},\\n'\n texte = texte + b + c + d + e\n texte = texte + '}\\n'\n texte = texte + '\\n\\n\\t' + str(len(textFichier)\n ) + ' Objets eenregistrer dans le fichier ' + Obj\n os.chdir('..')\n return texte\n except Exception as e:\n print(e)\n messagebox.showerror(title='Erreur !!!', message='Fichier ' +\n Obj + ' introuvable')\n\n def selected_item():\n try:\n if listbox.get(listbox.curselection()):\n textes = afficherObjet(listbox.get(listbox.curselection()))\n if textes:\n fil = Toplevel(self.master)\n self.master.wait_visibility(fil)\n fil.grab_set()\n fil.geometry('600x600')\n fil.title('Fichier :' + listbox.get(listbox.\n curselection()))\n yscroll = Scrollbar(fil)\n yscroll.pack(side=RIGHT, fill=Y)\n xscroll = Scrollbar(fil, orient=HORIZONTAL)\n xscroll.pack(side=BOTTOM, fill=X)\n text1 = Text(fil, wrap=NONE, height=30, width=100,\n yscrollcommand=yscroll.set, xscrollcommand=\n xscroll.set)\n text1.config(state='normal')\n text1.insert('1.0', textes)\n text1.pack(side=LEFT)\n yscroll.config(command=text1.yview)\n xscroll.config(command=text1.xview)\n fil.mainloop()\n fil.quit()\n except:\n messagebox.showerror(title='Erreur !!!', message=\n 'Vous selectionner un fichier d`abord')\n listbox.grid(row=3, column=0, pady=20)\n btn = Button(self.master, text='Lire Le Fichier', command=selected_item\n )\n btn.grid(row=3, column=1, pady=6)\n\n def CaseCocher(self, mode='Transanction'):\n style = Style()\n style.configure('W.TButton', font=('calibri', 15, 'bold',\n 'underline'), foreground='red')\n style.configure('G.TButton', 
font=('calibri', 15, 'bold',\n 'underline'), foreground='green')\n\n def getCheckDict(obj):\n selectDate = {}\n for t in obj:\n try:\n selectDate[t[:7]].append(t)\n except:\n selectDate[t[:7]] = []\n selectDate[t[:7]].append(t)\n return selectDate\n\n def valider():\n if self.tree.get_checked():\n choice = messagebox.askyesno('Askquestion!!!',\n 'Vous etes sur pour la validation')\n if choice == True:\n self.db.Alldayselected = getCheckDict(self.tree.\n get_checked())\n if self.modeTransaction == False:\n self.db.insertCommunique()\n else:\n self.db.insertCommunique()\n else:\n messagebox.showerror(title='Erreur !!!', message=\n 'Cocher une case au moins !!!')\n\n def commit():\n choice = messagebox.askyesno('Askquestion!!!',\n 'Vouliez-vouz faire un commit?')\n if choice == True:\n messagebox.showinfo('Info', 'Mode Commit en cours')\n self.db.conn.commit()\n self.db.conn.start_transaction()\n\n def rollback():\n choice = messagebox.askyesno('Askquestion!!!',\n 'Vouliez-vouz faire un rollback?')\n if choice == True:\n messagebox.showinfo('Info', 'Mode rollback en cours ')\n self.db.conn.rollback()\n self.db.conn.start_transaction()\n label_welcomec = Label(self.master, text=\n 'La liste des fichiers json obtenus avec leur arborescence',\n borderwidth=7, relief='groove')\n label_welcomec.grid(row=1, column=3, pady=8)\n vsb = Scrollbar(self.master, orient='vertical', command=self.tree.yview\n )\n vsb.place(relx=0.978, rely=0.175, relheight=0.713, relwidth=0.02)\n self.tree.configure(yscrollcommand=vsb.set)\n self.tree.insert('', 'end', 'ALL', text='SELECT ALL')\n for key, val in self.anne.items():\n self.tree.insert('ALL', 'end', key, text=key)\n for i in val:\n self.tree.insert(key, 'end', i, text=i)\n self.tree.grid(row=3, column=3, pady=2)\n button_name = Button(self.master, text='Valider', command=valider)\n button_name.grid(row=3, column=4, pady=2)\n if mode == 'Transanction':\n commit_buttoon_name = Button(self.master, text='COMMIT',\n command=commit, style='G.TButton')\n commit_buttoon_name.grid(row=4, column=3, pady=2)\n rollback_buttoon_name = Button(self.master, text='ROLLBACK !',\n style='W.TButton', command=rollback)\n rollback_buttoon_name.grid(row=4, column=4, pady=2)\n\n def create_widgets(self, mode='Transanction'):\n self.lireFichier()\n self.CaseCocher(mode)\n\n def mains(self, obj):\n obj.master.mainloop()\n obj.db.conn.rollback()\n",
"step-4": "from ttkwidgets import CheckboxTreeview\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom tkinter import messagebox\nimport json\nimport os\nfrom DbDataloader import *\n\n\nclass DataLoader:\n\n def __init__(self, master):\n self.anne = {}\n self.master = Toplevel(master)\n master.wait_visibility(self.master)\n self.master.grab_set()\n self.master.minsize(900, 680)\n self.master.resizable(width=False, height=True)\n self.tree = CheckboxTreeview(self.master, height=25)\n os.chdir('DonneJson')\n with open('DonneUtile.json', 'r') as rf:\n self.anne.update(json.load(rf))\n rf.close()\n os.chdir('..')\n self.Nomfichier = os.listdir('DonneJson')\n self.Nomfichier.sort(reverse=True)\n self.modeTransaction = False\n self.db = DbDataloader(self.modeTransaction, self.master)\n self.main()\n\n def main(self):\n choice = messagebox.askyesno('askquestion',\n 'Cliquer sur Oui pour charger les données en mode Trasactionnel')\n if choice:\n self.modeTransaction = True\n self.db.conn.start_transaction()\n self.master.title('Data Loader : Mode Transanction=\\xadOUI')\n self.create_widgets()\n else:\n self.modeTransaction = False\n self.master.title('Data Loader : Mode Transanction=\\xadNON')\n self.create_widgets('Non Transanction')\n\n def lireFichier(self):\n label_welcome1 = Label(self.master, text=\n 'Prévisualiser les données', borderwidth=7, width=40, relief=\n 'groove')\n label_welcome1.grid(row=1, column=0, padx=50)\n label_welcome2 = Label(self.master, text=\n 'Selectionner le fichier pour la lecture')\n label_welcome2.grid(row=2, column=0)\n listbox = Listbox(self.master, width=40, height=20, selectmode=SINGLE)\n i = 0\n for fichier in self.Nomfichier:\n if '2' in fichier:\n listbox.insert(i, fichier)\n i = i + 1\n\n def afficherObjet(Obj):\n try:\n os.chdir('DonneJson')\n textFichier = {}\n with open(Obj, 'r') as rf:\n textFichier.update(json.load(rf))\n rf.close()\n if textFichier:\n texte = '{\\n'\n for key, val in textFichier.items():\n b = '\\t{\\n'\n c = '\\t' + str(key) + ' :\\n'\n d = ''\n for key1, val1 in val.items():\n d = str(d) + '\\t\\t' + str(key1) + ' :' + ' ' + str(\n val1) + '\\n'\n e = '\\t},\\n'\n texte = texte + b + c + d + e\n texte = texte + '}\\n'\n texte = texte + '\\n\\n\\t' + str(len(textFichier)\n ) + ' Objets eenregistrer dans le fichier ' + Obj\n os.chdir('..')\n return texte\n except Exception as e:\n print(e)\n messagebox.showerror(title='Erreur !!!', message='Fichier ' +\n Obj + ' introuvable')\n\n def selected_item():\n try:\n if listbox.get(listbox.curselection()):\n textes = afficherObjet(listbox.get(listbox.curselection()))\n if textes:\n fil = Toplevel(self.master)\n self.master.wait_visibility(fil)\n fil.grab_set()\n fil.geometry('600x600')\n fil.title('Fichier :' + listbox.get(listbox.\n curselection()))\n yscroll = Scrollbar(fil)\n yscroll.pack(side=RIGHT, fill=Y)\n xscroll = Scrollbar(fil, orient=HORIZONTAL)\n xscroll.pack(side=BOTTOM, fill=X)\n text1 = Text(fil, wrap=NONE, height=30, width=100,\n yscrollcommand=yscroll.set, xscrollcommand=\n xscroll.set)\n text1.config(state='normal')\n text1.insert('1.0', textes)\n text1.pack(side=LEFT)\n yscroll.config(command=text1.yview)\n xscroll.config(command=text1.xview)\n fil.mainloop()\n fil.quit()\n except:\n messagebox.showerror(title='Erreur !!!', message=\n 'Vous selectionner un fichier d`abord')\n listbox.grid(row=3, column=0, pady=20)\n btn = Button(self.master, text='Lire Le Fichier', command=selected_item\n )\n btn.grid(row=3, column=1, pady=6)\n\n def CaseCocher(self, 
mode='Transanction'):\n style = Style()\n style.configure('W.TButton', font=('calibri', 15, 'bold',\n 'underline'), foreground='red')\n style.configure('G.TButton', font=('calibri', 15, 'bold',\n 'underline'), foreground='green')\n\n def getCheckDict(obj):\n selectDate = {}\n for t in obj:\n try:\n selectDate[t[:7]].append(t)\n except:\n selectDate[t[:7]] = []\n selectDate[t[:7]].append(t)\n return selectDate\n\n def valider():\n if self.tree.get_checked():\n choice = messagebox.askyesno('Askquestion!!!',\n 'Vous etes sur pour la validation')\n if choice == True:\n self.db.Alldayselected = getCheckDict(self.tree.\n get_checked())\n if self.modeTransaction == False:\n self.db.insertCommunique()\n else:\n self.db.insertCommunique()\n else:\n messagebox.showerror(title='Erreur !!!', message=\n 'Cocher une case au moins !!!')\n\n def commit():\n choice = messagebox.askyesno('Askquestion!!!',\n 'Vouliez-vouz faire un commit?')\n if choice == True:\n messagebox.showinfo('Info', 'Mode Commit en cours')\n self.db.conn.commit()\n self.db.conn.start_transaction()\n\n def rollback():\n choice = messagebox.askyesno('Askquestion!!!',\n 'Vouliez-vouz faire un rollback?')\n if choice == True:\n messagebox.showinfo('Info', 'Mode rollback en cours ')\n self.db.conn.rollback()\n self.db.conn.start_transaction()\n label_welcomec = Label(self.master, text=\n 'La liste des fichiers json obtenus avec leur arborescence',\n borderwidth=7, relief='groove')\n label_welcomec.grid(row=1, column=3, pady=8)\n vsb = Scrollbar(self.master, orient='vertical', command=self.tree.yview\n )\n vsb.place(relx=0.978, rely=0.175, relheight=0.713, relwidth=0.02)\n self.tree.configure(yscrollcommand=vsb.set)\n self.tree.insert('', 'end', 'ALL', text='SELECT ALL')\n for key, val in self.anne.items():\n self.tree.insert('ALL', 'end', key, text=key)\n for i in val:\n self.tree.insert(key, 'end', i, text=i)\n self.tree.grid(row=3, column=3, pady=2)\n button_name = Button(self.master, text='Valider', command=valider)\n button_name.grid(row=3, column=4, pady=2)\n if mode == 'Transanction':\n commit_buttoon_name = Button(self.master, text='COMMIT',\n command=commit, style='G.TButton')\n commit_buttoon_name.grid(row=4, column=3, pady=2)\n rollback_buttoon_name = Button(self.master, text='ROLLBACK !',\n style='W.TButton', command=rollback)\n rollback_buttoon_name.grid(row=4, column=4, pady=2)\n\n def create_widgets(self, mode='Transanction'):\n self.lireFichier()\n self.CaseCocher(mode)\n\n def mains(self, obj):\n obj.master.mainloop()\n obj.db.conn.rollback()\n",
"step-5": "from ttkwidgets import CheckboxTreeview\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom tkinter import messagebox\nimport json\nimport os\nfrom DbDataloader import *\nclass DataLoader():\n def __init__(self,master):\n self.anne={}\n self.master=Toplevel(master)\n master.wait_visibility(self.master)\n self.master.grab_set()\n self.master.minsize(900,680)\n self.master.resizable(width=False,height=True)\n self.tree = CheckboxTreeview(self.master,height=25)\n os.chdir(\"DonneJson\")\n with open(\"DonneUtile.json\",\"r\") as rf:\n self.anne.update(json.load(rf))\n rf.close()\n os.chdir(\"..\")\n self.Nomfichier=os.listdir(\"DonneJson\")\n self.Nomfichier.sort(reverse=True)\n self.modeTransaction=False\n self.db=DbDataloader(self.modeTransaction,self.master)\n self.main()\n #Fonction pour lire les fichiers json deja dans le module DataAcquisition\n def main(self):\n choice= messagebox.askyesno(\"askquestion\",\"Cliquer sur Oui pour charger les données en mode Trasactionnel\")\n if choice :\n self.modeTransaction=True\n self.db.conn.start_transaction()\n self.master.title(\"Data Loader : Mode Transanction=OUI\")\n self.create_widgets()\n else:\n self.modeTransaction=False\n self.master.title(\"Data Loader : Mode Transanction=NON\")\n self.create_widgets(\"Non Transanction\")\n def lireFichier(self):\n label_welcome1 = Label(self.master,text=\"Prévisualiser les données\",\n borderwidth = 7,\n width = 40,\n relief=\"groove\"\n )\n label_welcome1.grid(row = 1, column = 0, padx = 50)\n label_welcome2 = Label(self.master,text=\"Selectionner le fichier pour la lecture\")\n label_welcome2.grid(row = 2, column = 0, )\n listbox = Listbox(self.master, width=40, height=20,selectmode=SINGLE)\n i=0\n for fichier in self.Nomfichier:\n if \"2\" in fichier:\n listbox.insert(i, fichier)\n i=i+1\n def afficherObjet(Obj):\n try:\n os.chdir(\"DonneJson\")\n textFichier={}\n with open(Obj,\"r\") as rf:\n textFichier.update(json.load(rf))\n rf.close() \n if textFichier:\n texte=\"{\\n\"\n for key,val in textFichier.items():\n b =\"\\t{\\n\"\n c=\"\\t\"+str(key)+\" :\\n\"\n d=\"\"\n for key1,val1 in val.items():\n d=str(d)+\"\\t\\t\"+str(key1)+\" :\"+\" \"+str(val1)+\"\\n\"\n e=\"\\t},\\n\"\n texte=texte+b+c+d+e\n texte=texte+\"}\\n\"\n texte=texte+\"\\n\\n\\t\"+str(len(textFichier))+\" Objets eenregistrer dans le fichier \"+Obj \n os.chdir(\"..\")\n return texte\n except Exception as e:\n print(e)\n messagebox.showerror(title=\"Erreur !!!\", message=\"Fichier \"+Obj+\" introuvable\")\n def selected_item():\n try:\n if listbox.get(listbox.curselection()):\n textes=afficherObjet(listbox.get(listbox.curselection()))\n if textes:\n fil = Toplevel(self.master)\n # fenetre blocante : empeche l’ouverture de fenetres identiques\n self.master.wait_visibility(fil)\n fil.grab_set()\n # end fenetre blocante\n fil.geometry(\"600x600\")\n fil.title(\"Fichier :\"+listbox.get(listbox.curselection()))\n yscroll = Scrollbar(fil)\n yscroll.pack(side=RIGHT, fill=Y)\n xscroll = Scrollbar(fil, orient=HORIZONTAL)\n xscroll.pack(side=BOTTOM, fill=X)\n text1 = Text(fil,wrap=NONE,height=30, width=100,yscrollcommand=yscroll.set,\n xscrollcommand=xscroll.set) \n text1.config(state=\"normal\")\n text1.insert(\"1.0\",textes) \n text1.pack(side=LEFT) \n yscroll.config(command=text1.yview) \n xscroll.config(command=text1.xview) \n fil.mainloop()\n fil.quit()\n except :\n messagebox.showerror(title=\"Erreur !!!\", message=\"Vous selectionner un fichier d`abord\")\n listbox.grid(row = 3, column = 0, pady =20 )\n btn = 
Button(self.master, text='Lire Le Fichier', command=selected_item)\n btn.grid(row = 3, column = 1, pady =6 )\n #Fonction pour cocher les dates ensuite enregistrer vers la bases de donneef\n def CaseCocher(self,mode=\"Transanction\"): \n style = Style() \n style.configure('W.TButton', font =\n ('calibri', 15, 'bold', 'underline'),\n foreground = 'red')\n style.configure('G.TButton', font =\n ('calibri', 15, 'bold','underline'),\n foreground = 'green')\n #recuperer les ligne selectionnes \n def getCheckDict(obj):\n selectDate={}\n for t in obj:\n try:\n selectDate[t[:7]].append(t)\n except:\n selectDate[t[:7]]=[]\n selectDate[t[:7]].append(t)\n return selectDate\n def valider():\n if self.tree.get_checked():\n #si il choisi oui (en transanction)\n choice= messagebox.askyesno(\"Askquestion!!!\",\"Vous etes sur pour la validation\")\n if choice==True:\n self.db.Alldayselected =getCheckDict(self.tree.get_checked())\n if self.modeTransaction == False:\n #Mode Non Transactionnel\n self.db.insertCommunique()\n else:\n #Mode Transaction\n self.db.insertCommunique() \n else:\n messagebox.showerror(title=\"Erreur !!!\", message=\"Cocher une case au moins !!!\")\n def commit():\n choice= messagebox.askyesno(\"Askquestion!!!\",\"Vouliez-vouz faire un commit?\")\n if choice==True:\n messagebox.showinfo(\"Info\",\"Mode Commit en cours\")\n self.db.conn.commit()\n self.db.conn.start_transaction()\n def rollback():\n choice= messagebox.askyesno(\"Askquestion!!!\",\"Vouliez-vouz faire un rollback?\")\n if choice==True:\n messagebox.showinfo(\"Info\",\"Mode rollback en cours \")\n self.db.conn.rollback()\n self.db.conn.start_transaction()\n label_welcomec = Label(self.master,\n text=\"La liste des fichiers json obtenus avec leur arborescence\",\n borderwidth = 7,\n relief=\"groove\")\n label_welcomec.grid(row = 1, column = 3, pady = 8)\n vsb = Scrollbar(self.master, orient=\"vertical\", command=self.tree.yview)\n vsb.place(relx=0.978, rely=0.175, relheight=0.713, relwidth=0.020)\n self.tree.configure(yscrollcommand=vsb.set)\n self.tree.insert(\"\", \"end\", \"ALL\", text=\"SELECT ALL\")\n for key,val in self.anne.items():\n self.tree.insert(\"ALL\", \"end\", key, text=key)\n for i in val:\n self.tree.insert(key,\"end\", i, text=i)\n self.tree.grid(row = 3, column = 3, pady = 2)\n button_name=Button(self.master,text=\"Valider\",command=valider)\n button_name.grid(row = 3, column = 4, pady = 2)\n if mode==\"Transanction\":\n commit_buttoon_name=Button(self.master,text=\"COMMIT\",command=commit,style=\"G.TButton\"\n )\n commit_buttoon_name.grid(row = 4, column = 3, pady = 2)\n rollback_buttoon_name=Button(self.master, text = 'ROLLBACK !',\n style = 'W.TButton',command=rollback)\n rollback_buttoon_name.grid(row = 4, column = 4, pady = 2)\n def create_widgets(self,mode=\"Transanction\"):\n self.lireFichier()\n self.CaseCocher(mode)\n def mains(self,obj):\n obj.master.mainloop()\n obj.db.conn.rollback()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def multi_set_symmetric_difference(sets):
return list(functools.reduce(lambda a, b: a ^ b, [set(s) for s in sets]))
def flood_iteration_plaquettes(l, plaquettes):
return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].
adjacent_plaquettes for p in plaquettes))
def flood_iteration_vertices(l, vertices):
return set(vertices) | set(it.chain.from_iterable(i for v in set(
vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def multi_set_symmetric_difference(sets):
return list(functools.reduce(lambda a, b: a ^ b, [set(s) for s in sets]))
def flood_iteration_plaquettes(l, plaquettes):
return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].
adjacent_plaquettes for p in plaquettes))
def flood_iteration_vertices(l, vertices):
return set(vertices) | set(it.chain.from_iterable(i for v in set(
vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))
<|reserved_special_token_0|>
matplotlib.rcParams.update({'font.size': 13, 'text.usetex': True,
'font.family': 'serif', 'font.serif': ['Computer Modern']})
matplotlib.rcParams.update({'axes.linewidth': black_line_widths})
<|reserved_special_token_0|>
subprocess.run(['mkdir', '-p', './animation'])
for n in tqdm(range(15)):
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.set_size_inches(2 * w, 2 / 2 * w)
for a in axes:
a.set(xticks=[], yticks=[])
if n > 0:
vertices = flood_iteration_vertices(l, vertices)
plaquettes = flood_iteration_plaquettes(l, plaquettes)
ax = axes[0]
multi_edges = multi_set_symmetric_difference([l.vertices.adjacent_edges
[v] for v in vertices])
if multi_edges:
pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=multi_edges
)
pl.plot_edges(l, ax=ax, color='k', subset=multi_edges)
pl.plot_vertices(l, ax=ax, subset=list(vertices), s=5)
pl.plot_edges(l, ax=ax, alpha=0.1)
pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)
ax.set(xticks=[], yticks=[])
ax = axes[1]
plaquette_boolean = np.array([(i in plaquettes) for i in range(l.
n_plaquettes)])
fluxes = 1 - 2 * plaquette_boolean
ujk = flux_finder.find_flux_sector(l, fluxes, ujk)
fluxes = flux_finder.fluxes_from_bonds(l, ujk)
pl.plot_edges(l, ax=ax, alpha=0.1)
pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)
pl.plot_edges(l, ax=ax, subset=ujk == -1)
if len(plaquettes) > 1:
pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=ujk == -1)
pl.plot_plaquettes(l, subset=fluxes == -1, ax=ax, color_scheme=[
'orange', 'white'], alpha=0.5)
ax.set(xticks=[], yticks=[])
fig.tight_layout()
if n == 3:
fig.savefig(f'./{Path.cwd().name}.svg', transparent=True)
fig.savefig(f'./{Path.cwd().name}.pdf')
fig.savefig(f'animation/iteration_{n:03}.svg')
plt.close(fig)
subprocess.run(['magick', 'animation/*.svg', f'./{Path.cwd().name}.gif'])
subprocess.run(['convert', '-delay', '100', f'./{Path.cwd().name}.gif',
f'./{Path.cwd().name}.gif'])
subprocess.run(['rm', '-r', './animation'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def multi_set_symmetric_difference(sets):
return list(functools.reduce(lambda a, b: a ^ b, [set(s) for s in sets]))
def flood_iteration_plaquettes(l, plaquettes):
return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].
adjacent_plaquettes for p in plaquettes))
def flood_iteration_vertices(l, vertices):
return set(vertices) | set(it.chain.from_iterable(i for v in set(
vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))
column_width = 3.375
w = 3.375
black_line_widths = 1.5
matplotlib.rcParams.update({'font.size': 13, 'text.usetex': True,
'font.family': 'serif', 'font.serif': ['Computer Modern']})
matplotlib.rcParams.update({'axes.linewidth': black_line_widths})
line_colors = [to_hex(a) for a in cm.inferno([0.25, 0.5, 0.75])]
rng = np.random.default_rng(seed=10)
l, coloring, ujk = eg.make_amorphous(8, rng=rng)
plaquettes = [40]
vertices = [78]
subprocess.run(['mkdir', '-p', './animation'])
for n in tqdm(range(15)):
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.set_size_inches(2 * w, 2 / 2 * w)
for a in axes:
a.set(xticks=[], yticks=[])
if n > 0:
vertices = flood_iteration_vertices(l, vertices)
plaquettes = flood_iteration_plaquettes(l, plaquettes)
ax = axes[0]
multi_edges = multi_set_symmetric_difference([l.vertices.adjacent_edges
[v] for v in vertices])
if multi_edges:
pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=multi_edges
)
pl.plot_edges(l, ax=ax, color='k', subset=multi_edges)
pl.plot_vertices(l, ax=ax, subset=list(vertices), s=5)
pl.plot_edges(l, ax=ax, alpha=0.1)
pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)
ax.set(xticks=[], yticks=[])
ax = axes[1]
plaquette_boolean = np.array([(i in plaquettes) for i in range(l.
n_plaquettes)])
fluxes = 1 - 2 * plaquette_boolean
ujk = flux_finder.find_flux_sector(l, fluxes, ujk)
fluxes = flux_finder.fluxes_from_bonds(l, ujk)
pl.plot_edges(l, ax=ax, alpha=0.1)
pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)
pl.plot_edges(l, ax=ax, subset=ujk == -1)
if len(plaquettes) > 1:
pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=ujk == -1)
pl.plot_plaquettes(l, subset=fluxes == -1, ax=ax, color_scheme=[
'orange', 'white'], alpha=0.5)
ax.set(xticks=[], yticks=[])
fig.tight_layout()
if n == 3:
fig.savefig(f'./{Path.cwd().name}.svg', transparent=True)
fig.savefig(f'./{Path.cwd().name}.pdf')
fig.savefig(f'animation/iteration_{n:03}.svg')
plt.close(fig)
subprocess.run(['magick', 'animation/*.svg', f'./{Path.cwd().name}.gif'])
subprocess.run(['convert', '-delay', '100', f'./{Path.cwd().name}.gif',
f'./{Path.cwd().name}.gif'])
subprocess.run(['rm', '-r', './animation'])
<|reserved_special_token_1|>
import matplotlib
from matplotlib.colors import to_hex
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import itertools as it
from pathlib import Path
import subprocess
from tqdm import tqdm
from koala import plotting as pl
from koala import phase_diagrams as pd
from koala import pointsets, voronization, flux_finder, graph_color
from koala import example_graphs as eg
import functools
def multi_set_symmetric_difference(sets):
return list(functools.reduce(lambda a, b: a ^ b, [set(s) for s in sets]))
def flood_iteration_plaquettes(l, plaquettes):
return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].
adjacent_plaquettes for p in plaquettes))
def flood_iteration_vertices(l, vertices):
return set(vertices) | set(it.chain.from_iterable(i for v in set(
vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))
column_width = 3.375
w = 3.375
black_line_widths = 1.5
matplotlib.rcParams.update({'font.size': 13, 'text.usetex': True,
'font.family': 'serif', 'font.serif': ['Computer Modern']})
matplotlib.rcParams.update({'axes.linewidth': black_line_widths})
line_colors = [to_hex(a) for a in cm.inferno([0.25, 0.5, 0.75])]
rng = np.random.default_rng(seed=10)
l, coloring, ujk = eg.make_amorphous(8, rng=rng)
plaquettes = [40]
vertices = [78]
subprocess.run(['mkdir', '-p', './animation'])
for n in tqdm(range(15)):
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.set_size_inches(2 * w, 2 / 2 * w)
for a in axes:
a.set(xticks=[], yticks=[])
if n > 0:
vertices = flood_iteration_vertices(l, vertices)
plaquettes = flood_iteration_plaquettes(l, plaquettes)
ax = axes[0]
multi_edges = multi_set_symmetric_difference([l.vertices.adjacent_edges
[v] for v in vertices])
if multi_edges:
pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=multi_edges
)
pl.plot_edges(l, ax=ax, color='k', subset=multi_edges)
pl.plot_vertices(l, ax=ax, subset=list(vertices), s=5)
pl.plot_edges(l, ax=ax, alpha=0.1)
pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)
ax.set(xticks=[], yticks=[])
ax = axes[1]
plaquette_boolean = np.array([(i in plaquettes) for i in range(l.
n_plaquettes)])
fluxes = 1 - 2 * plaquette_boolean
ujk = flux_finder.find_flux_sector(l, fluxes, ujk)
fluxes = flux_finder.fluxes_from_bonds(l, ujk)
pl.plot_edges(l, ax=ax, alpha=0.1)
pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)
pl.plot_edges(l, ax=ax, subset=ujk == -1)
if len(plaquettes) > 1:
pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=ujk == -1)
pl.plot_plaquettes(l, subset=fluxes == -1, ax=ax, color_scheme=[
'orange', 'white'], alpha=0.5)
ax.set(xticks=[], yticks=[])
fig.tight_layout()
if n == 3:
fig.savefig(f'./{Path.cwd().name}.svg', transparent=True)
fig.savefig(f'./{Path.cwd().name}.pdf')
fig.savefig(f'animation/iteration_{n:03}.svg')
plt.close(fig)
subprocess.run(['magick', 'animation/*.svg', f'./{Path.cwd().name}.gif'])
subprocess.run(['convert', '-delay', '100', f'./{Path.cwd().name}.gif',
f'./{Path.cwd().name}.gif'])
subprocess.run(['rm', '-r', './animation'])
<|reserved_special_token_1|>
#!/usr/bin/env python3
import matplotlib
from matplotlib.colors import to_hex
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import itertools as it
from pathlib import Path
import subprocess
from tqdm import tqdm
from koala import plotting as pl
from koala import phase_diagrams as pd
from koala import pointsets, voronization, flux_finder, graph_color
from koala import example_graphs as eg
import functools
def multi_set_symmetric_difference(sets):
return list(functools.reduce(lambda a,b: a^b, [set(s) for s in sets]))
def flood_iteration_plaquettes(l, plaquettes):
return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].adjacent_plaquettes for p in plaquettes))
def flood_iteration_vertices(l, vertices):
return set(vertices) | set(it.chain.from_iterable(i for v in set(vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))
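# A quick illustration of the set helper above (hypothetical values, not part of
# the original script): elements appearing in an odd number of inputs survive the
# repeated symmetric difference, e.g.
#     multi_set_symmetric_difference([{1, 2}, {2, 3}, {3, 4}])  ->  [1, 4]
# so edges touched by exactly one flooded vertex (the boundary) are what remain,
# while edges interior to the flooded region cancel out.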
# imports just for this plot
column_width = 3.375
w = 3.375
black_line_widths = 1.5
matplotlib.rcParams.update({'font.size': 13, 'text.usetex': True, 'font.family': 'serif', 'font.serif': ['Computer Modern']})
matplotlib.rcParams.update({"axes.linewidth": black_line_widths})
line_colors = [to_hex(a) for a in cm.inferno([0.25, 0.5, 0.75])]
rng = np.random.default_rng(seed = 10)
l, coloring, ujk = eg.make_amorphous(8, rng = rng)
# l, coloring, ujk = eg.make_honeycomb(8)
plaquettes = [40,]
vertices = [78,]
subprocess.run(["mkdir", "-p", "./animation"])
for n in tqdm(range(15)):
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.set_size_inches(2 * w, 2/2 * w)
for a in axes: a.set(xticks = [], yticks = [])
# pl.plot_vertex_indices(l, ax = ax)
# pl.plot_edge_indices(l, ax = ax)
# pl.plot_plaquette_indices(l, ax = ax)
if n > 0:
vertices = flood_iteration_vertices(l, vertices)
plaquettes = flood_iteration_plaquettes(l, plaquettes)
ax = axes[0]
multi_edges = multi_set_symmetric_difference([l.vertices.adjacent_edges[v] for v in vertices])
if multi_edges: pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], subset = multi_edges)
pl.plot_edges(l, ax = ax, color = 'k', subset = multi_edges)
pl.plot_vertices(l, ax = ax, subset = list(vertices), s = 5)
pl.plot_edges(l, ax = ax, alpha = 0.1)
pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], alpha = 0.1)
ax.set(xticks = [], yticks = [])
ax = axes[1]
plaquette_boolean = np.array([i in plaquettes for i in range(l.n_plaquettes)])
fluxes = 1 - 2*plaquette_boolean
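    # membership maps to fluxes: flooded plaquettes get flux -1, all others +1;
    # find_flux_sector below then adjusts the bond variables ujk to realise them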
ujk = flux_finder.find_flux_sector(l, fluxes, ujk)
fluxes = flux_finder.fluxes_from_bonds(l, ujk)
pl.plot_edges(l, ax = ax, alpha = 0.1)
pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], alpha = 0.1)
pl.plot_edges(l, ax = ax, subset = (ujk == -1))
    if len(plaquettes) > 1: pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], subset = (ujk == -1))
    pl.plot_plaquettes(l, subset = fluxes == -1, ax = ax, color_scheme = ["orange", "white"], alpha = 0.5)
ax.set(xticks = [], yticks = [])
fig.tight_layout()
if n == 3:
fig.savefig(f'./{Path.cwd().name}.svg', transparent = True)
fig.savefig(f'./{Path.cwd().name}.pdf')
fig.savefig(f"animation/iteration_{n:03}.svg")
plt.close(fig)
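# Frame assembly below assumes ImageMagick is on PATH: `magick` stitches the
# per-iteration SVGs into a gif, and `convert -delay 100` rewrites it with a
# 100-centisecond (1 s) delay per frame before the scratch directory is removed.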
subprocess.run(["magick", "animation/*.svg", f'./{Path.cwd().name}.gif'])
subprocess.run(["convert", "-delay", "100", f'./{Path.cwd().name}.gif', f'./{Path.cwd().name}.gif'])
subprocess.run(["rm", "-r", "./animation"])
|
flexible
|
{
"blob_id": "d429f03c0f0c241166d6c0a5a45dc1101bcaec16",
"index": 5878,
"step-1": "<mask token>\n\n\ndef multi_set_symmetric_difference(sets):\n return list(functools.reduce(lambda a, b: a ^ b, [set(s) for s in sets]))\n\n\ndef flood_iteration_plaquettes(l, plaquettes):\n return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].\n adjacent_plaquettes for p in plaquettes))\n\n\ndef flood_iteration_vertices(l, vertices):\n return set(vertices) | set(it.chain.from_iterable(i for v in set(\n vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef multi_set_symmetric_difference(sets):\n return list(functools.reduce(lambda a, b: a ^ b, [set(s) for s in sets]))\n\n\ndef flood_iteration_plaquettes(l, plaquettes):\n return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].\n adjacent_plaquettes for p in plaquettes))\n\n\ndef flood_iteration_vertices(l, vertices):\n return set(vertices) | set(it.chain.from_iterable(i for v in set(\n vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))\n\n\n<mask token>\nmatplotlib.rcParams.update({'font.size': 13, 'text.usetex': True,\n 'font.family': 'serif', 'font.serif': ['Computer Modern']})\nmatplotlib.rcParams.update({'axes.linewidth': black_line_widths})\n<mask token>\nsubprocess.run(['mkdir', '-p', './animation'])\nfor n in tqdm(range(15)):\n fig, axes = plt.subplots(nrows=1, ncols=2)\n fig.set_size_inches(2 * w, 2 / 2 * w)\n for a in axes:\n a.set(xticks=[], yticks=[])\n if n > 0:\n vertices = flood_iteration_vertices(l, vertices)\n plaquettes = flood_iteration_plaquettes(l, plaquettes)\n ax = axes[0]\n multi_edges = multi_set_symmetric_difference([l.vertices.adjacent_edges\n [v] for v in vertices])\n if multi_edges:\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=multi_edges\n )\n pl.plot_edges(l, ax=ax, color='k', subset=multi_edges)\n pl.plot_vertices(l, ax=ax, subset=list(vertices), s=5)\n pl.plot_edges(l, ax=ax, alpha=0.1)\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)\n ax.set(xticks=[], yticks=[])\n ax = axes[1]\n plaquette_boolean = np.array([(i in plaquettes) for i in range(l.\n n_plaquettes)])\n fluxes = 1 - 2 * plaquette_boolean\n ujk = flux_finder.find_flux_sector(l, fluxes, ujk)\n fluxes = flux_finder.fluxes_from_bonds(l, ujk)\n pl.plot_edges(l, ax=ax, alpha=0.1)\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)\n pl.plot_edges(l, ax=ax, subset=ujk == -1)\n if len(plaquettes) > 1:\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=ujk == -1)\n pl.plot_plaquettes(l, subset=fluxes == -1, ax=ax, color_scheme=[\n 'orange', 'white'], alpha=0.5)\n ax.set(xticks=[], yticks=[])\n fig.tight_layout()\n if n == 3:\n fig.savefig(f'./{Path.cwd().name}.svg', transparent=True)\n fig.savefig(f'./{Path.cwd().name}.pdf')\n fig.savefig(f'animation/iteration_{n:03}.svg')\n plt.close(fig)\nsubprocess.run(['magick', 'animation/*.svg', f'./{Path.cwd().name}.gif'])\nsubprocess.run(['convert', '-delay', '100', f'./{Path.cwd().name}.gif',\n f'./{Path.cwd().name}.gif'])\nsubprocess.run(['rm', '-r', './animation'])\n",
"step-3": "<mask token>\n\n\ndef multi_set_symmetric_difference(sets):\n return list(functools.reduce(lambda a, b: a ^ b, [set(s) for s in sets]))\n\n\ndef flood_iteration_plaquettes(l, plaquettes):\n return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].\n adjacent_plaquettes for p in plaquettes))\n\n\ndef flood_iteration_vertices(l, vertices):\n return set(vertices) | set(it.chain.from_iterable(i for v in set(\n vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))\n\n\ncolumn_width = 3.375\nw = 3.375\nblack_line_widths = 1.5\nmatplotlib.rcParams.update({'font.size': 13, 'text.usetex': True,\n 'font.family': 'serif', 'font.serif': ['Computer Modern']})\nmatplotlib.rcParams.update({'axes.linewidth': black_line_widths})\nline_colors = [to_hex(a) for a in cm.inferno([0.25, 0.5, 0.75])]\nrng = np.random.default_rng(seed=10)\nl, coloring, ujk = eg.make_amorphous(8, rng=rng)\nplaquettes = [40]\nvertices = [78]\nsubprocess.run(['mkdir', '-p', './animation'])\nfor n in tqdm(range(15)):\n fig, axes = plt.subplots(nrows=1, ncols=2)\n fig.set_size_inches(2 * w, 2 / 2 * w)\n for a in axes:\n a.set(xticks=[], yticks=[])\n if n > 0:\n vertices = flood_iteration_vertices(l, vertices)\n plaquettes = flood_iteration_plaquettes(l, plaquettes)\n ax = axes[0]\n multi_edges = multi_set_symmetric_difference([l.vertices.adjacent_edges\n [v] for v in vertices])\n if multi_edges:\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=multi_edges\n )\n pl.plot_edges(l, ax=ax, color='k', subset=multi_edges)\n pl.plot_vertices(l, ax=ax, subset=list(vertices), s=5)\n pl.plot_edges(l, ax=ax, alpha=0.1)\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)\n ax.set(xticks=[], yticks=[])\n ax = axes[1]\n plaquette_boolean = np.array([(i in plaquettes) for i in range(l.\n n_plaquettes)])\n fluxes = 1 - 2 * plaquette_boolean\n ujk = flux_finder.find_flux_sector(l, fluxes, ujk)\n fluxes = flux_finder.fluxes_from_bonds(l, ujk)\n pl.plot_edges(l, ax=ax, alpha=0.1)\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)\n pl.plot_edges(l, ax=ax, subset=ujk == -1)\n if len(plaquettes) > 1:\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=ujk == -1)\n pl.plot_plaquettes(l, subset=fluxes == -1, ax=ax, color_scheme=[\n 'orange', 'white'], alpha=0.5)\n ax.set(xticks=[], yticks=[])\n fig.tight_layout()\n if n == 3:\n fig.savefig(f'./{Path.cwd().name}.svg', transparent=True)\n fig.savefig(f'./{Path.cwd().name}.pdf')\n fig.savefig(f'animation/iteration_{n:03}.svg')\n plt.close(fig)\nsubprocess.run(['magick', 'animation/*.svg', f'./{Path.cwd().name}.gif'])\nsubprocess.run(['convert', '-delay', '100', f'./{Path.cwd().name}.gif',\n f'./{Path.cwd().name}.gif'])\nsubprocess.run(['rm', '-r', './animation'])\n",
"step-4": "import matplotlib\nfrom matplotlib.colors import to_hex\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport itertools as it\nfrom pathlib import Path\nimport subprocess\nfrom tqdm import tqdm\nfrom koala import plotting as pl\nfrom koala import phase_diagrams as pd\nfrom koala import pointsets, voronization, flux_finder, graph_color\nfrom koala import example_graphs as eg\nimport functools\n\n\ndef multi_set_symmetric_difference(sets):\n return list(functools.reduce(lambda a, b: a ^ b, [set(s) for s in sets]))\n\n\ndef flood_iteration_plaquettes(l, plaquettes):\n return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].\n adjacent_plaquettes for p in plaquettes))\n\n\ndef flood_iteration_vertices(l, vertices):\n return set(vertices) | set(it.chain.from_iterable(i for v in set(\n vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))\n\n\ncolumn_width = 3.375\nw = 3.375\nblack_line_widths = 1.5\nmatplotlib.rcParams.update({'font.size': 13, 'text.usetex': True,\n 'font.family': 'serif', 'font.serif': ['Computer Modern']})\nmatplotlib.rcParams.update({'axes.linewidth': black_line_widths})\nline_colors = [to_hex(a) for a in cm.inferno([0.25, 0.5, 0.75])]\nrng = np.random.default_rng(seed=10)\nl, coloring, ujk = eg.make_amorphous(8, rng=rng)\nplaquettes = [40]\nvertices = [78]\nsubprocess.run(['mkdir', '-p', './animation'])\nfor n in tqdm(range(15)):\n fig, axes = plt.subplots(nrows=1, ncols=2)\n fig.set_size_inches(2 * w, 2 / 2 * w)\n for a in axes:\n a.set(xticks=[], yticks=[])\n if n > 0:\n vertices = flood_iteration_vertices(l, vertices)\n plaquettes = flood_iteration_plaquettes(l, plaquettes)\n ax = axes[0]\n multi_edges = multi_set_symmetric_difference([l.vertices.adjacent_edges\n [v] for v in vertices])\n if multi_edges:\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=multi_edges\n )\n pl.plot_edges(l, ax=ax, color='k', subset=multi_edges)\n pl.plot_vertices(l, ax=ax, subset=list(vertices), s=5)\n pl.plot_edges(l, ax=ax, alpha=0.1)\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)\n ax.set(xticks=[], yticks=[])\n ax = axes[1]\n plaquette_boolean = np.array([(i in plaquettes) for i in range(l.\n n_plaquettes)])\n fluxes = 1 - 2 * plaquette_boolean\n ujk = flux_finder.find_flux_sector(l, fluxes, ujk)\n fluxes = flux_finder.fluxes_from_bonds(l, ujk)\n pl.plot_edges(l, ax=ax, alpha=0.1)\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)\n pl.plot_edges(l, ax=ax, subset=ujk == -1)\n if len(plaquettes) > 1:\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=ujk == -1)\n pl.plot_plaquettes(l, subset=fluxes == -1, ax=ax, color_scheme=[\n 'orange', 'white'], alpha=0.5)\n ax.set(xticks=[], yticks=[])\n fig.tight_layout()\n if n == 3:\n fig.savefig(f'./{Path.cwd().name}.svg', transparent=True)\n fig.savefig(f'./{Path.cwd().name}.pdf')\n fig.savefig(f'animation/iteration_{n:03}.svg')\n plt.close(fig)\nsubprocess.run(['magick', 'animation/*.svg', f'./{Path.cwd().name}.gif'])\nsubprocess.run(['convert', '-delay', '100', f'./{Path.cwd().name}.gif',\n f'./{Path.cwd().name}.gif'])\nsubprocess.run(['rm', '-r', './animation'])\n",
"step-5": "#!/usr/bin/env python3\n\nimport matplotlib\nfrom matplotlib.colors import to_hex\nfrom matplotlib import cm\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport itertools as it\nfrom pathlib import Path\nimport subprocess\nfrom tqdm import tqdm\n\nfrom koala import plotting as pl\nfrom koala import phase_diagrams as pd\nfrom koala import pointsets, voronization, flux_finder, graph_color\nfrom koala import example_graphs as eg\n\nimport functools\n\ndef multi_set_symmetric_difference(sets):\n return list(functools.reduce(lambda a,b: a^b, [set(s) for s in sets]))\n\ndef flood_iteration_plaquettes(l, plaquettes):\n return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].adjacent_plaquettes for p in plaquettes))\n\ndef flood_iteration_vertices(l, vertices):\n return set(vertices) | set(it.chain.from_iterable(i for v in set(vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))\n\n\n# imports just for this plot\n\ncolumn_width = 3.375\nw = 3.375\nblack_line_widths = 1.5\n\nmatplotlib.rcParams.update({'font.size': 13, 'text.usetex': True, 'font.family': 'serif', 'font.serif': ['Computer Modern']})\nmatplotlib.rcParams.update({\"axes.linewidth\": black_line_widths})\n\nline_colors = [to_hex(a) for a in cm.inferno([0.25, 0.5, 0.75])]\n\nrng = np.random.default_rng(seed = 10)\nl, coloring, ujk = eg.make_amorphous(8, rng = rng)\n# l, coloring, ujk = eg.make_honeycomb(8)\n\nplaquettes = [40,]\nvertices = [78,]\n\nsubprocess.run([\"mkdir\", \"-p\", \"./animation\"])\n\nfor n in tqdm(range(15)):\n fig, axes = plt.subplots(nrows=1, ncols=2)\n fig.set_size_inches(2 * w, 2/2 * w)\n for a in axes: a.set(xticks = [], yticks = [])\n\n # pl.plot_vertex_indices(l, ax = ax)\n # pl.plot_edge_indices(l, ax = ax)\n # pl.plot_plaquette_indices(l, ax = ax)\n \n if n > 0:\n vertices = flood_iteration_vertices(l, vertices)\n plaquettes = flood_iteration_plaquettes(l, plaquettes)\n \n ax = axes[0]\n \n multi_edges = multi_set_symmetric_difference([l.vertices.adjacent_edges[v] for v in vertices])\n \n if multi_edges: pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], subset = multi_edges)\n pl.plot_edges(l, ax = ax, color = 'k', subset = multi_edges)\n pl.plot_vertices(l, ax = ax, subset = list(vertices), s = 5)\n\n pl.plot_edges(l, ax = ax, alpha = 0.1)\n pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], alpha = 0.1)\n\n ax.set(xticks = [], yticks = [])\n \n ax = axes[1]\n\n plaquette_boolean = np.array([i in plaquettes for i in range(l.n_plaquettes)])\n\n fluxes = 1 - 2*plaquette_boolean\n ujk = flux_finder.find_flux_sector(l, fluxes, ujk)\n fluxes = flux_finder.fluxes_from_bonds(l, ujk)\n\n pl.plot_edges(l, ax = ax, alpha = 0.1)\n pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], alpha = 0.1)\n \n pl.plot_edges(l, ax = ax, subset = (ujk == -1))\n if len(plaquettes) > 1: pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], subset = (ujk == -1), )\n pl.plot_plaquettes(l, subset = fluxes == -1, ax = ax, color_scheme = [\"orange\", \"white\"], alpha = 0.5);\n ax.set(xticks = [], yticks = [])\n \n fig.tight_layout()\n if n == 3: \n fig.savefig(f'./{Path.cwd().name}.svg', transparent = True)\n fig.savefig(f'./{Path.cwd().name}.pdf')\n fig.savefig(f\"animation/iteration_{n:03}.svg\")\n plt.close(fig)\n\nsubprocess.run([\"magick\", \"animation/*.svg\", f'./{Path.cwd().name}.gif'])\nsubprocess.run([\"convert\", \"-delay\", \"100\", f'./{Path.cwd().name}.gif', f'./{Path.cwd().name}.gif'])\nsubprocess.run([\"rm\", \"-r\", \"./animation\"])",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import os
from flask import request, jsonify
from flask_api import FlaskAPI
from flask_api.exceptions import NotAcceptable
from dotenv import load_dotenv
load_dotenv(dotenv_path='./.env')
from src.service.jira import jira
from src.service.helper import helper
application = FlaskAPI(__name__)
jiraservice = jira()
helper = helper()
@application.route('/')
def hello_world():
return jsonify({'Hello': 'World'})
@application.route('/jira-issue-transition', methods=['POST'])
def jira_issue_transition_update():
if not helper.check_github_ip(src_ip=request.access_route[0]):
raise NotAcceptable('Github IP whitelist check failed! IP: {}'.format(request.access_route[0]))
response = None
    if not request.json or 'review' not in request.json or 'action' not in request.json:
        raise NotAcceptable('Invalid JSON')
if request.json['review']['state'] == 'changes_requested':
response = jiraservice.issue_transition_update(issue_id=request.json['pull_request']['head']['ref'],
new_transition_id=os.getenv('JIRA_TRANSITION_REJECT_ID'))
elif request.json['review']['state'] == 'approved':
response = jiraservice.issue_transition_update(issue_id=request.json['pull_request']['head']['ref'],
new_transition_id=os.getenv('JIRA_TRANSITION_APPROVED_ID'))
    if request.json['review']['state'] in ('approved', 'changes_requested'):
helper.save_pull_request_review(issue_id=request.json['pull_request']['head']['ref'],
pr_id=request.json['pull_request']['number'],
issue_owner_username=request.json['pull_request']['user']['login'],
reviewer_username=request.json['review']['user']['login'],
action=request.json['review']['state'])
if response:
return jsonify({'ack': 'OK'})
else:
return jsonify({'ack': 'NOT OK'})
if __name__ == '__main__':
application.run(debug=True)
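# For local testing: the handler above only reads a handful of fields from the
# GitHub pull-request-review webhook body. A minimal payload sketch
# (hypothetical values; real payloads carry many more keys):
#   {
#     "action": "submitted",
#     "review": {"state": "approved", "user": {"login": "reviewer"}},
#     "pull_request": {"number": 42,
#                      "head": {"ref": "PROJ-123"},
#                      "user": {"login": "author"}}
#   }
# The branch name in pull_request.head.ref is treated as the Jira issue id.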
|
normal
|
{
"blob_id": "72e03e7199044f3ed1d562db622a7b884fa186b0",
"index": 2206,
"step-1": "<mask token>\n\n\n@application.route('/')\ndef hello_world():\n return jsonify({'Hello': 'World'})\n\n\n<mask token>\n",
"step-2": "<mask token>\nload_dotenv(dotenv_path='./.env')\n<mask token>\n\n\n@application.route('/')\ndef hello_world():\n return jsonify({'Hello': 'World'})\n\n\n@application.route('/jira-issue-transition', methods=['POST'])\ndef jira_issue_transition_update():\n if not helper.check_github_ip(src_ip=request.access_route[0]):\n raise NotAcceptable('Github IP whitelist check failed! IP: {}'.\n format(request.access_route[0]))\n response = None\n if (not request.json or not 'review' in request.json or not 'action' in\n request.json):\n raise NotAcceptable('Invalid JSON')\n if request.json['review']['state'] == 'changes_requested':\n response = jiraservice.issue_transition_update(issue_id=request.\n json['pull_request']['head']['ref'], new_transition_id=os.\n getenv('JIRA_TRANSITION_REJECT_ID'))\n elif request.json['review']['state'] == 'approved':\n response = jiraservice.issue_transition_update(issue_id=request.\n json['pull_request']['head']['ref'], new_transition_id=os.\n getenv('JIRA_TRANSITION_APPROVED_ID'))\n if request.json['review']['state'] == 'approved' or request.json['review'][\n 'state'] == 'changes_requested':\n helper.save_pull_request_review(issue_id=request.json[\n 'pull_request']['head']['ref'], pr_id=request.json[\n 'pull_request']['number'], issue_owner_username=request.json[\n 'pull_request']['user']['login'], reviewer_username=request.\n json['review']['user']['login'], action=request.json['review'][\n 'state'])\n if response:\n return jsonify({'ack': 'OK'})\n else:\n return jsonify({'ack': 'NOT OK'})\n if __name__ == '__main__':\n application.run(debug=True)\n",
"step-3": "<mask token>\nload_dotenv(dotenv_path='./.env')\n<mask token>\napplication = FlaskAPI(__name__)\njiraservice = jira()\nhelper = helper()\n\n\n@application.route('/')\ndef hello_world():\n return jsonify({'Hello': 'World'})\n\n\n@application.route('/jira-issue-transition', methods=['POST'])\ndef jira_issue_transition_update():\n if not helper.check_github_ip(src_ip=request.access_route[0]):\n raise NotAcceptable('Github IP whitelist check failed! IP: {}'.\n format(request.access_route[0]))\n response = None\n if (not request.json or not 'review' in request.json or not 'action' in\n request.json):\n raise NotAcceptable('Invalid JSON')\n if request.json['review']['state'] == 'changes_requested':\n response = jiraservice.issue_transition_update(issue_id=request.\n json['pull_request']['head']['ref'], new_transition_id=os.\n getenv('JIRA_TRANSITION_REJECT_ID'))\n elif request.json['review']['state'] == 'approved':\n response = jiraservice.issue_transition_update(issue_id=request.\n json['pull_request']['head']['ref'], new_transition_id=os.\n getenv('JIRA_TRANSITION_APPROVED_ID'))\n if request.json['review']['state'] == 'approved' or request.json['review'][\n 'state'] == 'changes_requested':\n helper.save_pull_request_review(issue_id=request.json[\n 'pull_request']['head']['ref'], pr_id=request.json[\n 'pull_request']['number'], issue_owner_username=request.json[\n 'pull_request']['user']['login'], reviewer_username=request.\n json['review']['user']['login'], action=request.json['review'][\n 'state'])\n if response:\n return jsonify({'ack': 'OK'})\n else:\n return jsonify({'ack': 'NOT OK'})\n if __name__ == '__main__':\n application.run(debug=True)\n",
"step-4": "import os\nfrom flask import request, jsonify\nfrom flask_api import FlaskAPI\nfrom flask_api.exceptions import NotAcceptable\nfrom dotenv import load_dotenv\nload_dotenv(dotenv_path='./.env')\nfrom src.service.jira import jira\nfrom src.service.helper import helper\napplication = FlaskAPI(__name__)\njiraservice = jira()\nhelper = helper()\n\n\n@application.route('/')\ndef hello_world():\n return jsonify({'Hello': 'World'})\n\n\n@application.route('/jira-issue-transition', methods=['POST'])\ndef jira_issue_transition_update():\n if not helper.check_github_ip(src_ip=request.access_route[0]):\n raise NotAcceptable('Github IP whitelist check failed! IP: {}'.\n format(request.access_route[0]))\n response = None\n if (not request.json or not 'review' in request.json or not 'action' in\n request.json):\n raise NotAcceptable('Invalid JSON')\n if request.json['review']['state'] == 'changes_requested':\n response = jiraservice.issue_transition_update(issue_id=request.\n json['pull_request']['head']['ref'], new_transition_id=os.\n getenv('JIRA_TRANSITION_REJECT_ID'))\n elif request.json['review']['state'] == 'approved':\n response = jiraservice.issue_transition_update(issue_id=request.\n json['pull_request']['head']['ref'], new_transition_id=os.\n getenv('JIRA_TRANSITION_APPROVED_ID'))\n if request.json['review']['state'] == 'approved' or request.json['review'][\n 'state'] == 'changes_requested':\n helper.save_pull_request_review(issue_id=request.json[\n 'pull_request']['head']['ref'], pr_id=request.json[\n 'pull_request']['number'], issue_owner_username=request.json[\n 'pull_request']['user']['login'], reviewer_username=request.\n json['review']['user']['login'], action=request.json['review'][\n 'state'])\n if response:\n return jsonify({'ack': 'OK'})\n else:\n return jsonify({'ack': 'NOT OK'})\n if __name__ == '__main__':\n application.run(debug=True)\n",
"step-5": "import os\nfrom flask import request, jsonify\nfrom flask_api import FlaskAPI\nfrom flask_api.exceptions import NotAcceptable\nfrom dotenv import load_dotenv\n\nload_dotenv(dotenv_path='./.env')\nfrom src.service.jira import jira\nfrom src.service.helper import helper\n\napplication = FlaskAPI(__name__)\njiraservice = jira()\nhelper = helper()\n\n\n@application.route('/')\ndef hello_world():\n return jsonify({'Hello': 'World'})\n\n\n@application.route('/jira-issue-transition', methods=['POST'])\ndef jira_issue_transition_update():\n if not helper.check_github_ip(src_ip=request.access_route[0]):\n raise NotAcceptable('Github IP whitelist check failed! IP: {}'.format(request.access_route[0]))\n\n response = None\n if not request.json or not 'review' in request.json or not 'action' in request.json:\n raise NotAcceptable('Invalid JSON')\n\n if request.json['review']['state'] == 'changes_requested':\n response = jiraservice.issue_transition_update(issue_id=request.json['pull_request']['head']['ref'],\n new_transition_id=os.getenv('JIRA_TRANSITION_REJECT_ID'))\n elif request.json['review']['state'] == 'approved':\n response = jiraservice.issue_transition_update(issue_id=request.json['pull_request']['head']['ref'],\n new_transition_id=os.getenv('JIRA_TRANSITION_APPROVED_ID'))\n\n if request.json['review']['state'] == 'approved' or request.json['review']['state'] == 'changes_requested':\n helper.save_pull_request_review(issue_id=request.json['pull_request']['head']['ref'],\n pr_id=request.json['pull_request']['number'],\n issue_owner_username=request.json['pull_request']['user']['login'],\n reviewer_username=request.json['review']['user']['login'],\n action=request.json['review']['state'])\n\n if response:\n return jsonify({'ack': 'OK'})\n else:\n return jsonify({'ack': 'NOT OK'})\n\n if __name__ == '__main__':\n application.run(debug=True)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import sys
import random
import string

trainingData = open('./Data.txt').readlines()

# Used for storing the Markov states
table = {}

# Stores all the words
words = []

# The size of the tuple that represents the Markov state
ChainLength = 2
# Length of the output chain
Size = int(sys.argv[1])
if len(sys.argv) >= 3:
    ChainLength = int(sys.argv[2])

# Read in data and split into words
for line in trainingData:
    for word in line.split():
        # word = word.translate(str.maketrans('', '', string.punctuation))
        words.append(word)

# For each window of ChainLength+1 consecutive words
for idx in range(0, len(words) - ChainLength):
    ws = words[idx:idx + ChainLength + 1]

    # Construct our table.
    # For example, with a chain length of 2, a valid key/value pair would be
    #     table[('see', 'spot')] = ['run', 'play']
    # indicating that from the state ('see', 'spot') the next word has a 50%
    # chance of being 'run' and a 50% chance of being 'play'.
    key = tuple(ws[:ChainLength])
    val = ws[ChainLength]
    if key in table:
        table[key].append(val)
    else:
        table[key] = [val]

# Pick a starting state that is guaranteed to exist as a key in the table
# (the last ChainLength words of the corpus never become a key).
seed = random.randint(0, len(words) - ChainLength - 1)
ws = words[seed:seed + ChainLength]
gen = []
for i in range(Size):
    gen.append(ws[0])
    # Find the next word randomly, given the current state (the tuple of words)
    choices = table.get(tuple(ws))
    if not choices:
        # dead end at the very end of the corpus: restart from a random state
        ws = list(random.choice(list(table)))
        choices = table[tuple(ws)]
    val = random.choice(choices)
    ws.append(val)
    ws.pop(0)

print(' '.join(gen))
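# Usage sketch (hypothetical file name):
#   python markov.py 50        -> 50 words generated from 2-word states
#   python markov.py 50 3      -> 50 words generated from 3-word states
# Larger chain lengths reproduce the source text more literally; length 1 is near-random.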
|
normal
|
{
"blob_id": "379ab72f5cc74cf6ed4319fff76437ce84aaca23",
"index": 4185,
"step-1": "import os\nimport sys\nimport random\nimport string\n\n\ntrainingData = open('./Data.txt').readlines()\n\n# Used for storing the Markov states\ntable = {} \n\n# Stores all the words\nwords = []\n\n# The size of the tuple that represents the Markov State\nChainLength = 2\n# Length of hte output chain\nSize = int(sys.argv[1])\nif(len(sys.argv) >= 3): ChainLength = int(sys.argv[2])\n\n# Read in data and split into words\nfor line in trainingData:\n\tfor word in line.split():\n\t\t#word = word.translate(string.maketrans(\"\",\"\"), string.punctuation)\n\t\twords.append(word)\n# For each set of words\nfor idx in xrange(0,len(words)-ChainLength):\n\t# Now we have ChainLength+1 amount of words\n\tws = words[idx:idx+ChainLength+1]\n\t\n\t# Construct our table\n\t# For example Chain Lenght of 2\n\t# A valid key val pair would be \n # table[('see', 'spot')] = ['run','play']\n # Indicating that if you are in teh state of ('see', 'spot') the next word has a 50% chance of being run and a 50% chance of being play\n\tkey = tuple(ws[:ChainLength])\n\tval = ws[ChainLength]\n\tif key in table:\n\t\ttable[key].append(val)\n\telse:\n\t\ttable[key] = [val]\n\t\n\nseed = random.randint(0, len(words)-ChainLength+1)\nws = words[seed:seed+ChainLength] \ngen = []\nfor i in xrange(0,int(sys.argv[1])):\n\tgen.append(ws[0])\n\t# Actually find the next word randomally given the current state Ie: the tuple of words \n\tval = random.choice(table[tuple(ws)])\n\tws.append(val)\n\tws.pop(0)\n\nprint ' '.join(gen)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cv2.putText(image, 'Hello World!', (75, 290), cv2.FONT_HERSHEY_COMPLEX, 2,
(100, 170, 0), 3)
cv2.imshow('Hello World!', image)
cv2.imwrite('Text.jpg', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
image = np.zeros((512, 512, 3), np.uint8)
cv2.putText(image, 'Hello World!', (75, 290), cv2.FONT_HERSHEY_COMPLEX, 2,
(100, 170, 0), 3)
cv2.imshow('Hello World!', image)
cv2.imwrite('Text.jpg', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
<|reserved_special_token_0|>
image = np.zeros((512, 512, 3), np.uint8)
cv2.putText(image, 'Hello World!', (75, 290), cv2.FONT_HERSHEY_COMPLEX, 2,
(100, 170, 0), 3)
cv2.imshow('Hello World!', image)
cv2.imwrite('Text.jpg', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
"""
# Create a black image
image = np.zeros((512,512,3), np.uint8)
# Can we make this in black and white?
image_bw = np.zeros((512,512), np.uint8)
cv2.imshow("Black Rectangle (Color)", image)
cv2.imshow("Black Rectangle (B&W)", image_bw)
cv2.waitKey(0)
cv2.destroyAllWindows()
image = np.zeros((512,512,3), np.uint8)
cv2.line(image, (0,0), (511,511), (255,127,0), 5)  # start position, end position of the line, BGR color, thickness 5
cv2.imshow("Blue Line", image)
cv2.imwrite("blueline.jpg",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
image = np.zeros((512,512,3), np.uint8)
cv2.rectangle(image, (100,100), (300,250), (127,50,127), -1)
cv2.imshow("Rectangle", image)
cv2.imwrite("Rectangle.jpg",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
image = np.zeros((512,512,3), np.uint8)
cv2.circle(image, (350, 350), 100, (15,75,50), -1)
cv2.imshow("Circle", image)
cv2.imwrite("circle.jpg",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
image = np.zeros((512,512,3), np.uint8)
# Let's define four points
pts = np.array( [[10,50], [400,50], [90,200], [50,500]], np.int32)
# Let's now reshape our points in form required by polylines
pts = pts.reshape((-1,1,2))
cv2.polylines(image, [pts], True, (0,0,255), 3)
cv2.imshow("Polygon", image)
cv2.imwrite("polygon.jpg",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
"""
image = np.zeros((512,512,3), np.uint8)
cv2.putText(image, 'Hello World!', (75,290), cv2.FONT_HERSHEY_COMPLEX, 2, (100,170,0), 3)
cv2.imshow("Hello World!", image)
cv2.imwrite("Text.jpg",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
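# For reference (not part of the original file): OpenCV expects colors in BGR
# order, not RGB, and the putText call above follows the signature
#   cv2.putText(img, text, org, fontFace, fontScale, color, thickness)
# so (100, 170, 0) is blue=100, green=170, red=0, drawn with its origin at (75, 290).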
|
flexible
|
{
"blob_id": "693f2a56578dfb1e4f9c73a0d33c5585070e9f9e",
"index": 5371,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.putText(image, 'Hello World!', (75, 290), cv2.FONT_HERSHEY_COMPLEX, 2,\n (100, 170, 0), 3)\ncv2.imshow('Hello World!', image)\ncv2.imwrite('Text.jpg', image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nimage = np.zeros((512, 512, 3), np.uint8)\ncv2.putText(image, 'Hello World!', (75, 290), cv2.FONT_HERSHEY_COMPLEX, 2,\n (100, 170, 0), 3)\ncv2.imshow('Hello World!', image)\ncv2.imwrite('Text.jpg', image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\n<mask token>\nimage = np.zeros((512, 512, 3), np.uint8)\ncv2.putText(image, 'Hello World!', (75, 290), cv2.FONT_HERSHEY_COMPLEX, 2,\n (100, 170, 0), 3)\ncv2.imshow('Hello World!', image)\ncv2.imwrite('Text.jpg', image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\n\"\"\"\n# Create a black image\nimage = np.zeros((512,512,3), np.uint8)\n\n# Can we make this in black and white?\nimage_bw = np.zeros((512,512), np.uint8)\n\ncv2.imshow(\"Black Rectangle (Color)\", image)\ncv2.imshow(\"Black Rectangle (B&W)\", image_bw)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\nimage = np.zeros((512,512,3), np.uint8)\ncv2.line(image, (0,0), (511,511), (255,127,0), 5) #Start Position , End positon of a line , RGB , 5 >> Thickness\ncv2.imshow(\"Blue Line\", image)\ncv2.imwrite(\"blueline.jpg\",image)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\nimage = np.zeros((512,512,3), np.uint8)\n\ncv2.rectangle(image, (100,100), (300,250), (127,50,127), -1)\ncv2.imshow(\"Rectangle\", image)\ncv2.imwrite(\"Rectangle.jpg\",image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\nimage = np.zeros((512,512,3), np.uint8)\n\ncv2.circle(image, (350, 350), 100, (15,75,50), -1) \ncv2.imshow(\"Circle\", image)\ncv2.imwrite(\"circle.jpg\",image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n\n\nimage = np.zeros((512,512,3), np.uint8)\n\n# Let's define four points\npts = np.array( [[10,50], [400,50], [90,200], [50,500]], np.int32)\n\n# Let's now reshape our points in form required by polylines\npts = pts.reshape((-1,1,2))\n\ncv2.polylines(image, [pts], True, (0,0,255), 3)\ncv2.imshow(\"Polygon\", image)\ncv2.imwrite(\"polygon.jpg\",image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\"\"\"\n\nimage = np.zeros((512,512,3), np.uint8)\n\ncv2.putText(image, 'Hello World!', (75,290), cv2.FONT_HERSHEY_COMPLEX, 2, (100,170,0), 3)\ncv2.imshow(\"Hello World!\", image)\ncv2.imwrite(\"Text.jpg\",image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class HandView:
<|reserved_special_token_0|>
def __init__(self, controller, display, ruleset):
self.controller = controller
self.display = display
self.ruleset = ruleset
self.Meld_Threshold = controller._state.rules.Meld_Threshold
self.deal_size = controller._state.rules.Deal_Size
self.help_text = controller._state.rules.help_text
if ruleset == 'Liverpool':
            self.buttons_per_player = self.Meld_Threshold[0][0] + self.Meld_Threshold[0][1]
self.RuleSetsButtons = RuleSetsButtons_LP
elif ruleset == 'HandAndFoot':
self.RuleSetsButtons = RuleSetsButtons_HF
self.hand_scaling = UIC.scale, UIC.Card_Spacing
self.current_hand = []
self.last_hand = []
self.hand_info = []
self.prepared_cards = []
self.discards = []
self.discard_confirm = False
self.num_wilds = 0
self.wild_cards = []
self.selected_list = []
self.round_index = 0
self.round_advance = False
self.num_players = 1
self.need_updated_buttons = True
self.ready_color_idx = 2
self.not_ready_color_idx = 6
self.RuleSetsButtons.CreateButtons(self)
def update(self, player_index=0, num_players=1, visible_scards=[]):
"""This updates the view of the hand, between rounds it displays a message. """
self.visible_scards = visible_scards
self.controller._state.player_index = player_index
        if (self.num_players > num_players and self.controller._state.rules.Shared_Board
                and not self.need_updated_buttons):
self.playerLeftGame(num_players)
self.num_players = num_players
if self.controller._state.round == -1:
self.mesgBetweenRounds(self.help_text)
if self.round_advance:
self.round_index = self.round_index + 1
if self.round_index < len(self.Meld_Threshold):
                    self.help_text[0] = 'This is the round of ' + str(self.Meld_Threshold[self.round_index]) + ' ! '
self.need_updated_buttons = True
else:
self.help_text = [
'Game has concluded. Scores for each round can be found in command window.'
]
self.round_advance = False
else:
if not self.round_index == self.controller._state.round:
                skipped_rounds = self.controller._state.round - self.round_index
for idx in range(skipped_rounds):
score = 0
self.controller.lateJoinScores(score)
self.round_index = self.controller._state.round
self.round_advance = True
self.ready_color_idx = 2
self.not_ready_color_idx = 6
self.last_hand = self.current_hand
self.current_hand = self.controller.getHand()
if len(self.current_hand) == 0:
self.hand_info = []
elif not self.last_hand == self.current_hand:
            self.hand_info = HandManagement.WrapHand(self, self.current_hand, self.hand_info)
HandManagement.ShowHolding(self, self.hand_info)
self.RuleSetsButtons.ButtonDisplay(self)
def nextEventWildsOnBoard(self):
"""This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.
It is looking for key strokes to designate ambiguous wild cards in runs.
The mouse is ignored until you designate all the wilds (turn phase goes back to play)."""
if self.controller._state.rules.Shared_Board and self.num_wilds > 0:
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
print('pygame crash, AAAHHH')
pygame.quit()
quit()
else:
HandManagement.wildsHiLoGetInput(self)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def discardConfirmation(self, confirmed, wrapped_discards):
""" Confirm a user is sure about a discard and then perform it once confirmed."""
discards = []
for element in wrapped_discards:
discards.append(element.card)
if self.discards != discards:
confirmed = False
self.discards = discards
if not confirmed:
self.controller.note = 'Please confirm - discard ' + '{0}'.format(
self.discards)
return True
else:
if self.discard_confirm:
controller_response = self.controller.discard(self.discards)
if controller_response:
for element in wrapped_discards:
self.hand_info.remove(element)
return False
<|reserved_special_token_0|>
def labelMedium(self, labelstr, x_offset, y_offset):
font = UIC.Medium_Text
text_surface = font.render(labelstr, True, UIC.Bright_Blue)
text_rect = text_surface.get_rect()
text_rect.center = x_offset, y_offset
self.display.blit(text_surface, text_rect)
def playerLeftGame(self, num_players):
self.controller.resetProcessedCards(self.visible_scards)
self.controller.clearPreparedCards()
        self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.hand_info)
self.controller.note = (
'A player has left the game, all prepared cards are automatically cleared.'
)
if num_players > 1:
players_sp_w = UIC.Disp_Width / num_players
else:
players_sp_w = UIC.Disp_Width
for idx in range(num_players):
for button in self.assign_cards_btns[idx]:
button.x = 10 + players_sp_w * idx
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HandView:
<|reserved_special_token_0|>
def __init__(self, controller, display, ruleset):
self.controller = controller
self.display = display
self.ruleset = ruleset
self.Meld_Threshold = controller._state.rules.Meld_Threshold
self.deal_size = controller._state.rules.Deal_Size
self.help_text = controller._state.rules.help_text
if ruleset == 'Liverpool':
            self.buttons_per_player = self.Meld_Threshold[0][0] + self.Meld_Threshold[0][1]
self.RuleSetsButtons = RuleSetsButtons_LP
elif ruleset == 'HandAndFoot':
self.RuleSetsButtons = RuleSetsButtons_HF
self.hand_scaling = UIC.scale, UIC.Card_Spacing
self.current_hand = []
self.last_hand = []
self.hand_info = []
self.prepared_cards = []
self.discards = []
self.discard_confirm = False
self.num_wilds = 0
self.wild_cards = []
self.selected_list = []
self.round_index = 0
self.round_advance = False
self.num_players = 1
self.need_updated_buttons = True
self.ready_color_idx = 2
self.not_ready_color_idx = 6
self.RuleSetsButtons.CreateButtons(self)
def update(self, player_index=0, num_players=1, visible_scards=[]):
"""This updates the view of the hand, between rounds it displays a message. """
self.visible_scards = visible_scards
self.controller._state.player_index = player_index
        if (self.num_players > num_players and self.controller._state.rules.Shared_Board
                and not self.need_updated_buttons):
self.playerLeftGame(num_players)
self.num_players = num_players
if self.controller._state.round == -1:
self.mesgBetweenRounds(self.help_text)
if self.round_advance:
self.round_index = self.round_index + 1
if self.round_index < len(self.Meld_Threshold):
                    self.help_text[0] = 'This is the round of ' + str(self.Meld_Threshold[self.round_index]) + ' ! '
self.need_updated_buttons = True
else:
self.help_text = [
'Game has concluded. Scores for each round can be found in command window.'
]
self.round_advance = False
else:
if not self.round_index == self.controller._state.round:
                skipped_rounds = self.controller._state.round - self.round_index
for idx in range(skipped_rounds):
score = 0
self.controller.lateJoinScores(score)
self.round_index = self.controller._state.round
self.round_advance = True
self.ready_color_idx = 2
self.not_ready_color_idx = 6
self.last_hand = self.current_hand
self.current_hand = self.controller.getHand()
if len(self.current_hand) == 0:
self.hand_info = []
elif not self.last_hand == self.current_hand:
            self.hand_info = HandManagement.WrapHand(self, self.current_hand, self.hand_info)
HandManagement.ShowHolding(self, self.hand_info)
self.RuleSetsButtons.ButtonDisplay(self)
def nextEventWildsOnBoard(self):
"""This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.
It is looking for key strokes to designate ambiguous wild cards in runs.
The mouse is ignored until you designate all the wilds (turn phase goes back to play)."""
if self.controller._state.rules.Shared_Board and self.num_wilds > 0:
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
print('pygame crash, AAAHHH')
pygame.quit()
quit()
else:
HandManagement.wildsHiLoGetInput(self)
def nextEvent(self):
"""This submits the next user input to the controller,
In games with Shared_Board = False (e.g. HandAndFoot) key strokes don't do anything
unless designating values for prepared wild cards, at which time the mouse is ignored
unless you want to clear the prepared cards.
In games with Shared_Board = True wilds on board might change designation upon other cards being played.
IF designation cannot be handled automatically (= if wild can be at the beginning or end of a run) then
it must be designated before play is completed.
This is done in nextEvenWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled."""
if self.controller._state.rules.Shared_Board:
self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())
if self.num_wilds > 0:
self.nextEventWildsOnBoard()
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
print('pygame crash, AAAHHH')
pygame.quit()
quit()
            if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
wild_instructions = (
'Use the keyboard to designate your prepared wild cards \r\n '
)
wild_instructions = (wild_instructions +
'(use 0 for 10 and J, Q, or K for facecards).')
self.controller.note = wild_instructions
pos = pygame.mouse.get_pos()
if self.event.type == pygame.MOUSEBUTTONDOWN:
self.RuleSetsButtons.ClickedButton(self, pos)
for element in self.hand_info:
if element.img_clickable.isOver(pos):
if element.status == 1:
element.status = 0
element.img_clickable.changeOutline(0)
elif element.status == 0:
element.status = 1
element.img_clickable.changeOutline(2)
elif self.event.type == pygame.MOUSEMOTION:
self.RuleSetsButtons.MouseHiLight(self, pos)
HandManagement.MouseHiLight(self.hand_info, pos)
elif self.event.type == pygame.KEYDOWN:
if self.controller._state.rules.Buy_Option:
if self.controller.buying_opportunity:
if self.event.key == pygame.K_y:
self.controller.wantTopCard(True)
self.controller.note = (
'You have signaled you want to buy the card.')
elif self.event.key == pygame.K_n:
self.controller.wantTopCard(False)
self.controller.note = (
'You have signaled you do not want to buy the card.'
)
            if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
HandManagement.ManuallyAssign(self)
def gatherSelected(self):
""" gathers selected cards
in order to take action on selected cards (either discarding them or preparing them)
"""
self.selected_list = []
for element in self.hand_info:
if element.status == 1:
self.selected_list.append(element)
return self.selected_list
def discardConfirmation(self, confirmed, wrapped_discards):
""" Confirm a user is sure about a discard and then perform it once confirmed."""
discards = []
for element in wrapped_discards:
discards.append(element.card)
if self.discards != discards:
confirmed = False
self.discards = discards
if not confirmed:
self.controller.note = 'Please confirm - discard ' + '{0}'.format(
self.discards)
return True
else:
if self.discard_confirm:
controller_response = self.controller.discard(self.discards)
if controller_response:
for element in wrapped_discards:
self.hand_info.remove(element)
return False
<|reserved_special_token_0|>
def labelMedium(self, labelstr, x_offset, y_offset):
font = UIC.Medium_Text
text_surface = font.render(labelstr, True, UIC.Bright_Blue)
text_rect = text_surface.get_rect()
text_rect.center = x_offset, y_offset
self.display.blit(text_surface, text_rect)
def playerLeftGame(self, num_players):
self.controller.resetProcessedCards(self.visible_scards)
self.controller.clearPreparedCards()
        self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.hand_info)
self.controller.note = (
'A player has left the game, all prepared cards are automatically cleared.'
)
if num_players > 1:
players_sp_w = UIC.Disp_Width / num_players
else:
players_sp_w = UIC.Disp_Width
for idx in range(num_players):
for button in self.assign_cards_btns[idx]:
button.x = 10 + players_sp_w * idx
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HandView:
"""This class handles player's cards and enables actions.
    Actions are primarily performed using buttons; since these need to be somewhat customized by game,
    the buttons are in ***.py (*** is Liverpool or HandAndFoot) and are imported as RuleSetsButtons.
Management of displaying the hand's cards is not game specific, and methods that help with that
are in HandManagement.py.
Player can arrange their own hand, and prepare to play cards during other players' turns.
"""
def __init__(self, controller, display, ruleset):
self.controller = controller
self.display = display
self.ruleset = ruleset
self.Meld_Threshold = controller._state.rules.Meld_Threshold
self.deal_size = controller._state.rules.Deal_Size
self.help_text = controller._state.rules.help_text
if ruleset == 'Liverpool':
            self.buttons_per_player = self.Meld_Threshold[0][0] + self.Meld_Threshold[0][1]
self.RuleSetsButtons = RuleSetsButtons_LP
elif ruleset == 'HandAndFoot':
self.RuleSetsButtons = RuleSetsButtons_HF
self.hand_scaling = UIC.scale, UIC.Card_Spacing
self.current_hand = []
self.last_hand = []
self.hand_info = []
self.prepared_cards = []
self.discards = []
self.discard_confirm = False
self.num_wilds = 0
self.wild_cards = []
self.selected_list = []
self.round_index = 0
self.round_advance = False
self.num_players = 1
self.need_updated_buttons = True
self.ready_color_idx = 2
self.not_ready_color_idx = 6
self.RuleSetsButtons.CreateButtons(self)
def update(self, player_index=0, num_players=1, visible_scards=[]):
"""This updates the view of the hand, between rounds it displays a message. """
self.visible_scards = visible_scards
self.controller._state.player_index = player_index
        if (self.num_players > num_players and self.controller._state.rules.Shared_Board
                and not self.need_updated_buttons):
self.playerLeftGame(num_players)
self.num_players = num_players
if self.controller._state.round == -1:
self.mesgBetweenRounds(self.help_text)
if self.round_advance:
self.round_index = self.round_index + 1
if self.round_index < len(self.Meld_Threshold):
                    self.help_text[0] = 'This is the round of ' + str(self.Meld_Threshold[self.round_index]) + ' ! '
self.need_updated_buttons = True
else:
self.help_text = [
'Game has concluded. Scores for each round can be found in command window.'
]
self.round_advance = False
else:
if not self.round_index == self.controller._state.round:
                skipped_rounds = self.controller._state.round - self.round_index
for idx in range(skipped_rounds):
score = 0
self.controller.lateJoinScores(score)
self.round_index = self.controller._state.round
self.round_advance = True
self.ready_color_idx = 2
self.not_ready_color_idx = 6
self.last_hand = self.current_hand
self.current_hand = self.controller.getHand()
if len(self.current_hand) == 0:
self.hand_info = []
elif not self.last_hand == self.current_hand:
            self.hand_info = HandManagement.WrapHand(self, self.current_hand, self.hand_info)
HandManagement.ShowHolding(self, self.hand_info)
self.RuleSetsButtons.ButtonDisplay(self)
def nextEventWildsOnBoard(self):
"""This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.
It is looking for key strokes to designate ambiguous wild cards in runs.
The mouse is ignored until you designate all the wilds (turn phase goes back to play)."""
if self.controller._state.rules.Shared_Board and self.num_wilds > 0:
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
print('pygame crash, AAAHHH')
pygame.quit()
quit()
else:
HandManagement.wildsHiLoGetInput(self)
def nextEvent(self):
"""This submits the next user input to the controller,
In games with Shared_Board = False (e.g. HandAndFoot) key strokes don't do anything
unless designating values for prepared wild cards, at which time the mouse is ignored
unless you want to clear the prepared cards.
In games with Shared_Board = True wilds on board might change designation upon other cards being played.
IF designation cannot be handled automatically (= if wild can be at the beginning or end of a run) then
it must be designated before play is completed.
This is done in nextEvenWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled."""
if self.controller._state.rules.Shared_Board:
self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())
if self.num_wilds > 0:
self.nextEventWildsOnBoard()
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
print('pygame crash, AAAHHH')
pygame.quit()
quit()
            if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
wild_instructions = (
'Use the keyboard to designate your prepared wild cards \r\n '
)
wild_instructions = (wild_instructions +
'(use 0 for 10 and J, Q, or K for facecards).')
self.controller.note = wild_instructions
pos = pygame.mouse.get_pos()
if self.event.type == pygame.MOUSEBUTTONDOWN:
self.RuleSetsButtons.ClickedButton(self, pos)
for element in self.hand_info:
if element.img_clickable.isOver(pos):
if element.status == 1:
element.status = 0
element.img_clickable.changeOutline(0)
elif element.status == 0:
element.status = 1
element.img_clickable.changeOutline(2)
elif self.event.type == pygame.MOUSEMOTION:
self.RuleSetsButtons.MouseHiLight(self, pos)
HandManagement.MouseHiLight(self.hand_info, pos)
elif self.event.type == pygame.KEYDOWN:
if self.controller._state.rules.Buy_Option:
if self.controller.buying_opportunity:
if self.event.key == pygame.K_y:
self.controller.wantTopCard(True)
self.controller.note = (
'You have signaled you want to buy the card.')
elif self.event.key == pygame.K_n:
self.controller.wantTopCard(False)
self.controller.note = (
'You have signaled you do not want to buy the card.'
)
            if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
HandManagement.ManuallyAssign(self)
def gatherSelected(self):
""" gathers selected cards
in order to take action on selected cards (either discarding them or preparing them)
"""
self.selected_list = []
for element in self.hand_info:
if element.status == 1:
self.selected_list.append(element)
return self.selected_list
def discardConfirmation(self, confirmed, wrapped_discards):
""" Confirm a user is sure about a discard and then perform it once confirmed."""
discards = []
for element in wrapped_discards:
discards.append(element.card)
if self.discards != discards:
confirmed = False
self.discards = discards
if not confirmed:
self.controller.note = 'Please confirm - discard ' + '{0}'.format(
self.discards)
return True
else:
if self.discard_confirm:
controller_response = self.controller.discard(self.discards)
if controller_response:
for element in wrapped_discards:
self.hand_info.remove(element)
return False
def mesgBetweenRounds(self, message):
"""print message where cards usually displayed until Ready button is clicked for next round."""
font = UIC.Medium_Text
y_offset = UIC.Disp_Height * (1 - UIC.Hand_Row_Fraction * 0.8)
for message_string in message:
text_surface = font.render(message_string, True, UIC.Black)
text_rect = text_surface.get_rect()
text_rect.center = UIC.Disp_Width * 0.5, y_offset
y_offset = y_offset + UIC.Medium_Text_Feed
self.display.blit(text_surface, text_rect)
def labelMedium(self, labelstr, x_offset, y_offset):
font = UIC.Medium_Text
text_surface = font.render(labelstr, True, UIC.Bright_Blue)
text_rect = text_surface.get_rect()
text_rect.center = x_offset, y_offset
self.display.blit(text_surface, text_rect)
def playerLeftGame(self, num_players):
self.controller.resetProcessedCards(self.visible_scards)
self.controller.clearPreparedCards()
        self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.hand_info)
self.controller.note = (
'A player has left the game, all prepared cards are automatically cleared.'
)
if num_players > 1:
players_sp_w = UIC.Disp_Width / num_players
else:
players_sp_w = UIC.Disp_Width
for idx in range(num_players):
for button in self.assign_cards_btns[idx]:
button.x = 10 + players_sp_w * idx
<|reserved_special_token_1|>
import pygame
import textwrap
import client.Button as Btn
from client.ClickableImage import ClickableImage as ClickImg
from client.CreateDisplay import CreateDisplay
import client.LiverpoolButtons as RuleSetsButtons_LP
import client.HandAndFootButtons as RuleSetsButtons_HF
import client.HandManagement as HandManagement
from client.UICardWrapper import UICardWrapper
import client.UIConstants as UIC
from common.Card import Card
class HandView:
"""This class handles player's cards and enables actions.
    Actions are primarily performed using buttons; since these need to be somewhat customized by game,
    the buttons are in ***.py (*** is Liverpool or HandAndFoot) and are imported as RuleSetsButtons.
Management of displaying the hand's cards is not game specific, and methods that help with that
are in HandManagement.py.
Player can arrange their own hand, and prepare to play cards during other players' turns.
"""
def __init__(self, controller, display, ruleset):
self.controller = controller
self.display = display
self.ruleset = ruleset
self.Meld_Threshold = controller._state.rules.Meld_Threshold
self.deal_size = controller._state.rules.Deal_Size
self.help_text = controller._state.rules.help_text
if ruleset == 'Liverpool':
            self.buttons_per_player = self.Meld_Threshold[0][0] + self.Meld_Threshold[0][1]
self.RuleSetsButtons = RuleSetsButtons_LP
elif ruleset == 'HandAndFoot':
self.RuleSetsButtons = RuleSetsButtons_HF
self.hand_scaling = UIC.scale, UIC.Card_Spacing
self.current_hand = []
self.last_hand = []
self.hand_info = []
self.prepared_cards = []
self.discards = []
self.discard_confirm = False
self.num_wilds = 0
self.wild_cards = []
self.selected_list = []
self.round_index = 0
self.round_advance = False
self.num_players = 1
self.need_updated_buttons = True
self.ready_color_idx = 2
self.not_ready_color_idx = 6
self.RuleSetsButtons.CreateButtons(self)
def update(self, player_index=0, num_players=1, visible_scards=[]):
"""This updates the view of the hand, between rounds it displays a message. """
self.visible_scards = visible_scards
self.controller._state.player_index = player_index
        if (self.num_players > num_players and self.controller._state.rules.Shared_Board
                and not self.need_updated_buttons):
self.playerLeftGame(num_players)
self.num_players = num_players
if self.controller._state.round == -1:
self.mesgBetweenRounds(self.help_text)
if self.round_advance:
self.round_index = self.round_index + 1
if self.round_index < len(self.Meld_Threshold):
                    self.help_text[0] = 'This is the round of ' + str(self.Meld_Threshold[self.round_index]) + ' ! '
self.need_updated_buttons = True
else:
self.help_text = [
'Game has concluded. Scores for each round can be found in command window.'
]
self.round_advance = False
else:
if not self.round_index == self.controller._state.round:
                skipped_rounds = self.controller._state.round - self.round_index
for idx in range(skipped_rounds):
score = 0
self.controller.lateJoinScores(score)
self.round_index = self.controller._state.round
self.round_advance = True
self.ready_color_idx = 2
self.not_ready_color_idx = 6
self.last_hand = self.current_hand
self.current_hand = self.controller.getHand()
if len(self.current_hand) == 0:
self.hand_info = []
elif not self.last_hand == self.current_hand:
            self.hand_info = HandManagement.WrapHand(self, self.current_hand, self.hand_info)
HandManagement.ShowHolding(self, self.hand_info)
self.RuleSetsButtons.ButtonDisplay(self)
def nextEventWildsOnBoard(self):
"""This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.
It is looking for key strokes to designate ambiguous wild cards in runs.
The mouse is ignored until you designate all the wilds (turn phase goes back to play)."""
if self.controller._state.rules.Shared_Board and self.num_wilds > 0:
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
print('pygame crash, AAAHHH')
pygame.quit()
quit()
else:
HandManagement.wildsHiLoGetInput(self)
def nextEvent(self):
"""This submits the next user input to the controller,
In games with Shared_Board = False (e.g. HandAndFoot) key strokes don't do anything
unless designating values for prepared wild cards, at which time the mouse is ignored
unless you want to clear the prepared cards.
In games with Shared_Board = True wilds on board might change designation upon other cards being played.
IF designation cannot be handled automatically (= if wild can be at the beginning or end of a run) then
it must be designated before play is completed.
This is done in nextEvenWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled."""
if self.controller._state.rules.Shared_Board:
self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())
if self.num_wilds > 0:
self.nextEventWildsOnBoard()
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
print('pygame crash, AAAHHH')
pygame.quit()
quit()
            if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
wild_instructions = (
'Use the keyboard to designate your prepared wild cards \r\n '
)
wild_instructions = (wild_instructions +
'(use 0 for 10 and J, Q, or K for facecards).')
self.controller.note = wild_instructions
pos = pygame.mouse.get_pos()
if self.event.type == pygame.MOUSEBUTTONDOWN:
self.RuleSetsButtons.ClickedButton(self, pos)
for element in self.hand_info:
if element.img_clickable.isOver(pos):
if element.status == 1:
element.status = 0
element.img_clickable.changeOutline(0)
elif element.status == 0:
element.status = 1
element.img_clickable.changeOutline(2)
elif self.event.type == pygame.MOUSEMOTION:
self.RuleSetsButtons.MouseHiLight(self, pos)
HandManagement.MouseHiLight(self.hand_info, pos)
elif self.event.type == pygame.KEYDOWN:
if self.controller._state.rules.Buy_Option:
if self.controller.buying_opportunity:
if self.event.key == pygame.K_y:
self.controller.wantTopCard(True)
self.controller.note = (
'You have signaled you want to buy the card.')
elif self.event.key == pygame.K_n:
self.controller.wantTopCard(False)
self.controller.note = (
'You have signaled you do not want to buy the card.'
)
            if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
HandManagement.ManuallyAssign(self)
def gatherSelected(self):
""" gathers selected cards
in order to take action on selected cards (either discarding them or preparing them)
"""
self.selected_list = []
for element in self.hand_info:
if element.status == 1:
self.selected_list.append(element)
return self.selected_list
def discardConfirmation(self, confirmed, wrapped_discards):
""" Confirm a user is sure about a discard and then perform it once confirmed."""
discards = []
for element in wrapped_discards:
discards.append(element.card)
if self.discards != discards:
confirmed = False
self.discards = discards
if not confirmed:
self.controller.note = 'Please confirm - discard ' + '{0}'.format(
self.discards)
return True
else:
if self.discard_confirm:
controller_response = self.controller.discard(self.discards)
if controller_response:
for element in wrapped_discards:
self.hand_info.remove(element)
return False
def mesgBetweenRounds(self, message):
"""print message where cards usually displayed until Ready button is clicked for next round."""
font = UIC.Medium_Text
y_offset = UIC.Disp_Height * (1 - UIC.Hand_Row_Fraction * 0.8)
for message_string in message:
text_surface = font.render(message_string, True, UIC.Black)
text_rect = text_surface.get_rect()
text_rect.center = UIC.Disp_Width * 0.5, y_offset
y_offset = y_offset + UIC.Medium_Text_Feed
self.display.blit(text_surface, text_rect)
def labelMedium(self, labelstr, x_offset, y_offset):
font = UIC.Medium_Text
text_surface = font.render(labelstr, True, UIC.Bright_Blue)
text_rect = text_surface.get_rect()
text_rect.center = x_offset, y_offset
self.display.blit(text_surface, text_rect)
def playerLeftGame(self, num_players):
self.controller.resetProcessedCards(self.visible_scards)
self.controller.clearPreparedCards()
        self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.hand_info)
self.controller.note = (
'A player has left the game, all prepared cards are automatically cleared.'
)
if num_players > 1:
players_sp_w = UIC.Disp_Width / num_players
else:
players_sp_w = UIC.Disp_Width
for idx in range(num_players):
for button in self.assign_cards_btns[idx]:
button.x = 10 + players_sp_w * idx
<|reserved_special_token_1|>
import pygame
import textwrap
import client.Button as Btn
from client.ClickableImage import ClickableImage as ClickImg
from client.CreateDisplay import CreateDisplay
import client.LiverpoolButtons as RuleSetsButtons_LP
import client.HandAndFootButtons as RuleSetsButtons_HF
import client.HandManagement as HandManagement
from client.UICardWrapper import UICardWrapper
import client.UIConstants as UIC
from common.Card import Card
class HandView:
"""This class handles player's cards and enables actions.
    Actions are primarily performed using buttons; since these need to be somewhat customized by game,
    the buttons are in ***.py (*** is Liverpool or HandAndFoot) and are imported as RuleSetsButtons.
Management of displaying the hand's cards is not game specific, and methods that help with that
are in HandManagement.py.
Player can arrange their own hand, and prepare to play cards during other players' turns.
"""
def __init__(self, controller, display, ruleset):
self.controller = controller
self.display = display
self.ruleset = ruleset
self.Meld_Threshold = controller._state.rules.Meld_Threshold
self.deal_size = controller._state.rules.Deal_Size
self.help_text = controller._state.rules.help_text
if ruleset == 'Liverpool':
self.buttons_per_player = self.Meld_Threshold[0][0] + self.Meld_Threshold[0][1]
self.RuleSetsButtons = RuleSetsButtons_LP
elif ruleset == 'HandAndFoot':
self.RuleSetsButtons = RuleSetsButtons_HF
self.hand_scaling = (UIC.scale, UIC.Card_Spacing)
self.current_hand = []
self.last_hand = []
self.hand_info = [] # will contain UICardWrapped elements of current_hand
self.prepared_cards = [] # will contain list of prepared cards from controller
self.discards = []
self.discard_confirm = False
# num_wilds is HandAndFoot specific, only non-zero if by prepare_card_btn in HandAndFootButtons.py is triggered.
self.num_wilds = 0
self.wild_cards = []
self.selected_list = []
self.round_index = 0
self.round_advance = False
self.num_players = 1
# In Liverpool and other Shared_Board games: prepare cards buttons must be updated each round
self.need_updated_buttons = True
self.ready_color_idx = 2
self.not_ready_color_idx = 6
#
# if someone joins between rounds, then they won't know the correct meld requirement until the round begins.
# (self.controller._state.round = -1 until play commences).
# In HandAndFoot: Correct meld requirement will be written in lower right corner once play commences.
# In Liverpool: Will see correct buttons once round commences.
self.RuleSetsButtons.CreateButtons(self)
def update(self, player_index=0, num_players=1, visible_scards = []):
"""This updates the view of the hand, between rounds it displays a message. """
self.visible_scards = visible_scards
self.controller._state.player_index = player_index
if self.num_players > num_players and self.controller._state.rules.Shared_Board \
and not self.need_updated_buttons:
# A player has left the game after the round has begun -- make adjustments so game can continue.
self.playerLeftGame(num_players)
self.num_players = num_players
if self.controller._state.round == -1:
self.mesgBetweenRounds(self.help_text)
if self.round_advance:
self.round_index = self.round_index + 1
if self.round_index < len(self.Meld_Threshold):
self.help_text[0] = 'This is the round of ' + str(self.Meld_Threshold[self.round_index]) + ' ! '
self.need_updated_buttons = True # used for Liverpool.
else:
self.help_text = ['Game has concluded. Scores for each round can be found in command window.']
self.round_advance = False
else:
if not self.round_index == self.controller._state.round:
# Need this to true up round_index if a player joins mid-game.
skipped_rounds = self.controller._state.round - self.round_index
for idx in range(skipped_rounds):
#todo: How to score latecomers should be moved to ruleset.
score = 0
self.controller.lateJoinScores(score)
self.round_index = self.controller._state.round
self.round_advance = True
# reset outline colors on ready buttons to what they need to be at the start of the "between rounds" state.
self.ready_color_idx = 2
self.not_ready_color_idx = 6
self.last_hand = self.current_hand
self.current_hand = self.controller.getHand()
if len(self.current_hand) == 0:
self.hand_info = []
elif not self.last_hand == self.current_hand:
self.hand_info = HandManagement.WrapHand(self, self.current_hand, self.hand_info)
HandManagement.ShowHolding(self, self.hand_info) # displays hand
self.RuleSetsButtons.ButtonDisplay(self)
def nextEventWildsOnBoard(self):
"""This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.
It is looking for key strokes to designate ambiguous wild cards in runs.
The mouse is ignored until you designate all the wilds (turn phase goes back to play)."""
if self.controller._state.rules.Shared_Board and self.num_wilds > 0:
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
# The window crashed, we should handle this
print("pygame crash, AAAHHH")
pygame.quit()
quit()
else:
# in Shared_Board games, check if there are wilds that need to be updated.
# All other events are ignored until play is finished.
HandManagement.wildsHiLoGetInput(self)
def nextEvent(self):
"""This submits the next user input to the controller,
In games with Shared_Board = False (e.g. HandAndFoot) key strokes don't do anything
unless designating values for prepared wild cards, at which time the mouse is ignored
unless you want to clear the prepared cards.
In games with Shared_Board = True wilds on board might change designation upon other cards being played.
IF designation cannot be handled automatically (= if wild can be at the beginning or end of a run) then
it must be designated before play is completed.
This is done in nextEvenWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled."""
if self.controller._state.rules.Shared_Board:
self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())
if self.num_wilds > 0:
self.nextEventWildsOnBoard()
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
# The window crashed, we should handle this
print("pygame crash, AAAHHH")
pygame.quit()
quit()
if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
wild_instructions = 'Use the keyboard to designate your prepared wild cards \r\n '
wild_instructions = wild_instructions + '(use 0 for 10 and J, Q, or K for facecards).'
self.controller.note = wild_instructions
pos = pygame.mouse.get_pos()
if self.event.type == pygame.MOUSEBUTTONDOWN:
self.RuleSetsButtons.ClickedButton(self, pos)
for element in self.hand_info:
# cannot select prepared cards, so not included in logic below.
if element.img_clickable.isOver(pos):
if element.status == 1:
element.status = 0
element.img_clickable.changeOutline(0)
elif element.status == 0:
element.status = 1
element.img_clickable.changeOutline(2)
elif self.event.type == pygame.MOUSEMOTION:
self.RuleSetsButtons.MouseHiLight(self, pos)
HandManagement.MouseHiLight(self.hand_info, pos)
elif self.event.type == pygame.KEYDOWN:
if self.controller._state.rules.Buy_Option:
if self.controller.buying_opportunity:
if self.event.key == pygame.K_y:
self.controller.wantTopCard(True)
self.controller.note = 'You have signaled you want to buy the card.'
elif self.event.key == pygame.K_n:
self.controller.wantTopCard(False)
self.controller.note = 'You have signaled you do not want to buy the card.'
if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
HandManagement.ManuallyAssign(self)
def gatherSelected(self):
""" gathers selected cards
in order to take action on selected cards (either discarding them or preparing them)
"""
self.selected_list = []
for element in self.hand_info:
if element.status == 1:
self.selected_list.append(element)
return self.selected_list
def discardConfirmation(self, confirmed, wrapped_discards):
""" Confirm a user is sure about a discard and then perform it once confirmed."""
discards = []
for element in wrapped_discards:
discards.append(element.card)
if self.discards != discards:
confirmed = False
self.discards = discards
if not confirmed:
self.controller.note = "Please confirm - discard " + "{0}".format(self.discards)
return True # ask for confirmation
else:
# confirmed is True, performing discard and removing discarded wrapped cards from hand_info.
if self.discard_confirm:
controller_response = self.controller.discard(self.discards)
if controller_response:
for element in wrapped_discards:
self.hand_info.remove(element)
return False # now that this is done, we don't have anything waiting on confirmation
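    # Illustrative two-call flow (as read from the code above, not from any
    # original docs): the first call records the selection and returns True so
    # the caller can ask the user to confirm; a later call with confirmed=True
    # (and self.discard_confirm set by the ruleset buttons) performs the
    # discard, removes the wrapped cards from hand_info, and returns False.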
def mesgBetweenRounds(self, message):
"""print message where cards usually displayed until Ready button is clicked for next round."""
font = UIC.Medium_Text
y_offset = (UIC.Disp_Height * (1 - (UIC.Hand_Row_Fraction * 0.8)))
for message_string in message:
text_surface = font.render(message_string, True, UIC.Black)
text_rect = text_surface.get_rect()
text_rect.center = ((UIC.Disp_Width * 0.5), y_offset)
y_offset = y_offset + UIC.Medium_Text_Feed
self.display.blit(text_surface, text_rect)
def labelMedium(self, labelstr, x_offset, y_offset):
font = UIC.Medium_Text
text_surface = font.render(labelstr, True, UIC.Bright_Blue)
text_rect = text_surface.get_rect()
text_rect.center = (x_offset, y_offset)
self.display.blit(text_surface, text_rect)
def playerLeftGame(self, num_players):
        # A player has disconnected from a game with Shared_Board = True. Must make adjustments to
        # (i) card group dictionaries, (ii) prepared cards & (iii) button locations.
self.controller.resetProcessedCards(self.visible_scards)
self.controller.clearPreparedCards() # so that prepared cards won't be mistakenly played on wrong group.
self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.hand_info)
self.controller.note = "A player has left the game, all prepared cards are automatically cleared."
# reset set/run button locations:
if num_players > 1:
players_sp_w = UIC.Disp_Width / num_players
else:
players_sp_w = UIC.Disp_Width
for idx in range(num_players):
for button in self.assign_cards_btns[idx]:
button.x = 10 + (players_sp_w * idx)
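# A minimal, hypothetical driver loop for this view; the controller, display
# and ruleset objects are assumed to come from the rest of the client package:
#
#   hand_view = HandView(controller, display, 'HandAndFoot')
#   while True:
#       hand_view.nextEvent()                        # mouse/keyboard input
#       hand_view.update(player_index, num_players)  # redraw hand and buttons
#       pygame.display.update()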
|
flexible
|
{
"blob_id": "1cdd315eec6792a8588dc2e6a221bc024be47078",
"index": 7885,
"step-1": "<mask token>\n\n\nclass HandView:\n <mask token>\n\n def __init__(self, controller, display, ruleset):\n self.controller = controller\n self.display = display\n self.ruleset = ruleset\n self.Meld_Threshold = controller._state.rules.Meld_Threshold\n self.deal_size = controller._state.rules.Deal_Size\n self.help_text = controller._state.rules.help_text\n if ruleset == 'Liverpool':\n self.buttons_per_player = self.Meld_Threshold[0][0\n ] + self.Meld_Threshold[0][1]\n self.RuleSetsButtons = RuleSetsButtons_LP\n elif ruleset == 'HandAndFoot':\n self.RuleSetsButtons = RuleSetsButtons_HF\n self.hand_scaling = UIC.scale, UIC.Card_Spacing\n self.current_hand = []\n self.last_hand = []\n self.hand_info = []\n self.prepared_cards = []\n self.discards = []\n self.discard_confirm = False\n self.num_wilds = 0\n self.wild_cards = []\n self.selected_list = []\n self.round_index = 0\n self.round_advance = False\n self.num_players = 1\n self.need_updated_buttons = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.RuleSetsButtons.CreateButtons(self)\n\n def update(self, player_index=0, num_players=1, visible_scards=[]):\n \"\"\"This updates the view of the hand, between rounds it displays a message. \"\"\"\n self.visible_scards = visible_scards\n self.controller._state.player_index = player_index\n if (self.num_players > num_players and self.controller._state.rules\n .Shared_Board and not self.need_updated_buttons):\n self.playerLeftGame(num_players)\n self.num_players = num_players\n if self.controller._state.round == -1:\n self.mesgBetweenRounds(self.help_text)\n if self.round_advance:\n self.round_index = self.round_index + 1\n if self.round_index < len(self.Meld_Threshold):\n self.help_text[0] = 'This is the round of ' + str(self.\n Meld_Threshold[self.round_index]) + ' ! '\n self.need_updated_buttons = True\n else:\n self.help_text = [\n 'Game has concluded. 
Scores for each round can be found in command window.'\n ]\n self.round_advance = False\n else:\n if not self.round_index == self.controller._state.round:\n skipped_rounds = (self.controller._state.round - self.\n round_index)\n for idx in range(skipped_rounds):\n score = 0\n self.controller.lateJoinScores(score)\n self.round_index = self.controller._state.round\n self.round_advance = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.last_hand = self.current_hand\n self.current_hand = self.controller.getHand()\n if len(self.current_hand) == 0:\n self.hand_info = []\n elif not self.last_hand == self.current_hand:\n self.hand_info = HandManagement.WrapHand(self, self.\n current_hand, self.hand_info)\n HandManagement.ShowHolding(self, self.hand_info)\n self.RuleSetsButtons.ButtonDisplay(self)\n\n def nextEventWildsOnBoard(self):\n \"\"\"This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.\n\n It is looking for key strokes to designate ambiguous wild cards in runs.\n The mouse is ignored until you designate all the wilds (turn phase goes back to play).\"\"\"\n if self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n print('pygame crash, AAAHHH')\n pygame.quit()\n quit()\n else:\n HandManagement.wildsHiLoGetInput(self)\n <mask token>\n <mask token>\n\n def discardConfirmation(self, confirmed, wrapped_discards):\n \"\"\" Confirm a user is sure about a discard and then perform it once confirmed.\"\"\"\n discards = []\n for element in wrapped_discards:\n discards.append(element.card)\n if self.discards != discards:\n confirmed = False\n self.discards = discards\n if not confirmed:\n self.controller.note = 'Please confirm - discard ' + '{0}'.format(\n self.discards)\n return True\n else:\n if self.discard_confirm:\n controller_response = self.controller.discard(self.discards)\n if controller_response:\n for element in wrapped_discards:\n self.hand_info.remove(element)\n return False\n <mask token>\n\n def labelMedium(self, labelstr, x_offset, y_offset):\n font = UIC.Medium_Text\n text_surface = font.render(labelstr, True, UIC.Bright_Blue)\n text_rect = text_surface.get_rect()\n text_rect.center = x_offset, y_offset\n self.display.blit(text_surface, text_rect)\n\n def playerLeftGame(self, num_players):\n self.controller.resetProcessedCards(self.visible_scards)\n self.controller.clearPreparedCards()\n self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.\n hand_info)\n self.controller.note = (\n 'A player has left the game, all prepared cards are automatically cleared.'\n )\n if num_players > 1:\n players_sp_w = UIC.Disp_Width / num_players\n else:\n players_sp_w = UIC.Disp_Width\n for idx in range(num_players):\n for button in self.assign_cards_btns[idx]:\n button.x = 10 + players_sp_w * idx\n",
"step-2": "<mask token>\n\n\nclass HandView:\n <mask token>\n\n def __init__(self, controller, display, ruleset):\n self.controller = controller\n self.display = display\n self.ruleset = ruleset\n self.Meld_Threshold = controller._state.rules.Meld_Threshold\n self.deal_size = controller._state.rules.Deal_Size\n self.help_text = controller._state.rules.help_text\n if ruleset == 'Liverpool':\n self.buttons_per_player = self.Meld_Threshold[0][0\n ] + self.Meld_Threshold[0][1]\n self.RuleSetsButtons = RuleSetsButtons_LP\n elif ruleset == 'HandAndFoot':\n self.RuleSetsButtons = RuleSetsButtons_HF\n self.hand_scaling = UIC.scale, UIC.Card_Spacing\n self.current_hand = []\n self.last_hand = []\n self.hand_info = []\n self.prepared_cards = []\n self.discards = []\n self.discard_confirm = False\n self.num_wilds = 0\n self.wild_cards = []\n self.selected_list = []\n self.round_index = 0\n self.round_advance = False\n self.num_players = 1\n self.need_updated_buttons = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.RuleSetsButtons.CreateButtons(self)\n\n def update(self, player_index=0, num_players=1, visible_scards=[]):\n \"\"\"This updates the view of the hand, between rounds it displays a message. \"\"\"\n self.visible_scards = visible_scards\n self.controller._state.player_index = player_index\n if (self.num_players > num_players and self.controller._state.rules\n .Shared_Board and not self.need_updated_buttons):\n self.playerLeftGame(num_players)\n self.num_players = num_players\n if self.controller._state.round == -1:\n self.mesgBetweenRounds(self.help_text)\n if self.round_advance:\n self.round_index = self.round_index + 1\n if self.round_index < len(self.Meld_Threshold):\n self.help_text[0] = 'This is the round of ' + str(self.\n Meld_Threshold[self.round_index]) + ' ! '\n self.need_updated_buttons = True\n else:\n self.help_text = [\n 'Game has concluded. Scores for each round can be found in command window.'\n ]\n self.round_advance = False\n else:\n if not self.round_index == self.controller._state.round:\n skipped_rounds = (self.controller._state.round - self.\n round_index)\n for idx in range(skipped_rounds):\n score = 0\n self.controller.lateJoinScores(score)\n self.round_index = self.controller._state.round\n self.round_advance = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.last_hand = self.current_hand\n self.current_hand = self.controller.getHand()\n if len(self.current_hand) == 0:\n self.hand_info = []\n elif not self.last_hand == self.current_hand:\n self.hand_info = HandManagement.WrapHand(self, self.\n current_hand, self.hand_info)\n HandManagement.ShowHolding(self, self.hand_info)\n self.RuleSetsButtons.ButtonDisplay(self)\n\n def nextEventWildsOnBoard(self):\n \"\"\"This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.\n\n It is looking for key strokes to designate ambiguous wild cards in runs.\n The mouse is ignored until you designate all the wilds (turn phase goes back to play).\"\"\"\n if self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n print('pygame crash, AAAHHH')\n pygame.quit()\n quit()\n else:\n HandManagement.wildsHiLoGetInput(self)\n\n def nextEvent(self):\n \"\"\"This submits the next user input to the controller,\n\n In games with Shared_Board = False (e.g. 
HandAndFoot) key strokes don't do anything\n unless designating values for prepared wild cards, at which time the mouse is ignored\n unless you want to clear the prepared cards.\n In games with Shared_Board = True wilds on board might change designation upon other cards being played.\n IF designation cannot be handled automatically (= if wild can be at the beginning or end of a run) then\n it must be designated before play is completed.\n This is done in nextEvenWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled.\"\"\"\n if self.controller._state.rules.Shared_Board:\n self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())\n if self.num_wilds > 0:\n self.nextEventWildsOnBoard()\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n print('pygame crash, AAAHHH')\n pygame.quit()\n quit()\n if (not self.controller._state.rules.Shared_Board and self.\n num_wilds > 0):\n wild_instructions = (\n 'Use the keyboard to designate your prepared wild cards \\r\\n '\n )\n wild_instructions = (wild_instructions +\n '(use 0 for 10 and J, Q, or K for facecards).')\n self.controller.note = wild_instructions\n pos = pygame.mouse.get_pos()\n if self.event.type == pygame.MOUSEBUTTONDOWN:\n self.RuleSetsButtons.ClickedButton(self, pos)\n for element in self.hand_info:\n if element.img_clickable.isOver(pos):\n if element.status == 1:\n element.status = 0\n element.img_clickable.changeOutline(0)\n elif element.status == 0:\n element.status = 1\n element.img_clickable.changeOutline(2)\n elif self.event.type == pygame.MOUSEMOTION:\n self.RuleSetsButtons.MouseHiLight(self, pos)\n HandManagement.MouseHiLight(self.hand_info, pos)\n elif self.event.type == pygame.KEYDOWN:\n if self.controller._state.rules.Buy_Option:\n if self.controller.buying_opportunity:\n if self.event.key == pygame.K_y:\n self.controller.wantTopCard(True)\n self.controller.note = (\n 'You have signaled you want to buy the card.')\n elif self.event.key == pygame.K_n:\n self.controller.wantTopCard(False)\n self.controller.note = (\n 'You have signaled you do not want to buy the card.'\n )\n if (not self.controller._state.rules.Shared_Board and self.\n num_wilds > 0):\n HandManagement.ManuallyAssign(self)\n\n def gatherSelected(self):\n \"\"\" gathers selected cards\n in order to take action on selected cards (either discarding them or preparing them)\n \"\"\"\n self.selected_list = []\n for element in self.hand_info:\n if element.status == 1:\n self.selected_list.append(element)\n return self.selected_list\n\n def discardConfirmation(self, confirmed, wrapped_discards):\n \"\"\" Confirm a user is sure about a discard and then perform it once confirmed.\"\"\"\n discards = []\n for element in wrapped_discards:\n discards.append(element.card)\n if self.discards != discards:\n confirmed = False\n self.discards = discards\n if not confirmed:\n self.controller.note = 'Please confirm - discard ' + '{0}'.format(\n self.discards)\n return True\n else:\n if self.discard_confirm:\n controller_response = self.controller.discard(self.discards)\n if controller_response:\n for element in wrapped_discards:\n self.hand_info.remove(element)\n return False\n <mask token>\n\n def labelMedium(self, labelstr, x_offset, y_offset):\n font = UIC.Medium_Text\n text_surface = font.render(labelstr, True, UIC.Bright_Blue)\n text_rect = text_surface.get_rect()\n text_rect.center = x_offset, y_offset\n self.display.blit(text_surface, text_rect)\n\n def playerLeftGame(self, num_players):\n 
self.controller.resetProcessedCards(self.visible_scards)\n self.controller.clearPreparedCards()\n self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.\n hand_info)\n self.controller.note = (\n 'A player has left the game, all prepared cards are automatically cleared.'\n )\n if num_players > 1:\n players_sp_w = UIC.Disp_Width / num_players\n else:\n players_sp_w = UIC.Disp_Width\n for idx in range(num_players):\n for button in self.assign_cards_btns[idx]:\n button.x = 10 + players_sp_w * idx\n",
"step-3": "<mask token>\n\n\nclass HandView:\n \"\"\"This class handles player's cards and enables actions.\n\n Actions are primarily performed using buttons, since these need to somewhat customized by game\n the buttons are in ***.py (*** is Liverpool or HandAndFoot) and are imported as RuleSetsButtons.\n Management of displaying the hand's cards is not game specific, and methods that help with that\n are in HandManagement.py.\n\n Player can arrange their own hand, and prepare to play cards during other players' turns.\n \"\"\"\n\n def __init__(self, controller, display, ruleset):\n self.controller = controller\n self.display = display\n self.ruleset = ruleset\n self.Meld_Threshold = controller._state.rules.Meld_Threshold\n self.deal_size = controller._state.rules.Deal_Size\n self.help_text = controller._state.rules.help_text\n if ruleset == 'Liverpool':\n self.buttons_per_player = self.Meld_Threshold[0][0\n ] + self.Meld_Threshold[0][1]\n self.RuleSetsButtons = RuleSetsButtons_LP\n elif ruleset == 'HandAndFoot':\n self.RuleSetsButtons = RuleSetsButtons_HF\n self.hand_scaling = UIC.scale, UIC.Card_Spacing\n self.current_hand = []\n self.last_hand = []\n self.hand_info = []\n self.prepared_cards = []\n self.discards = []\n self.discard_confirm = False\n self.num_wilds = 0\n self.wild_cards = []\n self.selected_list = []\n self.round_index = 0\n self.round_advance = False\n self.num_players = 1\n self.need_updated_buttons = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.RuleSetsButtons.CreateButtons(self)\n\n def update(self, player_index=0, num_players=1, visible_scards=[]):\n \"\"\"This updates the view of the hand, between rounds it displays a message. \"\"\"\n self.visible_scards = visible_scards\n self.controller._state.player_index = player_index\n if (self.num_players > num_players and self.controller._state.rules\n .Shared_Board and not self.need_updated_buttons):\n self.playerLeftGame(num_players)\n self.num_players = num_players\n if self.controller._state.round == -1:\n self.mesgBetweenRounds(self.help_text)\n if self.round_advance:\n self.round_index = self.round_index + 1\n if self.round_index < len(self.Meld_Threshold):\n self.help_text[0] = 'This is the round of ' + str(self.\n Meld_Threshold[self.round_index]) + ' ! '\n self.need_updated_buttons = True\n else:\n self.help_text = [\n 'Game has concluded. 
Scores for each round can be found in command window.'\n ]\n self.round_advance = False\n else:\n if not self.round_index == self.controller._state.round:\n skipped_rounds = (self.controller._state.round - self.\n round_index)\n for idx in range(skipped_rounds):\n score = 0\n self.controller.lateJoinScores(score)\n self.round_index = self.controller._state.round\n self.round_advance = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.last_hand = self.current_hand\n self.current_hand = self.controller.getHand()\n if len(self.current_hand) == 0:\n self.hand_info = []\n elif not self.last_hand == self.current_hand:\n self.hand_info = HandManagement.WrapHand(self, self.\n current_hand, self.hand_info)\n HandManagement.ShowHolding(self, self.hand_info)\n self.RuleSetsButtons.ButtonDisplay(self)\n\n def nextEventWildsOnBoard(self):\n \"\"\"This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.\n\n It is looking for key strokes to designate ambiguous wild cards in runs.\n The mouse is ignored until you designate all the wilds (turn phase goes back to play).\"\"\"\n if self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n print('pygame crash, AAAHHH')\n pygame.quit()\n quit()\n else:\n HandManagement.wildsHiLoGetInput(self)\n\n def nextEvent(self):\n \"\"\"This submits the next user input to the controller,\n\n In games with Shared_Board = False (e.g. HandAndFoot) key strokes don't do anything\n unless designating values for prepared wild cards, at which time the mouse is ignored\n unless you want to clear the prepared cards.\n In games with Shared_Board = True wilds on board might change designation upon other cards being played.\n IF designation cannot be handled automatically (= if wild can be at the beginning or end of a run) then\n it must be designated before play is completed.\n This is done in nextEvenWildsOnBoard. 
All other events are ignored until num_wilds == 0 OR play is canceled.\"\"\"\n if self.controller._state.rules.Shared_Board:\n self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())\n if self.num_wilds > 0:\n self.nextEventWildsOnBoard()\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n print('pygame crash, AAAHHH')\n pygame.quit()\n quit()\n if (not self.controller._state.rules.Shared_Board and self.\n num_wilds > 0):\n wild_instructions = (\n 'Use the keyboard to designate your prepared wild cards \\r\\n '\n )\n wild_instructions = (wild_instructions +\n '(use 0 for 10 and J, Q, or K for facecards).')\n self.controller.note = wild_instructions\n pos = pygame.mouse.get_pos()\n if self.event.type == pygame.MOUSEBUTTONDOWN:\n self.RuleSetsButtons.ClickedButton(self, pos)\n for element in self.hand_info:\n if element.img_clickable.isOver(pos):\n if element.status == 1:\n element.status = 0\n element.img_clickable.changeOutline(0)\n elif element.status == 0:\n element.status = 1\n element.img_clickable.changeOutline(2)\n elif self.event.type == pygame.MOUSEMOTION:\n self.RuleSetsButtons.MouseHiLight(self, pos)\n HandManagement.MouseHiLight(self.hand_info, pos)\n elif self.event.type == pygame.KEYDOWN:\n if self.controller._state.rules.Buy_Option:\n if self.controller.buying_opportunity:\n if self.event.key == pygame.K_y:\n self.controller.wantTopCard(True)\n self.controller.note = (\n 'You have signaled you want to buy the card.')\n elif self.event.key == pygame.K_n:\n self.controller.wantTopCard(False)\n self.controller.note = (\n 'You have signaled you do not want to buy the card.'\n )\n if (not self.controller._state.rules.Shared_Board and self.\n num_wilds > 0):\n HandManagement.ManuallyAssign(self)\n\n def gatherSelected(self):\n \"\"\" gathers selected cards\n in order to take action on selected cards (either discarding them or preparing them)\n \"\"\"\n self.selected_list = []\n for element in self.hand_info:\n if element.status == 1:\n self.selected_list.append(element)\n return self.selected_list\n\n def discardConfirmation(self, confirmed, wrapped_discards):\n \"\"\" Confirm a user is sure about a discard and then perform it once confirmed.\"\"\"\n discards = []\n for element in wrapped_discards:\n discards.append(element.card)\n if self.discards != discards:\n confirmed = False\n self.discards = discards\n if not confirmed:\n self.controller.note = 'Please confirm - discard ' + '{0}'.format(\n self.discards)\n return True\n else:\n if self.discard_confirm:\n controller_response = self.controller.discard(self.discards)\n if controller_response:\n for element in wrapped_discards:\n self.hand_info.remove(element)\n return False\n\n def mesgBetweenRounds(self, message):\n \"\"\"print message where cards usually displayed until Ready button is clicked for next round.\"\"\"\n font = UIC.Medium_Text\n y_offset = UIC.Disp_Height * (1 - UIC.Hand_Row_Fraction * 0.8)\n for message_string in message:\n text_surface = font.render(message_string, True, UIC.Black)\n text_rect = text_surface.get_rect()\n text_rect.center = UIC.Disp_Width * 0.5, y_offset\n y_offset = y_offset + UIC.Medium_Text_Feed\n self.display.blit(text_surface, text_rect)\n\n def labelMedium(self, labelstr, x_offset, y_offset):\n font = UIC.Medium_Text\n text_surface = font.render(labelstr, True, UIC.Bright_Blue)\n text_rect = text_surface.get_rect()\n text_rect.center = x_offset, y_offset\n self.display.blit(text_surface, text_rect)\n\n def playerLeftGame(self, num_players):\n 
self.controller.resetProcessedCards(self.visible_scards)\n self.controller.clearPreparedCards()\n self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.\n hand_info)\n self.controller.note = (\n 'A player has left the game, all prepared cards are automatically cleared.'\n )\n if num_players > 1:\n players_sp_w = UIC.Disp_Width / num_players\n else:\n players_sp_w = UIC.Disp_Width\n for idx in range(num_players):\n for button in self.assign_cards_btns[idx]:\n button.x = 10 + players_sp_w * idx\n",
"step-4": "import pygame\nimport textwrap\nimport client.Button as Btn\nfrom client.ClickableImage import ClickableImage as ClickImg\nfrom client.CreateDisplay import CreateDisplay\nimport client.LiverpoolButtons as RuleSetsButtons_LP\nimport client.HandAndFootButtons as RuleSetsButtons_HF\nimport client.HandManagement as HandManagement\nfrom client.UICardWrapper import UICardWrapper\nimport client.UIConstants as UIC\nfrom common.Card import Card\n\n\nclass HandView:\n \"\"\"This class handles player's cards and enables actions.\n\n Actions are primarily performed using buttons, since these need to somewhat customized by game\n the buttons are in ***.py (*** is Liverpool or HandAndFoot) and are imported as RuleSetsButtons.\n Management of displaying the hand's cards is not game specific, and methods that help with that\n are in HandManagement.py.\n\n Player can arrange their own hand, and prepare to play cards during other players' turns.\n \"\"\"\n\n def __init__(self, controller, display, ruleset):\n self.controller = controller\n self.display = display\n self.ruleset = ruleset\n self.Meld_Threshold = controller._state.rules.Meld_Threshold\n self.deal_size = controller._state.rules.Deal_Size\n self.help_text = controller._state.rules.help_text\n if ruleset == 'Liverpool':\n self.buttons_per_player = self.Meld_Threshold[0][0\n ] + self.Meld_Threshold[0][1]\n self.RuleSetsButtons = RuleSetsButtons_LP\n elif ruleset == 'HandAndFoot':\n self.RuleSetsButtons = RuleSetsButtons_HF\n self.hand_scaling = UIC.scale, UIC.Card_Spacing\n self.current_hand = []\n self.last_hand = []\n self.hand_info = []\n self.prepared_cards = []\n self.discards = []\n self.discard_confirm = False\n self.num_wilds = 0\n self.wild_cards = []\n self.selected_list = []\n self.round_index = 0\n self.round_advance = False\n self.num_players = 1\n self.need_updated_buttons = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.RuleSetsButtons.CreateButtons(self)\n\n def update(self, player_index=0, num_players=1, visible_scards=[]):\n \"\"\"This updates the view of the hand, between rounds it displays a message. \"\"\"\n self.visible_scards = visible_scards\n self.controller._state.player_index = player_index\n if (self.num_players > num_players and self.controller._state.rules\n .Shared_Board and not self.need_updated_buttons):\n self.playerLeftGame(num_players)\n self.num_players = num_players\n if self.controller._state.round == -1:\n self.mesgBetweenRounds(self.help_text)\n if self.round_advance:\n self.round_index = self.round_index + 1\n if self.round_index < len(self.Meld_Threshold):\n self.help_text[0] = 'This is the round of ' + str(self.\n Meld_Threshold[self.round_index]) + ' ! '\n self.need_updated_buttons = True\n else:\n self.help_text = [\n 'Game has concluded. 
Scores for each round can be found in command window.'\n ]\n self.round_advance = False\n else:\n if not self.round_index == self.controller._state.round:\n skipped_rounds = (self.controller._state.round - self.\n round_index)\n for idx in range(skipped_rounds):\n score = 0\n self.controller.lateJoinScores(score)\n self.round_index = self.controller._state.round\n self.round_advance = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.last_hand = self.current_hand\n self.current_hand = self.controller.getHand()\n if len(self.current_hand) == 0:\n self.hand_info = []\n elif not self.last_hand == self.current_hand:\n self.hand_info = HandManagement.WrapHand(self, self.\n current_hand, self.hand_info)\n HandManagement.ShowHolding(self, self.hand_info)\n self.RuleSetsButtons.ButtonDisplay(self)\n\n def nextEventWildsOnBoard(self):\n \"\"\"This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.\n\n It is looking for key strokes to designate ambiguous wild cards in runs.\n The mouse is ignored until you designate all the wilds (turn phase goes back to play).\"\"\"\n if self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n print('pygame crash, AAAHHH')\n pygame.quit()\n quit()\n else:\n HandManagement.wildsHiLoGetInput(self)\n\n def nextEvent(self):\n \"\"\"This submits the next user input to the controller,\n\n In games with Shared_Board = False (e.g. HandAndFoot) key strokes don't do anything\n unless designating values for prepared wild cards, at which time the mouse is ignored\n unless you want to clear the prepared cards.\n In games with Shared_Board = True wilds on board might change designation upon other cards being played.\n IF designation cannot be handled automatically (= if wild can be at the beginning or end of a run) then\n it must be designated before play is completed.\n This is done in nextEvenWildsOnBoard. 
All other events are ignored until num_wilds == 0 OR play is canceled.\"\"\"\n if self.controller._state.rules.Shared_Board:\n self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())\n if self.num_wilds > 0:\n self.nextEventWildsOnBoard()\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n print('pygame crash, AAAHHH')\n pygame.quit()\n quit()\n if (not self.controller._state.rules.Shared_Board and self.\n num_wilds > 0):\n wild_instructions = (\n 'Use the keyboard to designate your prepared wild cards \\r\\n '\n )\n wild_instructions = (wild_instructions +\n '(use 0 for 10 and J, Q, or K for facecards).')\n self.controller.note = wild_instructions\n pos = pygame.mouse.get_pos()\n if self.event.type == pygame.MOUSEBUTTONDOWN:\n self.RuleSetsButtons.ClickedButton(self, pos)\n for element in self.hand_info:\n if element.img_clickable.isOver(pos):\n if element.status == 1:\n element.status = 0\n element.img_clickable.changeOutline(0)\n elif element.status == 0:\n element.status = 1\n element.img_clickable.changeOutline(2)\n elif self.event.type == pygame.MOUSEMOTION:\n self.RuleSetsButtons.MouseHiLight(self, pos)\n HandManagement.MouseHiLight(self.hand_info, pos)\n elif self.event.type == pygame.KEYDOWN:\n if self.controller._state.rules.Buy_Option:\n if self.controller.buying_opportunity:\n if self.event.key == pygame.K_y:\n self.controller.wantTopCard(True)\n self.controller.note = (\n 'You have signaled you want to buy the card.')\n elif self.event.key == pygame.K_n:\n self.controller.wantTopCard(False)\n self.controller.note = (\n 'You have signaled you do not want to buy the card.'\n )\n if (not self.controller._state.rules.Shared_Board and self.\n num_wilds > 0):\n HandManagement.ManuallyAssign(self)\n\n def gatherSelected(self):\n \"\"\" gathers selected cards\n in order to take action on selected cards (either discarding them or preparing them)\n \"\"\"\n self.selected_list = []\n for element in self.hand_info:\n if element.status == 1:\n self.selected_list.append(element)\n return self.selected_list\n\n def discardConfirmation(self, confirmed, wrapped_discards):\n \"\"\" Confirm a user is sure about a discard and then perform it once confirmed.\"\"\"\n discards = []\n for element in wrapped_discards:\n discards.append(element.card)\n if self.discards != discards:\n confirmed = False\n self.discards = discards\n if not confirmed:\n self.controller.note = 'Please confirm - discard ' + '{0}'.format(\n self.discards)\n return True\n else:\n if self.discard_confirm:\n controller_response = self.controller.discard(self.discards)\n if controller_response:\n for element in wrapped_discards:\n self.hand_info.remove(element)\n return False\n\n def mesgBetweenRounds(self, message):\n \"\"\"print message where cards usually displayed until Ready button is clicked for next round.\"\"\"\n font = UIC.Medium_Text\n y_offset = UIC.Disp_Height * (1 - UIC.Hand_Row_Fraction * 0.8)\n for message_string in message:\n text_surface = font.render(message_string, True, UIC.Black)\n text_rect = text_surface.get_rect()\n text_rect.center = UIC.Disp_Width * 0.5, y_offset\n y_offset = y_offset + UIC.Medium_Text_Feed\n self.display.blit(text_surface, text_rect)\n\n def labelMedium(self, labelstr, x_offset, y_offset):\n font = UIC.Medium_Text\n text_surface = font.render(labelstr, True, UIC.Bright_Blue)\n text_rect = text_surface.get_rect()\n text_rect.center = x_offset, y_offset\n self.display.blit(text_surface, text_rect)\n\n def playerLeftGame(self, num_players):\n 
self.controller.resetProcessedCards(self.visible_scards)\n self.controller.clearPreparedCards()\n self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.\n hand_info)\n self.controller.note = (\n 'A player has left the game, all prepared cards are automatically cleared.'\n )\n if num_players > 1:\n players_sp_w = UIC.Disp_Width / num_players\n else:\n players_sp_w = UIC.Disp_Width\n for idx in range(num_players):\n for button in self.assign_cards_btns[idx]:\n button.x = 10 + players_sp_w * idx\n",
"step-5": "import pygame\nimport textwrap\nimport client.Button as Btn\nfrom client.ClickableImage import ClickableImage as ClickImg\nfrom client.CreateDisplay import CreateDisplay\nimport client.LiverpoolButtons as RuleSetsButtons_LP\nimport client.HandAndFootButtons as RuleSetsButtons_HF\nimport client.HandManagement as HandManagement\nfrom client.UICardWrapper import UICardWrapper\nimport client.UIConstants as UIC\nfrom common.Card import Card\n\n\nclass HandView:\n \"\"\"This class handles player's cards and enables actions.\n\n Actions are primarily performed using buttons, since these need to somewhat customized by game\n the buttons are in ***.py (*** is Liverpool or HandAndFoot) and are imported as RuleSetsButtons.\n Management of displaying the hand's cards is not game specific, and methods that help with that\n are in HandManagement.py.\n\n Player can arrange their own hand, and prepare to play cards during other players' turns.\n \"\"\"\n def __init__(self, controller, display, ruleset):\n self.controller = controller\n self.display = display\n self.ruleset = ruleset\n self.Meld_Threshold = controller._state.rules.Meld_Threshold\n self.deal_size = controller._state.rules.Deal_Size\n self.help_text = controller._state.rules.help_text\n if ruleset == 'Liverpool':\n self.buttons_per_player = self.Meld_Threshold[0][0] + self.Meld_Threshold[0][1]\n self.RuleSetsButtons = RuleSetsButtons_LP\n elif ruleset == 'HandAndFoot':\n self.RuleSetsButtons = RuleSetsButtons_HF\n self.hand_scaling = (UIC.scale, UIC.Card_Spacing)\n self.current_hand = []\n self.last_hand = []\n self.hand_info = [] # will contain UICardWrapped elements of current_hand\n self.prepared_cards = [] # will contain list of prepared cards from controller\n self.discards = []\n self.discard_confirm = False\n # num_wilds is HandAndFoot specific, only non-zero if by prepare_card_btn in HandAndFootButtons.py is triggered.\n self.num_wilds = 0\n self.wild_cards = []\n self.selected_list = []\n self.round_index = 0\n self.round_advance = False\n self.num_players = 1\n # In Liverpool and other Shared_Board games: prepare cards buttons must be updated each round\n self.need_updated_buttons = True\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n #\n # if someone joins between rounds, then they won't know the correct meld requirement until the round begins.\n # (self.controller._state.round = -1 until play commences).\n # In HandAndFoot: Correct meld requirement will be written in lower right corner once play commences.\n # In Liverpool: Will see correct buttons once round commences.\n self.RuleSetsButtons.CreateButtons(self)\n\n def update(self, player_index=0, num_players=1, visible_scards = []):\n \"\"\"This updates the view of the hand, between rounds it displays a message. \"\"\"\n\n self.visible_scards = visible_scards\n self.controller._state.player_index = player_index\n if self.num_players > num_players and self.controller._state.rules.Shared_Board \\\n and not self.need_updated_buttons:\n # A player has left the game after the round has begun -- make adjustments so game can continue.\n self.playerLeftGame(num_players)\n self.num_players = num_players\n if self.controller._state.round == -1:\n self.mesgBetweenRounds(self.help_text)\n if self.round_advance:\n self.round_index = self.round_index + 1\n if self.round_index < len(self.Meld_Threshold):\n self.help_text[0] = 'This is the round of ' + str(self.Meld_Threshold[self.round_index]) + ' ! 
'\n self.need_updated_buttons = True # used for Liverpool.\n else:\n self.help_text = ['Game has concluded. Scores for each round can be found in command window.']\n self.round_advance = False\n else:\n if not self.round_index == self.controller._state.round:\n # Need this to true up round_index if a player joins mid-game.\n skipped_rounds = self.controller._state.round - self.round_index\n for idx in range(skipped_rounds):\n #todo: How to score latecomers should be moved to ruleset.\n score = 0\n self.controller.lateJoinScores(score)\n self.round_index = self.controller._state.round\n self.round_advance = True\n # reset outline colors on ready buttons to what they need to be at the start of the \"between rounds\" state.\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.last_hand = self.current_hand\n self.current_hand = self.controller.getHand()\n if len(self.current_hand) == 0:\n self.hand_info = []\n elif not self.last_hand == self.current_hand:\n self.hand_info = HandManagement.WrapHand(self, self.current_hand, self.hand_info)\n HandManagement.ShowHolding(self, self.hand_info) # displays hand\n self.RuleSetsButtons.ButtonDisplay(self)\n\n def nextEventWildsOnBoard(self):\n \"\"\"This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.\n\n It is looking for key strokes to designate ambiguous wild cards in runs.\n The mouse is ignored until you designate all the wilds (turn phase goes back to play).\"\"\"\n\n if self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n # The window crashed, we should handle this\n print(\"pygame crash, AAAHHH\")\n pygame.quit()\n quit()\n else:\n # in Shared_Board games, check if there are wilds that need to be updated.\n # All other events are ignored until play is finished.\n HandManagement.wildsHiLoGetInput(self)\n\n def nextEvent(self):\n \"\"\"This submits the next user input to the controller,\n\n In games with Shared_Board = False (e.g. HandAndFoot) key strokes don't do anything\n unless designating values for prepared wild cards, at which time the mouse is ignored\n unless you want to clear the prepared cards.\n In games with Shared_Board = True wilds on board might change designation upon other cards being played.\n IF designation cannot be handled automatically (= if wild can be at the beginning or end of a run) then\n it must be designated before play is completed.\n This is done in nextEvenWildsOnBoard. 
All other events are ignored until num_wilds == 0 OR play is canceled.\"\"\"\n\n if self.controller._state.rules.Shared_Board:\n self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())\n if self.num_wilds > 0:\n self.nextEventWildsOnBoard()\n\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n # The window crashed, we should handle this\n print(\"pygame crash, AAAHHH\")\n pygame.quit()\n quit()\n\n if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n wild_instructions = 'Use the keyboard to designate your prepared wild cards \\r\\n '\n wild_instructions = wild_instructions + '(use 0 for 10 and J, Q, or K for facecards).'\n self.controller.note = wild_instructions\n pos = pygame.mouse.get_pos()\n\n if self.event.type == pygame.MOUSEBUTTONDOWN:\n self.RuleSetsButtons.ClickedButton(self, pos)\n for element in self.hand_info:\n # cannot select prepared cards, so not included in logic below.\n if element.img_clickable.isOver(pos):\n if element.status == 1:\n element.status = 0\n element.img_clickable.changeOutline(0)\n elif element.status == 0:\n element.status = 1\n element.img_clickable.changeOutline(2)\n\n elif self.event.type == pygame.MOUSEMOTION:\n self.RuleSetsButtons.MouseHiLight(self, pos)\n HandManagement.MouseHiLight(self.hand_info, pos)\n elif self.event.type == pygame.KEYDOWN:\n if self.controller._state.rules.Buy_Option:\n if self.controller.buying_opportunity:\n if self.event.key == pygame.K_y:\n self.controller.wantTopCard(True)\n self.controller.note = 'You have signaled you want to buy the card.'\n elif self.event.key == pygame.K_n:\n self.controller.wantTopCard(False)\n self.controller.note = 'You have signaled you do not want to buy the card.'\n if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n HandManagement.ManuallyAssign(self)\n\n\n def gatherSelected(self):\n \"\"\" gathers selected cards\n in order to take action on selected cards (either discarding them or preparing them)\n \"\"\"\n self.selected_list = []\n for element in self.hand_info:\n if element.status == 1:\n self.selected_list.append(element)\n return self.selected_list\n\n def discardConfirmation(self, confirmed, wrapped_discards):\n \"\"\" Confirm a user is sure about a discard and then perform it once confirmed.\"\"\"\n discards = []\n for element in wrapped_discards:\n discards.append(element.card)\n if self.discards != discards:\n confirmed = False\n self.discards = discards\n if not confirmed:\n self.controller.note = \"Please confirm - discard \" + \"{0}\".format(self.discards)\n return True # ask for confirmation\n else:\n # confirmed is True, performing discard and removing discarded wrapped cards from hand_info.\n if self.discard_confirm:\n controller_response = self.controller.discard(self.discards)\n if controller_response:\n for element in wrapped_discards:\n self.hand_info.remove(element)\n return False # now that this is done, we don't have anything waiting on confirmation\n\n def mesgBetweenRounds(self, message):\n \"\"\"print message where cards usually displayed until Ready button is clicked for next round.\"\"\"\n font = UIC.Medium_Text\n y_offset = (UIC.Disp_Height * (1 - (UIC.Hand_Row_Fraction * 0.8)))\n for message_string in message:\n text_surface = font.render(message_string, True, UIC.Black)\n text_rect = text_surface.get_rect()\n text_rect.center = ((UIC.Disp_Width * 0.5), y_offset)\n y_offset = y_offset + UIC.Medium_Text_Feed\n self.display.blit(text_surface, text_rect)\n\n def labelMedium(self, 
labelstr, x_offset, y_offset):\n font = UIC.Medium_Text\n text_surface = font.render(labelstr, True, UIC.Bright_Blue)\n text_rect = text_surface.get_rect()\n text_rect.center = (x_offset, y_offset)\n self.display.blit(text_surface, text_rect)\n\n def playerLeftGame(self, num_players):\n # a player has disconnected a game with a Shared_Board = True. Must make adjustments to\n # (i) card group dictionaries, (ii) prepared cards & (iii) buttons locations.\n self.controller.resetProcessedCards(self.visible_scards)\n self.controller.clearPreparedCards() # so that prepared cards won't be mistakenly played on wrong group.\n self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.hand_info)\n self.controller.note = \"A player has left the game, all prepared cards are automatically cleared.\"\n # reset set/run button locations:\n if num_players > 1:\n players_sp_w = UIC.Disp_Width / num_players\n else:\n players_sp_w = UIC.Disp_Width\n for idx in range(num_players):\n for button in self.assign_cards_btns[idx]:\n button.x = 10 + (players_sp_w * idx)\n",
"step-ids": [
7,
9,
11,
12,
13
]
}
|
[
7,
9,
11,
12,
13
] |
#! /usr/bin/env python
"""
Normalizes a video by dividing it against its background.
See: BackgroundExtractor.py to get the background of a video.
USING:
As a command line utility:
$ Normalizer.py input_video input_image output_video
As a module:
from Normalizer import Normalizer
    norm = Normalizer()
    norm.normalize(input_image, "input_video.avi", "output_video.avi")
Author: Martin Humphreys
"""
from argparse import ArgumentParser
import numpy as np
import os
import cv2
class Normalizer:
def __init__(self):
pass
def imageFromArg(self, image):
if isinstance(image, (str, unicode)):
return cv2.imread(image, 0)
else:
return image
def videoReaderFromArg(self, video):
if isinstance(video, (str, unicode)):
vc = cv2.VideoCapture(video)
else:
vc = video
return vc
def normalize(self, background, in_video, out_video):
vc = self.videoReaderFromArg(in_video)
frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))
fps = float(vc.get(cv2.CAP_PROP_FPS))
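        # Some containers report an infinite FPS; fall back to a sane default.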
if fps == float('inf'):
fps = 300
width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))
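        # Mirror the source's codec, fps, and size on the writer;
        # isColor=False because frames are written as single-channel grayscale.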
vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)
self.normalizeVideo(background, vc, vw)
def normalizeVideo(self, background, video_reader, video_writer):
f = 1
while(True):
ret, frame = video_reader.read()
if not ret:
                break
else:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
f += 1
normal_frame = self.normalizeFrame(background, frame)
video_writer.write(normal_frame)
def normalizeFrame(self, background, frame):
if callable(background):
bg = background(frame)
else:
bg = self.imageFromArg(background)
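        # Remap both frame and background from [0, 255] into [1, 255] so the
        # division below can never divide by zero, then cap the quotient at
        # 255 before casting back to uint8.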
a = frame.astype('float')
a = self.transformRange(a, 0, 255, 1, 255)
b = bg.astype('float')
b = self.transformRange(b, 0, 255, 1, 255)
c = a/((b+1)/256)
        d = c*(c <= 255)+255*np.ones(np.shape(c))*(c > 255)
return d.astype('uint8')
def transformRange(self, value, oldmin, oldmax, newmin, newmax):
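        # Linearly remap value from [oldmin, oldmax] to [newmin, newmax].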
return (((value - oldmin) * (newmax - newmin)) / (oldmax - oldmin)) + newmin
def build_parser():
parser = ArgumentParser()
parser.add_argument('input_video', help='video to process')
parser.add_argument('background', help='background image')
parser.add_argument('output_video', help='file to save normalized video to')
return parser
def main():
parser = build_parser()
opts = parser.parse_args()
if not os.path.isfile(opts.input_video):
parser.error("Video file %s does not exist." % opts.input_video)
if not os.path.isfile(opts.background):
parser.error("Image file %s does not exist." % opts.background)
norm = Normalizer()
norm.normalize(opts.background, opts.input_video, opts.output_video)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "141e0f20ce912ecf21940f78e9f40cb86b91dc2b",
"index": 6121,
"step-1": "<mask token>\n\n\nclass Normalizer:\n <mask token>\n\n def imageFromArg(self, image):\n if isinstance(image, (str, unicode)):\n return cv2.imread(image, 0)\n else:\n return image\n\n def videoReaderFromArg(self, video):\n if isinstance(video, (str, unicode)):\n vc = cv2.VideoCapture(video)\n else:\n vc = video\n return vc\n\n def normalize(self, background, in_video, out_video):\n vc = self.videoReaderFromArg(in_video)\n frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = float(vc.get(cv2.CAP_PROP_FPS))\n if fps == float('inf'):\n fps = 300\n width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))\n vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)\n self.normalizeVideo(background, vc, vw)\n <mask token>\n <mask token>\n\n def transformRange(self, value, oldmin, oldmax, newmin, newmax):\n return (value - oldmin) * (newmax - newmin) / (oldmax - oldmin\n ) + newmin\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Normalizer:\n\n def __init__(self):\n pass\n\n def imageFromArg(self, image):\n if isinstance(image, (str, unicode)):\n return cv2.imread(image, 0)\n else:\n return image\n\n def videoReaderFromArg(self, video):\n if isinstance(video, (str, unicode)):\n vc = cv2.VideoCapture(video)\n else:\n vc = video\n return vc\n\n def normalize(self, background, in_video, out_video):\n vc = self.videoReaderFromArg(in_video)\n frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = float(vc.get(cv2.CAP_PROP_FPS))\n if fps == float('inf'):\n fps = 300\n width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))\n vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)\n self.normalizeVideo(background, vc, vw)\n <mask token>\n\n def normalizeFrame(self, background, frame):\n if callable(background):\n bg = background(frame)\n else:\n bg = self.imageFromArg(background)\n a = frame.astype('float')\n a = self.transformRange(a, 0, 255, 1, 255)\n b = bg.astype('float')\n b = self.transformRange(b, 0, 255, 1, 255)\n c = a / ((b + 1) / 256)\n d = c * (c < 255) + 255 * np.ones(np.shape(c)) * (c > 255)\n return d.astype('uint8')\n\n def transformRange(self, value, oldmin, oldmax, newmin, newmax):\n return (value - oldmin) * (newmax - newmin) / (oldmax - oldmin\n ) + newmin\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Normalizer:\n\n def __init__(self):\n pass\n\n def imageFromArg(self, image):\n if isinstance(image, (str, unicode)):\n return cv2.imread(image, 0)\n else:\n return image\n\n def videoReaderFromArg(self, video):\n if isinstance(video, (str, unicode)):\n vc = cv2.VideoCapture(video)\n else:\n vc = video\n return vc\n\n def normalize(self, background, in_video, out_video):\n vc = self.videoReaderFromArg(in_video)\n frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = float(vc.get(cv2.CAP_PROP_FPS))\n if fps == float('inf'):\n fps = 300\n width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))\n vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)\n self.normalizeVideo(background, vc, vw)\n\n def normalizeVideo(self, background, video_reader, video_writer):\n f = 1\n while True:\n ret, frame = video_reader.read()\n if not ret:\n break\n else:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n f += 1\n normal_frame = self.normalizeFrame(background, frame)\n video_writer.write(normal_frame)\n\n def normalizeFrame(self, background, frame):\n if callable(background):\n bg = background(frame)\n else:\n bg = self.imageFromArg(background)\n a = frame.astype('float')\n a = self.transformRange(a, 0, 255, 1, 255)\n b = bg.astype('float')\n b = self.transformRange(b, 0, 255, 1, 255)\n c = a / ((b + 1) / 256)\n d = c * (c < 255) + 255 * np.ones(np.shape(c)) * (c > 255)\n return d.astype('uint8')\n\n def transformRange(self, value, oldmin, oldmax, newmin, newmax):\n return (value - oldmin) * (newmax - newmin) / (oldmax - oldmin\n ) + newmin\n\n\ndef build_parser():\n parser = ArgumentParser()\n parser.add_argument('input_video', help='video to process')\n parser.add_argument('background', help='background image')\n parser.add_argument('output_video', help='file to save normalized video to'\n )\n return parser\n\n\ndef main():\n parser = build_parser()\n opts = parser.parse_args()\n if not os.path.isfile(opts.input_video):\n parser.error('Video file %s does not exist.' % opts.input_video)\n if not os.path.isfile(opts.background):\n parser.error('Image file %s does not exist.' % opts.background)\n norm = Normalizer()\n norm.normalize(opts.background, opts.input_video, opts.output_video)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Normalizer:\n\n def __init__(self):\n pass\n\n def imageFromArg(self, image):\n if isinstance(image, (str, unicode)):\n return cv2.imread(image, 0)\n else:\n return image\n\n def videoReaderFromArg(self, video):\n if isinstance(video, (str, unicode)):\n vc = cv2.VideoCapture(video)\n else:\n vc = video\n return vc\n\n def normalize(self, background, in_video, out_video):\n vc = self.videoReaderFromArg(in_video)\n frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = float(vc.get(cv2.CAP_PROP_FPS))\n if fps == float('inf'):\n fps = 300\n width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))\n vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)\n self.normalizeVideo(background, vc, vw)\n\n def normalizeVideo(self, background, video_reader, video_writer):\n f = 1\n while True:\n ret, frame = video_reader.read()\n if not ret:\n break\n else:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n f += 1\n normal_frame = self.normalizeFrame(background, frame)\n video_writer.write(normal_frame)\n\n def normalizeFrame(self, background, frame):\n if callable(background):\n bg = background(frame)\n else:\n bg = self.imageFromArg(background)\n a = frame.astype('float')\n a = self.transformRange(a, 0, 255, 1, 255)\n b = bg.astype('float')\n b = self.transformRange(b, 0, 255, 1, 255)\n c = a / ((b + 1) / 256)\n d = c * (c < 255) + 255 * np.ones(np.shape(c)) * (c > 255)\n return d.astype('uint8')\n\n def transformRange(self, value, oldmin, oldmax, newmin, newmax):\n return (value - oldmin) * (newmax - newmin) / (oldmax - oldmin\n ) + newmin\n\n\ndef build_parser():\n parser = ArgumentParser()\n parser.add_argument('input_video', help='video to process')\n parser.add_argument('background', help='background image')\n parser.add_argument('output_video', help='file to save normalized video to'\n )\n return parser\n\n\ndef main():\n parser = build_parser()\n opts = parser.parse_args()\n if not os.path.isfile(opts.input_video):\n parser.error('Video file %s does not exist.' % opts.input_video)\n if not os.path.isfile(opts.background):\n parser.error('Image file %s does not exist.' % opts.background)\n norm = Normalizer()\n norm.normalize(opts.background, opts.input_video, opts.output_video)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#! /usr/bin/env python\n\"\"\"\nNormalizes a vidoe by dividing against it's background.\nSee: BackgroundExtractor.py to get the background of a video.\n\nUSING:\n\n As a command line utility:\n \n $ Normalizer.py input_video input_image output_video\n \n As a module:\n \n from Normalizer import Normalizer\n norm = Normalizer(\"input_video.avi\", input_image, \"output_video.avi\")\n norm.normalize()\n \n\nAuthor: Martin Humphreys\n\"\"\"\n\nfrom argparse import ArgumentParser\nimport numpy as np\nimport os\nimport cv2\n\n\n\nclass Normalizer:\n\n def __init__(self):\n pass\n \n def imageFromArg(self, image):\n if isinstance(image, (str, unicode)):\n return cv2.imread(image, 0)\n else:\n return image\n \n def videoReaderFromArg(self, video):\n if isinstance(video, (str, unicode)):\n vc = cv2.VideoCapture(video)\n else:\n vc = video\n return vc\n \n def normalize(self, background, in_video, out_video):\n vc = self.videoReaderFromArg(in_video)\n frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = float(vc.get(cv2.CAP_PROP_FPS)) \n if fps == float('inf'):\n fps = 300\n width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))\n vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)\n \n self.normalizeVideo(background, vc, vw)\n \n \n def normalizeVideo(self, background, video_reader, video_writer):\n f = 1\n while(True):\n ret, frame = video_reader.read()\n \n if not ret:\n break;\n else:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n f += 1\n normal_frame = self.normalizeFrame(background, frame)\n video_writer.write(normal_frame)\n \n def normalizeFrame(self, background, frame):\n \n \n \n if callable(background):\n bg = background(frame)\n else:\n bg = self.imageFromArg(background)\n \n \n a = frame.astype('float')\n a = self.transformRange(a, 0, 255, 1, 255)\n \n \n b = bg.astype('float')\n b = self.transformRange(b, 0, 255, 1, 255)\n \n \n c = a/((b+1)/256)\n d = c*(c < 255)+255*np.ones(np.shape(c))*(c > 255)\n \n return d.astype('uint8') \n \n def transformRange(self, value, oldmin, oldmax, newmin, newmax):\n return (((value - oldmin) * (newmax - newmin)) / (oldmax - oldmin)) + newmin\n \ndef build_parser():\n parser = ArgumentParser()\n parser.add_argument('input_video', help='video to process')\n parser.add_argument('background', help='background image')\n parser.add_argument('output_video', help='file to save normalized video to')\n return parser\n\n\ndef main():\n parser = build_parser()\n opts = parser.parse_args()\n if not os.path.isfile(opts.input_video):\n parser.error(\"Video file %s does not exist.\" % opts.input_video)\n if not os.path.isfile(opts.background):\n parser.error(\"Image file %s does not exist.\" % opts.background)\n norm = Normalizer()\n norm.normalize(opts.background, opts.input_video, opts.output_video)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
5,
7,
10,
11,
13
]
}
|
[
5,
7,
10,
11,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import enter
import loginout
import roleinfo
import zhanyi
import package
<|reserved_special_token_1|>
import enter
import loginout
import roleinfo
import zhanyi
import package
#import matrix
|
flexible
|
{
"blob_id": "de665735f02c7569ab382fdc3e910d5d3ac05bb5",
"index": 9088,
"step-1": "<mask token>\n",
"step-2": "import enter\nimport loginout\nimport roleinfo\nimport zhanyi\nimport package\n",
"step-3": "import enter\nimport loginout\nimport roleinfo\nimport zhanyi\nimport package\n#import matrix",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import librosa
import soundfile
import os, glob
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
emotionsRavdessData = {
'01': 'neutral',
'02': 'calm',
'03': 'happy',
'04': 'sad',
'05': 'angry',
'06': 'fearful',
'07': 'disgust',
'08': 'surprised'
}
observed_emotions = ['neutral', 'calm', 'happy', 'disgust', 'sad', 'angry']
def extract_feature(file_name, mfcc, chroma, mel):
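    # Stack time-averaged MFCC (40 coefficients), chroma (12 pitch classes),
    # and mel-spectrogram (128 bands by librosa default) into one 1-D vector.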
with soundfile.SoundFile(file_name) as file:
X = file.read(dtype="float32")
sample_rate = file.samplerate
if chroma:
stft = np.abs(librosa.stft(X))
result = np.array([])
if mfcc:
mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
result = np.hstack((result, mfccs))
if chroma:
chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)
result = np.hstack((result, chroma))
if mel:
            mel = np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T, axis=0)
result = np.hstack((result, mel))
return result
def load_dataset(test_size=0.15):
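    # RAVDESS filenames encode the emotion as the third '-'-separated field;
    # TESS filenames encode it as the last '_'-separated field before '.wav'.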
x, y = [], []
# Ravdess Dataset
for file in glob.glob("DataSets/ravdessData/Actor_*/*.wav"):
file_name = os.path.basename(file)
emotion = emotionsRavdessData[file_name.split("-")[2]]
if emotion not in observed_emotions:
continue
feature = extract_feature(file, mfcc=True, chroma=True, mel=True)
x.append(feature)
y.append(emotion)
# TESS Toronto Dataset
for file in glob.glob("DataSets/TESS_Toronto_emotional_speech_set_data/OAF_*/*.wav"):
file_name = os.path.basename(file)
emotion = file_name.split("_")[2].split(".")[0]
if emotion not in observed_emotions:
continue
feature = extract_feature(file, mfcc=True, chroma=True, mel=True)
x.append(feature)
y.append(emotion)
return train_test_split(np.array(x), y, test_size=test_size, random_state=9)
x_train, x_test, y_train, y_test = load_dataset(test_size=0.15)
model = MLPClassifier(alpha=0.01, batch_size=256, epsilon=1e-08, hidden_layer_sizes=(300,), learning_rate='adaptive',
max_iter=500)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
accuracy = accuracy_score(y_true=y_test, y_pred=y_pred)
## Train info
# print((x_train.shape[0], x_test.shape[0]))
# print(f'Features extracted: {x_train.shape[1]}')
# print("Accuracy: {:.2f}%".format(accuracy*100))
def emotionRecognize(file):
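    # Predict the emotion label for a single audio file; returns None if
    # feature extraction or prediction fails.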
try:
new_emotion = extract_feature(file, mfcc=True, chroma=True, mel=True)
new_emotion = new_emotion.tolist()
new_emotion = [new_emotion]
new_emotion = np.array(new_emotion)
return model.predict(new_emotion)[0]
    except Exception:
return None
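## Example usage (hypothetical path; any wav readable by soundfile works):
# print(emotionRecognize('DataSets/ravdessData/Actor_01/03-01-01-01-01-01-01.wav'))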
|
normal
|
{
"blob_id": "8cd54362680aa3a96babe100b9231f6f16b3f577",
"index": 6670,
"step-1": "<mask token>\n\n\ndef extract_feature(file_name, mfcc, chroma, mel):\n with soundfile.SoundFile(file_name) as file:\n X = file.read(dtype='float32')\n sample_rate = file.samplerate\n if chroma:\n stft = np.abs(librosa.stft(X))\n result = np.array([])\n if mfcc:\n mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate,\n n_mfcc=40).T, axis=0)\n result = np.hstack((result, mfccs))\n if chroma:\n chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=\n sample_rate).T, axis=0)\n result = np.hstack((result, chroma))\n if mel:\n mel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate)\n .T, axis=0)\n result = np.hstack((result, mel))\n return result\n\n\ndef load_dataset(test_size=0.15):\n x, y = [], []\n for file in glob.glob('DataSets/ravdessData/Actor_*/*.wav'):\n file_name = os.path.basename(file)\n emotion = emotionsRavdessData[file_name.split('-')[2]]\n if emotion not in observed_emotions:\n continue\n feature = extract_feature(file, mfcc=True, chroma=True, mel=True)\n x.append(feature)\n y.append(emotion)\n for file in glob.glob(\n 'DataSets/TESS_Toronto_emotional_speech_set_data/OAF_*/*.wav'):\n file_name = os.path.basename(file)\n emotion = file_name.split('_')[2].split('.')[0]\n if emotion not in observed_emotions:\n continue\n feature = extract_feature(file, mfcc=True, chroma=True, mel=True)\n x.append(feature)\n y.append(emotion)\n return train_test_split(np.array(x), y, test_size=test_size, random_state=9\n )\n\n\n<mask token>\n\n\ndef emotionRecognize(file):\n try:\n new_emotion = extract_feature(file, mfcc=True, chroma=True, mel=True)\n new_emotion = new_emotion.tolist()\n new_emotion = [new_emotion]\n new_emotion = np.array(new_emotion)\n return model.predict(new_emotion)[0]\n except:\n return None\n",
"step-2": "<mask token>\n\n\ndef extract_feature(file_name, mfcc, chroma, mel):\n with soundfile.SoundFile(file_name) as file:\n X = file.read(dtype='float32')\n sample_rate = file.samplerate\n if chroma:\n stft = np.abs(librosa.stft(X))\n result = np.array([])\n if mfcc:\n mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate,\n n_mfcc=40).T, axis=0)\n result = np.hstack((result, mfccs))\n if chroma:\n chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=\n sample_rate).T, axis=0)\n result = np.hstack((result, chroma))\n if mel:\n mel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate)\n .T, axis=0)\n result = np.hstack((result, mel))\n return result\n\n\ndef load_dataset(test_size=0.15):\n x, y = [], []\n for file in glob.glob('DataSets/ravdessData/Actor_*/*.wav'):\n file_name = os.path.basename(file)\n emotion = emotionsRavdessData[file_name.split('-')[2]]\n if emotion not in observed_emotions:\n continue\n feature = extract_feature(file, mfcc=True, chroma=True, mel=True)\n x.append(feature)\n y.append(emotion)\n for file in glob.glob(\n 'DataSets/TESS_Toronto_emotional_speech_set_data/OAF_*/*.wav'):\n file_name = os.path.basename(file)\n emotion = file_name.split('_')[2].split('.')[0]\n if emotion not in observed_emotions:\n continue\n feature = extract_feature(file, mfcc=True, chroma=True, mel=True)\n x.append(feature)\n y.append(emotion)\n return train_test_split(np.array(x), y, test_size=test_size, random_state=9\n )\n\n\n<mask token>\nmodel.fit(x_train, y_train)\n<mask token>\n\n\ndef emotionRecognize(file):\n try:\n new_emotion = extract_feature(file, mfcc=True, chroma=True, mel=True)\n new_emotion = new_emotion.tolist()\n new_emotion = [new_emotion]\n new_emotion = np.array(new_emotion)\n return model.predict(new_emotion)[0]\n except:\n return None\n",
"step-3": "<mask token>\nemotionsRavdessData = {'01': 'neutral', '02': 'calm', '03': 'happy', '04':\n 'sad', '05': 'angry', '06': 'fearful', '07': 'disgust', '08': 'surprised'}\nobserved_emotions = ['neutral', 'calm', 'happy', 'disgust', 'sad', 'angry']\n\n\ndef extract_feature(file_name, mfcc, chroma, mel):\n with soundfile.SoundFile(file_name) as file:\n X = file.read(dtype='float32')\n sample_rate = file.samplerate\n if chroma:\n stft = np.abs(librosa.stft(X))\n result = np.array([])\n if mfcc:\n mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate,\n n_mfcc=40).T, axis=0)\n result = np.hstack((result, mfccs))\n if chroma:\n chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=\n sample_rate).T, axis=0)\n result = np.hstack((result, chroma))\n if mel:\n mel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate)\n .T, axis=0)\n result = np.hstack((result, mel))\n return result\n\n\ndef load_dataset(test_size=0.15):\n x, y = [], []\n for file in glob.glob('DataSets/ravdessData/Actor_*/*.wav'):\n file_name = os.path.basename(file)\n emotion = emotionsRavdessData[file_name.split('-')[2]]\n if emotion not in observed_emotions:\n continue\n feature = extract_feature(file, mfcc=True, chroma=True, mel=True)\n x.append(feature)\n y.append(emotion)\n for file in glob.glob(\n 'DataSets/TESS_Toronto_emotional_speech_set_data/OAF_*/*.wav'):\n file_name = os.path.basename(file)\n emotion = file_name.split('_')[2].split('.')[0]\n if emotion not in observed_emotions:\n continue\n feature = extract_feature(file, mfcc=True, chroma=True, mel=True)\n x.append(feature)\n y.append(emotion)\n return train_test_split(np.array(x), y, test_size=test_size, random_state=9\n )\n\n\nx_train, x_test, y_train, y_test = load_dataset(test_size=0.15)\nmodel = MLPClassifier(alpha=0.01, batch_size=256, epsilon=1e-08,\n hidden_layer_sizes=(300,), learning_rate='adaptive', max_iter=500)\nmodel.fit(x_train, y_train)\ny_pred = model.predict(x_test)\naccuracy = accuracy_score(y_true=y_test, y_pred=y_pred)\n\n\ndef emotionRecognize(file):\n try:\n new_emotion = extract_feature(file, mfcc=True, chroma=True, mel=True)\n new_emotion = new_emotion.tolist()\n new_emotion = [new_emotion]\n new_emotion = np.array(new_emotion)\n return model.predict(new_emotion)[0]\n except:\n return None\n",
"step-4": "import librosa\nimport soundfile\nimport os, glob\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import accuracy_score\nemotionsRavdessData = {'01': 'neutral', '02': 'calm', '03': 'happy', '04':\n 'sad', '05': 'angry', '06': 'fearful', '07': 'disgust', '08': 'surprised'}\nobserved_emotions = ['neutral', 'calm', 'happy', 'disgust', 'sad', 'angry']\n\n\ndef extract_feature(file_name, mfcc, chroma, mel):\n with soundfile.SoundFile(file_name) as file:\n X = file.read(dtype='float32')\n sample_rate = file.samplerate\n if chroma:\n stft = np.abs(librosa.stft(X))\n result = np.array([])\n if mfcc:\n mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate,\n n_mfcc=40).T, axis=0)\n result = np.hstack((result, mfccs))\n if chroma:\n chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=\n sample_rate).T, axis=0)\n result = np.hstack((result, chroma))\n if mel:\n mel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate)\n .T, axis=0)\n result = np.hstack((result, mel))\n return result\n\n\ndef load_dataset(test_size=0.15):\n x, y = [], []\n for file in glob.glob('DataSets/ravdessData/Actor_*/*.wav'):\n file_name = os.path.basename(file)\n emotion = emotionsRavdessData[file_name.split('-')[2]]\n if emotion not in observed_emotions:\n continue\n feature = extract_feature(file, mfcc=True, chroma=True, mel=True)\n x.append(feature)\n y.append(emotion)\n for file in glob.glob(\n 'DataSets/TESS_Toronto_emotional_speech_set_data/OAF_*/*.wav'):\n file_name = os.path.basename(file)\n emotion = file_name.split('_')[2].split('.')[0]\n if emotion not in observed_emotions:\n continue\n feature = extract_feature(file, mfcc=True, chroma=True, mel=True)\n x.append(feature)\n y.append(emotion)\n return train_test_split(np.array(x), y, test_size=test_size, random_state=9\n )\n\n\nx_train, x_test, y_train, y_test = load_dataset(test_size=0.15)\nmodel = MLPClassifier(alpha=0.01, batch_size=256, epsilon=1e-08,\n hidden_layer_sizes=(300,), learning_rate='adaptive', max_iter=500)\nmodel.fit(x_train, y_train)\ny_pred = model.predict(x_test)\naccuracy = accuracy_score(y_true=y_test, y_pred=y_pred)\n\n\ndef emotionRecognize(file):\n try:\n new_emotion = extract_feature(file, mfcc=True, chroma=True, mel=True)\n new_emotion = new_emotion.tolist()\n new_emotion = [new_emotion]\n new_emotion = np.array(new_emotion)\n return model.predict(new_emotion)[0]\n except:\n return None\n",
"step-5": "import librosa\nimport soundfile\nimport os, glob\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import accuracy_score\n\nemotionsRavdessData = {\n '01': 'neutral',\n '02': 'calm',\n '03': 'happy',\n '04': 'sad',\n '05': 'angry',\n '06': 'fearful',\n '07': 'disgust',\n '08': 'surprised'\n}\n\nobserved_emotions = ['neutral', 'calm', 'happy', 'disgust', 'sad', 'angry']\n\n\ndef extract_feature(file_name, mfcc, chroma, mel):\n with soundfile.SoundFile(file_name) as file:\n X = file.read(dtype=\"float32\")\n sample_rate = file.samplerate\n if chroma:\n stft = np.abs(librosa.stft(X))\n result = np.array([])\n if mfcc:\n mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)\n result = np.hstack((result, mfccs))\n if chroma:\n chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)\n result = np.hstack((result, chroma))\n if mel:\n mel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T, axis=0)\n result = np.hstack((result, mel))\n return result\n\n\ndef load_dataset(test_size=0.15):\n x, y = [], []\n # Ravdess Dataset\n for file in glob.glob(\"DataSets/ravdessData/Actor_*/*.wav\"):\n file_name = os.path.basename(file)\n emotion = emotionsRavdessData[file_name.split(\"-\")[2]]\n if emotion not in observed_emotions:\n continue\n feature = extract_feature(file, mfcc=True, chroma=True, mel=True)\n x.append(feature)\n y.append(emotion)\n # TESS Toronto Dataset\n for file in glob.glob(\"DataSets/TESS_Toronto_emotional_speech_set_data/OAF_*/*.wav\"):\n file_name = os.path.basename(file)\n emotion = file_name.split(\"_\")[2].split(\".\")[0]\n if emotion not in observed_emotions:\n continue\n feature = extract_feature(file, mfcc=True, chroma=True, mel=True)\n x.append(feature)\n y.append(emotion)\n return train_test_split(np.array(x), y, test_size=test_size, random_state=9)\n\n\nx_train, x_test, y_train, y_test = load_dataset(test_size=0.15)\nmodel = MLPClassifier(alpha=0.01, batch_size=256, epsilon=1e-08, hidden_layer_sizes=(300,), learning_rate='adaptive',\n max_iter=500)\nmodel.fit(x_train, y_train)\ny_pred = model.predict(x_test)\naccuracy = accuracy_score(y_true=y_test, y_pred=y_pred)\n\n\n## Train info\n# print((x_train.shape[0], x_test.shape[0]))\n# print(f'Features extracted: {x_train.shape[1]}')\n# print(\"Accuracy: {:.2f}%\".format(accuracy*100))\n\n\ndef emotionRecognize(file):\n try:\n new_emotion = extract_feature(file, mfcc=True, chroma=True, mel=True)\n new_emotion = new_emotion.tolist()\n new_emotion = [new_emotion]\n new_emotion = np.array(new_emotion)\n return model.predict(new_emotion)[0]\n except:\n return None\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [url('^$', CommentListAPIView.as_view(), name='list'), url(
'^(?P<pk>\\d+)/$', CommentDetailAPIView, name='detail')]
<|reserved_special_token_1|>
from django.conf.urls import url
from django.contrib import admin
from comments.api.views import CommentListAPIView, CommentDetailAPIView
urlpatterns = [url('^$', CommentListAPIView.as_view(), name='list'), url(
'^(?P<pk>\\d+)/$', CommentDetailAPIView, name='detail')]
<|reserved_special_token_1|>
from django.conf.urls import url
from django.contrib import admin
from comments.api.views import CommentListAPIView, CommentDetailAPIView
urlpatterns = [
url(r'^$', CommentListAPIView.as_view(), name='list'),
url(r'^(?P<pk>\d+)/$', CommentDetailAPIView, name='detail'),
]
|
flexible
|
{
"blob_id": "e08820ff4fb35a3770fcb110ef7181aad1abbae5",
"index": 8778,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^$', CommentListAPIView.as_view(), name='list'), url(\n '^(?P<pk>\\\\d+)/$', CommentDetailAPIView, name='detail')]\n",
"step-3": "from django.conf.urls import url\nfrom django.contrib import admin\nfrom comments.api.views import CommentListAPIView, CommentDetailAPIView\nurlpatterns = [url('^$', CommentListAPIView.as_view(), name='list'), url(\n '^(?P<pk>\\\\d+)/$', CommentDetailAPIView, name='detail')]\n",
"step-4": "from django.conf.urls import url\nfrom django.contrib import admin\n\nfrom comments.api.views import CommentListAPIView, CommentDetailAPIView\n\nurlpatterns = [\n url(r'^$', CommentListAPIView.as_view(), name='list'),\n url(r'^(?P<pk>\\d+)/$', CommentDetailAPIView, name='detail'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import config as cfg
import numpy as np
class lfwdata():
def __init__(self):
self._pairs = []
pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))
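        # The first line of pairs.txt is a header; skip it.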
pairs.readline()
for pair in pairs:
pair = pair.split()
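            # 3 tokens (name idx1 idx2): two images of the same person.
            # 4 tokens (name1 idx1 name2 idx2): images of different people.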
if len(pair) == 3:
img1 = os.path.join(
pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[1])))
img2 = os.path.join(
pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[2])))
label = True
elif len(pair) == 4:
img1 = os.path.join(
pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[1])))
img2 = os.path.join(
pair[2], pair[2] + '_{:04d}.jpg'.format(int(pair[3])))
label = False
else:
assert False, pair
self._pairs.append({'img': [img1, img2], 'label': label})
print('Number of pairs: {}'.format(len(self._pairs)))
if __name__ == '__main__':
pairs = lfwdata()
|
normal
|
{
"blob_id": "ccdd7a5e0a1de75762530a7cadd919a2ee753d18",
"index": 1758,
"step-1": "<mask token>\n\n\nclass lfwdata:\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass lfwdata:\n\n def __init__(self):\n self._pairs = []\n pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))\n pairs.readline()\n for pair in pairs:\n pair = pair.split()\n if len(pair) == 3:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[2])))\n label = True\n elif len(pair) == 4:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[2], pair[2] + '_{:04d}.jpg'.format\n (int(pair[3])))\n label = False\n else:\n assert False, pair\n self._pairs.append({'img': [img1, img2], 'label': label})\n print('Number of pairs: {}'.format(len(self._pairs)))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass lfwdata:\n\n def __init__(self):\n self._pairs = []\n pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))\n pairs.readline()\n for pair in pairs:\n pair = pair.split()\n if len(pair) == 3:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[2])))\n label = True\n elif len(pair) == 4:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[2], pair[2] + '_{:04d}.jpg'.format\n (int(pair[3])))\n label = False\n else:\n assert False, pair\n self._pairs.append({'img': [img1, img2], 'label': label})\n print('Number of pairs: {}'.format(len(self._pairs)))\n\n\nif __name__ == '__main__':\n pairs = lfwdata()\n",
"step-4": "import os\nimport config as cfg\nimport numpy as np\n\n\nclass lfwdata:\n\n def __init__(self):\n self._pairs = []\n pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))\n pairs.readline()\n for pair in pairs:\n pair = pair.split()\n if len(pair) == 3:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[2])))\n label = True\n elif len(pair) == 4:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[2], pair[2] + '_{:04d}.jpg'.format\n (int(pair[3])))\n label = False\n else:\n assert False, pair\n self._pairs.append({'img': [img1, img2], 'label': label})\n print('Number of pairs: {}'.format(len(self._pairs)))\n\n\nif __name__ == '__main__':\n pairs = lfwdata()\n",
"step-5": "import os\nimport config as cfg\nimport numpy as np\n\n\nclass lfwdata():\n\n def __init__(self):\n self._pairs = []\n\n pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))\n pairs.readline()\n for pair in pairs:\n pair = pair.split()\n if len(pair) == 3:\n img1 = os.path.join(\n pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[1])))\n img2 = os.path.join(\n pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[2])))\n label = True\n elif len(pair) == 4:\n img1 = os.path.join(\n pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[1])))\n img2 = os.path.join(\n pair[2], pair[2] + '_{:04d}.jpg'.format(int(pair[3])))\n label = False\n else:\n assert False, pair\n self._pairs.append({'img': [img1, img2], 'label': label})\n\n print('Number of pairs: {}'.format(len(self._pairs)))\n\nif __name__ == '__main__':\n\n pairs = lfwdata()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Brice Chou'
import os
import lib
import sys
import time
import getopt
import training
try:
import cv2
import h5py
except Exception as e:
error_info = 'Please install h5py/cv2 tools first. Error: {}.\n'.format(e)
print('\033[0;31m%s\033[0m' % error_info)
quit()
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def run():
# Set the window name
window_name = __author__
# Get a reference to webcam #-1 (the last one)
video_capture = cv2.VideoCapture(-1)
# Initialize some variables
unknown_folder_path = os.path.abspath('unknown')
i = lib.get_file_max_number(unknown_folder_path)
filerd = h5py.File('database/training_encodings.hdf5', 'r')
# Image encodings mode
encodings_mode = 'large'
# Temp to save predict result name
face_names = []
# Save the screen locations and encodings to find a person
screen_locations = []
screen_encodings = []
# Save the training data from database
training_names = []
training_eigenvalues = []
process_this_frame = True
for key in filerd.keys():
training_names.append(filerd[key].name.split('/')[-1])
training_eigenvalues.append(filerd[key].value)
filerd.close()
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
        # Resize frame of video to 1/2 size
# for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings
# in the current frame of video
screen_locations = lib.face_locations(small_frame, 1,
'hog')
screen_encodings = lib.face_encodings(small_frame, None,
1, encodings_mode)
face_names = []
            # How many faces are in the screen
detected_face_length = len(screen_locations)
            info = 'We detected \033[0;32m{}\033[0m faces on the screen.\n'
print(info.format(detected_face_length))
if detected_face_length >= 1:
for screen_encoding in screen_encodings:
# Compare the locations and get the face's name
name = lib.compare_faces(training_eigenvalues,
training_names,
screen_encoding, 0.31)
face_names.append(name)
# Auto save the unknown images
if '' == name:
img_file_path = '{}/{}.jpg'.format(
unknown_folder_path, i)
cv2.imwrite(img_file_path, frame)
i += 1
time.sleep(0.15)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(screen_locations, face_names):
            # The frame we detected in was scaled to 1/2 size, so scale back up
top *= 2
right *= 2
bottom *= 2
left *= 2
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
if '' != name:
                # Draw a label with a name below the face
                # (cv2.FILLED was cv2.cv.CV_FILLED in OpenCV 2.x)
cv2.rectangle(frame, (left - 60, bottom + 30),
(right + 60, bottom - 10), (0, 0, 255),
cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left - 50, bottom + 20),
font, 1, (255, 255, 255), 1)
# Display the resulting image
cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
        # (cv2.WINDOW_FULLSCREEN was cv2.cv.CV_WINDOW_FULLSCREEN in OpenCV 2.x)
cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
cv2.WINDOW_FULLSCREEN)
cv2.imshow(window_name, frame)
key = cv2.waitKey(1)
if key == ord('s'):
label = 'cache/{}.jpg'.format(i)
cv2.imwrite(label, frame)
i += 1
elif key == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
argv_list = argv[1:]
opts, args = getopt.getopt(argv_list, 'h', ['help'])
arg = argv_list[0]
if 'run' == arg:
run()
elif 'save' == arg:
training.save()
elif 'move' == arg:
training.move()
elif 'detect' == arg:
training.detect()
elif 'catch' == arg:
if 2 == len(argv_list):
training.catch(argv_list[1])
else:
training.catch()
elif 'rotate' == arg:
if 2 == len(argv_list):
training.rotate(amount=int(argv_list[1]))
else:
training.rotate()
        except getopt.error as msg:
            raise Usage(msg)
    except Usage as err:
        print(err.msg, file=sys.stderr)
        print('for help use --help', file=sys.stderr)
return 2
if __name__ == '__main__':
lib.initial_project_folder()
sys.exit(main())
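
Before the webcam loop can run, database/training_encodings.hdf5 must exist in the layout read above: one dataset per known person, keyed by that person's name. A minimal sketch of producing and reloading such a file; the names and the 128-dimensional encoding size are assumptions for illustration, and note that Dataset.value (used above) is deprecated in newer h5py, where ds[()] is the equivalent:

import h5py
import numpy as np

# Write one encoding per person (names and vector size are illustrative):
with h5py.File('database/training_encodings.hdf5', 'w') as fw:
    fw.create_dataset('Alice', data=np.random.rand(128))
    fw.create_dataset('Bob', data=np.random.rand(128))

# Read it back the same way run() does:
with h5py.File('database/training_encodings.hdf5', 'r') as fr:
    names = [fr[key].name.split('/')[-1] for key in fr.keys()]
    eigenvalues = [fr[key][()] for key in fr.keys()]  # modern spelling of .value
print(names)  # ['Alice', 'Bob']

The entry point dispatches on its first positional argument, so the loop above is started with the 'run' subcommand and the other subcommands (save, move, detect, catch, rotate) delegate to the training module.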
|
normal
|
{
"blob_id": "398263b65fd98003f27020e46ae38e913dc5dd45",
"index": 323,
"step-1": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'Brice Chou'\n\nimport os\nimport lib\nimport sys\nimport time\nimport getopt\nimport training\n\ntry:\n import cv2\n import h5py\nexcept Exception as e:\n error_info = 'Please install h5py/cv2 tools first. Error: {}.\\n'.format(e)\n print('\\033[0;31m%s\\033[0m' % error_info)\n quit()\n\n\nclass Usage(Exception):\n def __init__(self, msg):\n self.msg = msg\n\n\ndef run():\n # Set the window name\n window_name = __author__\n\n # Get a reference to webcam #-1 (the last one)\n video_capture = cv2.VideoCapture(-1)\n\n # Initialize some variables\n unknown_folder_path = os.path.abspath('unknown')\n i = lib.get_file_max_number(unknown_folder_path)\n filerd = h5py.File('database/training_encodings.hdf5', 'r')\n\n # Image encodings mode\n encodings_mode = 'large'\n\n # Temp to save predict result name\n face_names = []\n\n # Save the screen locations and encodings to find a person\n screen_locations = []\n screen_encodings = []\n\n # Save the training data from database\n training_names = []\n training_eigenvalues = []\n\n process_this_frame = True\n\n for key in filerd.keys():\n training_names.append(filerd[key].name.split('/')[-1])\n training_eigenvalues.append(filerd[key].value)\n\n filerd.close()\n\n while True:\n # Grab a single frame of video\n ret, frame = video_capture.read()\n\n # Resize frame of video to 1/4 size\n # for faster face recognition processing\n small_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)\n\n # Only process every other frame of video to save time\n if process_this_frame:\n # Find all the faces and face encodings\n # in the current frame of video\n screen_locations = lib.face_locations(small_frame, 1,\n 'hog')\n screen_encodings = lib.face_encodings(small_frame, None,\n 1, encodings_mode)\n face_names = []\n\n # How manay faces in the screen\n detected_face_length = len(screen_locations)\n info = 'We detected \\033[0;32m{}\\033[0m faces in the screen.\\n'\n print(info.format(detected_face_length))\n if detected_face_length >= 1:\n for screen_encoding in screen_encodings:\n # Compare the locations and get the face's name\n name = lib.compare_faces(training_eigenvalues,\n training_names,\n screen_encoding, 0.31)\n face_names.append(name)\n\n # Auto save the unknown images\n if '' == name:\n img_file_path = '{}/{}.jpg'.format(\n unknown_folder_path, i)\n cv2.imwrite(img_file_path, frame)\n i += 1\n time.sleep(0.15)\n\n process_this_frame = not process_this_frame\n\n # Display the results\n for (top, right, bottom, left), name in zip(screen_locations, face_names):\n # We detected in was scaled to 1/2 size\n top *= 2\n right *= 2\n bottom *= 2\n left *= 2\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n if '' != name:\n # Draw a label with a name below the face\n # # cv2.cv.CV_FILLED\n cv2.rectangle(frame, (left - 60, bottom + 30),\n (right + 60, bottom - 10), (0, 0, 255),\n cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name, (left - 50, bottom + 20),\n font, 1, (255, 255, 255), 1)\n\n # Display the resulting image\n cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)\n # cv2.cv.CV_WINDOW_FULLSCREEN\n cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,\n cv2.WINDOW_FULLSCREEN)\n cv2.imshow(window_name, frame)\n\n key = cv2.waitKey(1)\n if key == ord('s'):\n label = 'cache/{}.jpg'.format(i)\n cv2.imwrite(label, frame)\n i += 1\n elif key == ord('q'):\n break\n\n # Release handle to the webcam\n 
video_capture.release()\n cv2.destroyAllWindows()\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n try:\n try:\n argv_list = argv[1:]\n opts, args = getopt.getopt(argv_list, 'h', ['help'])\n arg = argv_list[0]\n if 'run' == arg:\n run()\n elif 'save' == arg:\n training.save()\n elif 'move' == arg:\n training.move()\n elif 'detect' == arg:\n training.detect()\n elif 'catch' == arg:\n if 2 == len(argv_list):\n training.catch(argv_list[1])\n else:\n training.catch()\n elif 'rotate' == arg:\n if 2 == len(argv_list):\n training.rotate(amount=int(argv_list[1]))\n else:\n training.rotate()\n except getopt.error, msg:\n raise Usage(msg)\n except Usage, err:\n print >>sys.stderr, err.msg\n print >>sys.stderr, 'for help use --help'\n return 2\n\n\nif __name__ == '__main__':\n lib.initial_project_folder()\n sys.exit(main())\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import csv
import os
from collections import namedtuple
from typing import List, Dict
from config import *
HEADER = ['File', 'LKHContigs', 'LKHValue', 'LKHTime', 'APContigs', 'APValue', 'APTime', 'ActualObjectiveValue']
Assembly_Stats = namedtuple('Assembly_Stats', HEADER)
dir = '/home/andreas/GDrive/workspace/sparsedata/ref1shuffled_c5_l700/calign.assembly'
def read_assembly_file(file: str) -> List:
if not os.path.isfile(file):
return [-1, -1, -1, -1, -1, -1]
with open(file, 'r') as f:
file_content_string = f.read()
if 'LKH_Contigs:\nLKH_Objective' in file_content_string:
lkh_gaps = -1
else:
lkh_gaps = len(file_content_string.split('LKH_Contigs:\n')[1].split('\nLKH_Objective')[0].split('\n')) - 1
lkh_value = int(file_content_string.split('LKH_Objective_Value: ')[1].split('\n')[0])
lkh_time = float(file_content_string.split('LKH_Time: ')[1].split('\n')[0])
if 'AP_Contigs:\nAP_Objective' in file_content_string:
ap_gaps = -1
else:
ap_gaps = len(file_content_string.split('AP_Contigs:\n')[1].split('\nAP_Objective')[0].split('\n')) - 1
ap_value = int(file_content_string.split('AP_Objective_Value: ')[1].split('\n')[0])
ap_time = float(file_content_string.split('AP_Time: ')[1].split('\n')[0])
return [lkh_value, lkh_gaps, lkh_time, ap_value, ap_gaps, ap_time]
def read_fasta_stats_file(file: str) -> List:
with open(file, 'r') as f:
file_content_string = f.read()
actual_objective_value = int(file_content_string.split('Objective function value: ')[1].split('\n')[0])
actual_gaps = int(file_content_string.split('Actual gaps: ')[1].split('\n')[0])
no_of_reads = int(file_content_string.split('Number of reads: ')[1].split('\n')[0])
return [no_of_reads, actual_objective_value, actual_gaps]
# def write_assembly_stats(assembly_stats_list: List[Assembly_Stats]) -> None:
# with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv', 'w') as f:
# f_csv = csv.writer(f, delimiter=',')
# f_csv.writerow(
# ['File', 'LKHContigs', 'LKHValue', 'LKHTime', 'APContigs', 'APValue', 'APTime', 'ActualObjectiveValue'])
# for elem in assembly_stats_list:
# f_csv.writerow(elem)
def write_assembly_stats(stats_dict: Dict) -> None:
with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv', 'w') as f:
f_csv = csv.writer(f, delimiter=',')
f_csv.writerow(
['Genome', 'Coverage', 'AvgLength', 'Reads', 'ActualValue', 'ActualGaps',
'CalignLKHValue', 'CalignLKHGaps', 'CalignLKHTime',
'CalignAPValue', 'CalignAPGaps', 'CalignAPTime',
'CalignALKHValue', 'CalignALKHGaps', 'CalignALKHTime',
'CalignAAPValue', 'CalignAAPGaps', 'CalignAAPTime',
'CalignBLKHValue', 'CalignBLKHGaps', 'CalignBLKHTime',
'CalignBAPValue', 'CalignBAPGaps', 'CalignBAPTime',
])
for ref_name in [ref1_name, ref2_name, ref3_name]:
for c in coverages:
for length in average_length_list:
val = stats_dict[(ref_name, c, length)]
row = [ref_name, c, length]
row += val['Actual']
row += val['Calign']
row += val['Calign25']
row += val['Calign50']
f_csv.writerow(row)
def write_assembly_stats_tex(stats_dict: Dict) -> None:
with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.tex', 'w') as f:
for ref_name in [ref1_name, ref2_name, ref3_name]:
if ref1_name == ref_name:
dashline_active = ''
else:
dashline_active = '\\hdashline\n'
f.write('{}\\bfseries {}\\\\\n'.format(dashline_active, ref_name))
for c in coverages:
f.write('$c = {}$\\\\\n'.format(c))
for length in average_length_list:
val = stats_dict[(ref_name, c, length)]
row = [length]
row += [val['Actual'][0]]
row += ['']
row += val['Actual'][1:]
row += ['']
row += [*val['Calign'][0:2], '{0:.2f}'.format(val['Calign'][2]), *val['Calign'][3:5],
'{0:.2f}'.format(val['Calign'][5])]
row += ['']
row += [*val['Calign25'][0:2], '{0:.2f}'.format(val['Calign25'][2]), *val['Calign25'][3:5],
'{0:.2f}'.format(val['Calign25'][5])]
row += ['']
row += [*val['Calign50'][0:2], '{0:.2f}'.format(val['Calign50'][2]), *val['Calign50'][3:5],
'{0:.2f}'.format(val['Calign50'][5])]
f.write(' & '.join([str(x) for x in row]) + '\\\\\n')
def write_assembly_stats2(stats_dict: Dict) -> None:
with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats2.csv', 'w') as f:
f_csv = csv.writer(f, delimiter=',')
refs = [ref1_name, ref2_name]
f_csv.writerow(range(len(refs) * 9))
f_csv.writerow(
[stats_dict[(ref_name, c, l)]['Actual'][0] for ref_name in refs for c in
coverages for l in average_length_list])
f_csv.writerow(
[stats_dict[(ref_name, c, l)]['Actual'][1] for ref_name in refs for c in
coverages for l
in average_length_list])
f_csv.writerow(
[stats_dict[(ref_name, c, l)]['Actual'][2] for ref_name in refs for c in
coverages for l
in average_length_list])
for foo in ['Calign', 'Calign25', 'Calign50']:
for i in range(6):
if i in [2, 5]:
f_csv.writerow(
['{0:.2f}'.format(stats_dict[(ref_name, c, l)][foo][i]) for ref_name in refs for c in
coverages
for l in average_length_list])
else:
f_csv.writerow(
[stats_dict[(ref_name, c, l)][foo][i] for ref_name in refs for c in
coverages
for l in average_length_list])
assembly_stats_list = []
stats_dict = {}
# for dir in sorted(glob.glob('/home/andreas/GDrive/workspace/sparsedata/ref[1,2,3]_c[5,20,40]*/')):
for ref_number in [1, 2, 3]:
for coverage in coverages:
for length in average_length_list:
# file_sub_dir = dir.split('/')[-2] # example ref1_c5_l100
# ref_number = int(file_sub_dir.split('ref')[1].split('_')[0])
ref_name = references[ref_number - 1]
# coverage = int(file_sub_dir.split('_c')[1].split('_')[0])
# length = int(file_sub_dir.split('_l')[1])
dir = '/home/andreas/GDrive/workspace/sparsedata/ref{}_c{}_l{}/'.format(ref_number, coverage, length)
stats_dict[(ref_name, coverage, length)] = {'Actual': read_fasta_stats_file(dir + 'fasta.stat'),
'Calign': read_assembly_file(dir + 'calign.assembly'),
'Calign25': read_assembly_file(
dir + 'calign_0_{}.assembly'.format(length // 4)),
'Calign50': read_assembly_file(
dir + 'calign_0_{}.assembly'.format(length // 2))}
# dir = '{}-{}-{}'.format(references[ref_number - 1], coverage, length)
# assembly_stats_list.append(
# Assembly_Stats(dir, len(lkh_contigs), lkh_value, lkh_time, len(ap_contigs), ap_value, ap_time,
# actual_Objective_value))
def write_whole_stats() -> None:
headers = ['CalignLKH', 'CalignAP', 'CalignALKH', 'CalignAAP', 'CalignBLKH',
'CalignBAP']
    # One counter per solver configuration, all starting at zero
    vals = dict.fromkeys(headers, 0)
    gaps = dict.fromkeys(headers, 0)
    both = dict.fromkeys(headers, 0)
    atspvsapval = dict.fromkeys(headers, 0)
    atspvsap = dict.fromkeys(headers, 0)
with open(DIR + 'assembly_stats.csv', 'r') as f:
f_csv = csv.DictReader(f, delimiter=',')
for row in f_csv:
for elem in headers:
if row['ActualValue'] == row[elem + 'Value']:
vals[elem] += 1
if row['ActualGaps'] == row[elem + 'Gaps']:
gaps[elem] += 1
if row['ActualValue'] == row[elem + 'Value'] and row['ActualGaps'] == row[elem + 'Gaps']:
both[elem] += 1
if row['CalignLKHValue'] == row['CalignAPValue']:
atspvsapval['CalignLKH'] += 1
atspvsapval['CalignAP'] += 1
if row['CalignALKHValue'] == row['CalignAAPValue']:
atspvsapval['CalignALKH'] += 1
atspvsapval['CalignAAP'] += 1
if row['CalignBLKHValue'] == row['CalignBAPValue']:
atspvsapval['CalignBLKH'] += 1
atspvsapval['CalignBAP'] += 1
if row['CalignLKHValue'] == row['CalignAPValue'] and row['CalignLKHGaps'] == row['CalignAPGaps']:
atspvsap['CalignLKH'] += 1
atspvsap['CalignAP'] += 1
if row['CalignALKHValue'] == row['CalignAAPValue'] and row['CalignALKHGaps'] == row['CalignAAPGaps']:
atspvsap['CalignALKH'] += 1
atspvsap['CalignAAP'] += 1
if row['CalignBLKHValue'] == row['CalignBAPValue'] and row['CalignBLKHGaps'] == row['CalignBAPGaps']:
atspvsap['CalignBLKH'] += 1
atspvsap['CalignBAP'] += 1
with open(DIR + 'complete_stats.csv', 'w') as g:
g_csv = csv.DictWriter(g, delimiter='&', fieldnames=headers)
g_csv.writeheader()
g_csv.writerow(vals)
g_csv.writerow(gaps)
g_csv.writerow(both)
g_csv.writerow(atspvsapval)
g_csv.writerow(atspvsap)
write_assembly_stats(stats_dict)
write_assembly_stats2(stats_dict)
write_assembly_stats_tex(stats_dict)
write_whole_stats()
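
To make the string-splitting in read_assembly_file() concrete, the implied .assembly layout looks like the sketch below; every value is made up, and real files may contain more fields than the ones parsed here:

# Two LKH contigs (-> 1 gap) and one AP contig (-> 0 gaps); all numbers invented.
sample = ('LKH_Contigs:\n'
          'contig_a\n'
          'contig_b\n'
          'LKH_Objective_Value: 1234\n'
          'LKH_Time: 1.5\n'
          'AP_Contigs:\n'
          'contig_c\n'
          'AP_Objective_Value: 1300\n'
          'AP_Time: 0.75\n')
with open('/tmp/sample.assembly', 'w') as f:
    f.write(sample)
print(read_assembly_file('/tmp/sample.assembly'))
# -> [1234, 1, 1.5, 1300, 0, 0.75]
#    i.e. [lkh_value, lkh_gaps, lkh_time, ap_value, ap_gaps, ap_time]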
|
normal
|
{
"blob_id": "edd98e3996b0fce46d33dd33340018ab5b029637",
"index": 2333,
"step-1": "<mask token>\n\n\ndef read_assembly_file(file: str) ->List:\n if not os.path.isfile(file):\n return [-1, -1, -1, -1, -1, -1]\n with open(file, 'r') as f:\n file_content_string = f.read()\n if 'LKH_Contigs:\\nLKH_Objective' in file_content_string:\n lkh_gaps = -1\n else:\n lkh_gaps = len(file_content_string.split('LKH_Contigs:\\n')[1].\n split('\\nLKH_Objective')[0].split('\\n')) - 1\n lkh_value = int(file_content_string.split('LKH_Objective_Value: ')[\n 1].split('\\n')[0])\n lkh_time = float(file_content_string.split('LKH_Time: ')[1].split(\n '\\n')[0])\n if 'AP_Contigs:\\nAP_Objective' in file_content_string:\n ap_gaps = -1\n else:\n ap_gaps = len(file_content_string.split('AP_Contigs:\\n')[1].\n split('\\nAP_Objective')[0].split('\\n')) - 1\n ap_value = int(file_content_string.split('AP_Objective_Value: ')[1]\n .split('\\n')[0])\n ap_time = float(file_content_string.split('AP_Time: ')[1].split(\n '\\n')[0])\n return [lkh_value, lkh_gaps, lkh_time, ap_value, ap_gaps, ap_time]\n\n\ndef read_fasta_stats_file(file: str) ->Dict:\n with open(file, 'r') as f:\n file_content_string = f.read()\n actual_objective_value = int(file_content_string.split(\n 'Objective function value: ')[1].split('\\n')[0])\n actual_gaps = int(file_content_string.split('Actual gaps: ')[1].\n split('\\n')[0])\n no_of_reads = int(file_content_string.split('Number of reads: ')[1]\n .split('\\n')[0])\n return [no_of_reads, actual_objective_value, actual_gaps]\n\n\ndef write_assembly_stats(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n f_csv.writerow(['Genome', 'Coverage', 'AvgLength', 'Reads',\n 'ActualValue', 'ActualGaps', 'CalignLKHValue', 'CalignLKHGaps',\n 'CalignLKHTime', 'CalignAPValue', 'CalignAPGaps',\n 'CalignAPTime', 'CalignALKHValue', 'CalignALKHGaps',\n 'CalignALKHTime', 'CalignAAPValue', 'CalignAAPGaps',\n 'CalignAAPTime', 'CalignBLKHValue', 'CalignBLKHGaps',\n 'CalignBLKHTime', 'CalignBAPValue', 'CalignBAPGaps',\n 'CalignBAPTime'])\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n for c in coverages:\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [ref_name, c, length]\n row += val['Actual']\n row += val['Calign']\n row += val['Calign25']\n row += val['Calign50']\n f_csv.writerow(row)\n\n\ndef write_assembly_stats_tex(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.tex',\n 'w') as f:\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n if ref1_name == ref_name:\n dashline_active = ''\n else:\n dashline_active = '\\\\hdashline\\n'\n f.write('{}\\\\bfseries {}\\\\\\\\\\n'.format(dashline_active, ref_name))\n for c in coverages:\n f.write('$c = {}$\\\\\\\\\\n'.format(c))\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [length]\n row += [val['Actual'][0]]\n row += ['']\n row += val['Actual'][1:]\n row += ['']\n row += [*val['Calign'][0:2], '{0:.2f}'.format(val[\n 'Calign'][2]), *val['Calign'][3:5], '{0:.2f}'.\n format(val['Calign'][5])]\n row += ['']\n row += [*val['Calign25'][0:2], '{0:.2f}'.format(val[\n 'Calign25'][2]), *val['Calign25'][3:5], '{0:.2f}'.\n format(val['Calign25'][5])]\n row += ['']\n row += [*val['Calign50'][0:2], '{0:.2f}'.format(val[\n 'Calign50'][2]), *val['Calign50'][3:5], '{0:.2f}'.\n format(val['Calign50'][5])]\n f.write(' & '.join([str(x) for x in row]) + '\\\\\\\\\\n')\n\n\ndef write_assembly_stats2(statsdict: Dict) ->None:\n with 
open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats2.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n refs = [ref1_name, ref2_name]\n f_csv.writerow(range(len(refs) * 9))\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][0] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][1] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][2] for\n ref_name in refs for c in coverages for l in average_length_list])\n for foo in ['Calign', 'Calign25', 'Calign50']:\n for i in range(6):\n if i in [2, 5]:\n f_csv.writerow(['{0:.2f}'.format(stats_dict[ref_name, c,\n l][foo][i]) for ref_name in refs for c in coverages for\n l in average_length_list])\n else:\n f_csv.writerow([stats_dict[ref_name, c, l][foo][i] for\n ref_name in refs for c in coverages for l in\n average_length_list])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_assembly_file(file: str) ->List:\n if not os.path.isfile(file):\n return [-1, -1, -1, -1, -1, -1]\n with open(file, 'r') as f:\n file_content_string = f.read()\n if 'LKH_Contigs:\\nLKH_Objective' in file_content_string:\n lkh_gaps = -1\n else:\n lkh_gaps = len(file_content_string.split('LKH_Contigs:\\n')[1].\n split('\\nLKH_Objective')[0].split('\\n')) - 1\n lkh_value = int(file_content_string.split('LKH_Objective_Value: ')[\n 1].split('\\n')[0])\n lkh_time = float(file_content_string.split('LKH_Time: ')[1].split(\n '\\n')[0])\n if 'AP_Contigs:\\nAP_Objective' in file_content_string:\n ap_gaps = -1\n else:\n ap_gaps = len(file_content_string.split('AP_Contigs:\\n')[1].\n split('\\nAP_Objective')[0].split('\\n')) - 1\n ap_value = int(file_content_string.split('AP_Objective_Value: ')[1]\n .split('\\n')[0])\n ap_time = float(file_content_string.split('AP_Time: ')[1].split(\n '\\n')[0])\n return [lkh_value, lkh_gaps, lkh_time, ap_value, ap_gaps, ap_time]\n\n\ndef read_fasta_stats_file(file: str) ->Dict:\n with open(file, 'r') as f:\n file_content_string = f.read()\n actual_objective_value = int(file_content_string.split(\n 'Objective function value: ')[1].split('\\n')[0])\n actual_gaps = int(file_content_string.split('Actual gaps: ')[1].\n split('\\n')[0])\n no_of_reads = int(file_content_string.split('Number of reads: ')[1]\n .split('\\n')[0])\n return [no_of_reads, actual_objective_value, actual_gaps]\n\n\ndef write_assembly_stats(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n f_csv.writerow(['Genome', 'Coverage', 'AvgLength', 'Reads',\n 'ActualValue', 'ActualGaps', 'CalignLKHValue', 'CalignLKHGaps',\n 'CalignLKHTime', 'CalignAPValue', 'CalignAPGaps',\n 'CalignAPTime', 'CalignALKHValue', 'CalignALKHGaps',\n 'CalignALKHTime', 'CalignAAPValue', 'CalignAAPGaps',\n 'CalignAAPTime', 'CalignBLKHValue', 'CalignBLKHGaps',\n 'CalignBLKHTime', 'CalignBAPValue', 'CalignBAPGaps',\n 'CalignBAPTime'])\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n for c in coverages:\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [ref_name, c, length]\n row += val['Actual']\n row += val['Calign']\n row += val['Calign25']\n row += val['Calign50']\n f_csv.writerow(row)\n\n\ndef write_assembly_stats_tex(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.tex',\n 'w') as f:\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n if ref1_name == ref_name:\n dashline_active = ''\n else:\n dashline_active = '\\\\hdashline\\n'\n f.write('{}\\\\bfseries {}\\\\\\\\\\n'.format(dashline_active, ref_name))\n for c in coverages:\n f.write('$c = {}$\\\\\\\\\\n'.format(c))\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [length]\n row += [val['Actual'][0]]\n row += ['']\n row += val['Actual'][1:]\n row += ['']\n row += [*val['Calign'][0:2], '{0:.2f}'.format(val[\n 'Calign'][2]), *val['Calign'][3:5], '{0:.2f}'.\n format(val['Calign'][5])]\n row += ['']\n row += [*val['Calign25'][0:2], '{0:.2f}'.format(val[\n 'Calign25'][2]), *val['Calign25'][3:5], '{0:.2f}'.\n format(val['Calign25'][5])]\n row += ['']\n row += [*val['Calign50'][0:2], '{0:.2f}'.format(val[\n 'Calign50'][2]), *val['Calign50'][3:5], '{0:.2f}'.\n format(val['Calign50'][5])]\n f.write(' & '.join([str(x) for x in row]) + '\\\\\\\\\\n')\n\n\ndef write_assembly_stats2(statsdict: Dict) ->None:\n with 
open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats2.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n refs = [ref1_name, ref2_name]\n f_csv.writerow(range(len(refs) * 9))\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][0] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][1] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][2] for\n ref_name in refs for c in coverages for l in average_length_list])\n for foo in ['Calign', 'Calign25', 'Calign50']:\n for i in range(6):\n if i in [2, 5]:\n f_csv.writerow(['{0:.2f}'.format(stats_dict[ref_name, c,\n l][foo][i]) for ref_name in refs for c in coverages for\n l in average_length_list])\n else:\n f_csv.writerow([stats_dict[ref_name, c, l][foo][i] for\n ref_name in refs for c in coverages for l in\n average_length_list])\n\n\n<mask token>\nfor ref_number in [1, 2, 3]:\n for coverage in coverages:\n for length in average_length_list:\n ref_name = references[ref_number - 1]\n dir = ('/home/andreas/GDrive/workspace/sparsedata/ref{}_c{}_l{}/'\n .format(ref_number, coverage, length))\n stats_dict[ref_name, coverage, length] = {'Actual':\n read_fasta_stats_file(dir + 'fasta.stat'), 'Calign':\n read_assembly_file(dir + 'calign.assembly'), 'Calign25':\n read_assembly_file(dir + 'calign_0_{}.assembly'.format(\n length // 4)), 'Calign50': read_assembly_file(dir +\n 'calign_0_{}.assembly'.format(length // 2))}\n\n\ndef write_whole_stats() ->None:\n headers = ['CalignLKH', 'CalignAP', 'CalignALKH', 'CalignAAP',\n 'CalignBLKH', 'CalignBAP']\n vals = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n gaps = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n both = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n atspvsapval = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0,\n 'CalignAAP': 0, 'CalignBLKH': 0, 'CalignBAP': 0}\n atspvsap = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP':\n 0, 'CalignBLKH': 0, 'CalignBAP': 0}\n with open(DIR + 'assembly_stats.csv', 'r') as f:\n f_csv = csv.DictReader(f, delimiter=',')\n for row in f_csv:\n for elem in headers:\n if row['ActualValue'] == row[elem + 'Value']:\n vals[elem] += 1\n if row['ActualGaps'] == row[elem + 'Gaps']:\n gaps[elem] += 1\n if row['ActualValue'] == row[elem + 'Value'] and row[\n 'ActualGaps'] == row[elem + 'Gaps']:\n both[elem] += 1\n if row['CalignLKHValue'] == row['CalignAPValue']:\n atspvsapval['CalignLKH'] += 1\n atspvsapval['CalignAP'] += 1\n if row['CalignALKHValue'] == row['CalignAAPValue']:\n atspvsapval['CalignALKH'] += 1\n atspvsapval['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue']:\n atspvsapval['CalignBLKH'] += 1\n atspvsapval['CalignBAP'] += 1\n if row['CalignLKHValue'] == row['CalignAPValue'] and row[\n 'CalignLKHGaps'] == row['CalignAPGaps']:\n atspvsap['CalignLKH'] += 1\n atspvsap['CalignAP'] += 1\n if row['CalignALKHValue'] == row['CalignAAPValue'] and row[\n 'CalignALKHGaps'] == row['CalignAAPGaps']:\n atspvsap['CalignALKH'] += 1\n atspvsap['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue'] and row[\n 'CalignBLKHGaps'] == row['CalignBAPGaps']:\n atspvsap['CalignBLKH'] += 1\n atspvsap['CalignBAP'] += 1\n with open(DIR + 'complete_stats.csv', 'w') as g:\n g_csv = csv.DictWriter(g, delimiter='&', 
fieldnames=headers)\n g_csv.writeheader()\n g_csv.writerow(vals)\n g_csv.writerow(gaps)\n g_csv.writerow(both)\n g_csv.writerow(atspvsapval)\n g_csv.writerow(atspvsap)\n\n\nwrite_assembly_stats(stats_dict)\nwrite_assembly_stats2(stats_dict)\nwrite_assembly_stats_tex(stats_dict)\nwrite_whole_stats()\n",
"step-3": "<mask token>\nHEADER = ['File', 'LKHContigs', 'LKHValue', 'LKHTime', 'APContigs',\n 'APValue', 'APTime', 'ActualObjectiveValue']\nAssembly_Stats = namedtuple('Assembly_Stats', HEADER)\ndir = (\n '/home/andreas/GDrive/workspace/sparsedata/ref1shuffled_c5_l700/calign.assembly'\n )\n\n\ndef read_assembly_file(file: str) ->List:\n if not os.path.isfile(file):\n return [-1, -1, -1, -1, -1, -1]\n with open(file, 'r') as f:\n file_content_string = f.read()\n if 'LKH_Contigs:\\nLKH_Objective' in file_content_string:\n lkh_gaps = -1\n else:\n lkh_gaps = len(file_content_string.split('LKH_Contigs:\\n')[1].\n split('\\nLKH_Objective')[0].split('\\n')) - 1\n lkh_value = int(file_content_string.split('LKH_Objective_Value: ')[\n 1].split('\\n')[0])\n lkh_time = float(file_content_string.split('LKH_Time: ')[1].split(\n '\\n')[0])\n if 'AP_Contigs:\\nAP_Objective' in file_content_string:\n ap_gaps = -1\n else:\n ap_gaps = len(file_content_string.split('AP_Contigs:\\n')[1].\n split('\\nAP_Objective')[0].split('\\n')) - 1\n ap_value = int(file_content_string.split('AP_Objective_Value: ')[1]\n .split('\\n')[0])\n ap_time = float(file_content_string.split('AP_Time: ')[1].split(\n '\\n')[0])\n return [lkh_value, lkh_gaps, lkh_time, ap_value, ap_gaps, ap_time]\n\n\ndef read_fasta_stats_file(file: str) ->Dict:\n with open(file, 'r') as f:\n file_content_string = f.read()\n actual_objective_value = int(file_content_string.split(\n 'Objective function value: ')[1].split('\\n')[0])\n actual_gaps = int(file_content_string.split('Actual gaps: ')[1].\n split('\\n')[0])\n no_of_reads = int(file_content_string.split('Number of reads: ')[1]\n .split('\\n')[0])\n return [no_of_reads, actual_objective_value, actual_gaps]\n\n\ndef write_assembly_stats(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n f_csv.writerow(['Genome', 'Coverage', 'AvgLength', 'Reads',\n 'ActualValue', 'ActualGaps', 'CalignLKHValue', 'CalignLKHGaps',\n 'CalignLKHTime', 'CalignAPValue', 'CalignAPGaps',\n 'CalignAPTime', 'CalignALKHValue', 'CalignALKHGaps',\n 'CalignALKHTime', 'CalignAAPValue', 'CalignAAPGaps',\n 'CalignAAPTime', 'CalignBLKHValue', 'CalignBLKHGaps',\n 'CalignBLKHTime', 'CalignBAPValue', 'CalignBAPGaps',\n 'CalignBAPTime'])\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n for c in coverages:\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [ref_name, c, length]\n row += val['Actual']\n row += val['Calign']\n row += val['Calign25']\n row += val['Calign50']\n f_csv.writerow(row)\n\n\ndef write_assembly_stats_tex(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.tex',\n 'w') as f:\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n if ref1_name == ref_name:\n dashline_active = ''\n else:\n dashline_active = '\\\\hdashline\\n'\n f.write('{}\\\\bfseries {}\\\\\\\\\\n'.format(dashline_active, ref_name))\n for c in coverages:\n f.write('$c = {}$\\\\\\\\\\n'.format(c))\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [length]\n row += [val['Actual'][0]]\n row += ['']\n row += val['Actual'][1:]\n row += ['']\n row += [*val['Calign'][0:2], '{0:.2f}'.format(val[\n 'Calign'][2]), *val['Calign'][3:5], '{0:.2f}'.\n format(val['Calign'][5])]\n row += ['']\n row += [*val['Calign25'][0:2], '{0:.2f}'.format(val[\n 'Calign25'][2]), *val['Calign25'][3:5], '{0:.2f}'.\n format(val['Calign25'][5])]\n row += ['']\n row += 
[*val['Calign50'][0:2], '{0:.2f}'.format(val[\n 'Calign50'][2]), *val['Calign50'][3:5], '{0:.2f}'.\n format(val['Calign50'][5])]\n f.write(' & '.join([str(x) for x in row]) + '\\\\\\\\\\n')\n\n\ndef write_assembly_stats2(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats2.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n refs = [ref1_name, ref2_name]\n f_csv.writerow(range(len(refs) * 9))\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][0] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][1] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][2] for\n ref_name in refs for c in coverages for l in average_length_list])\n for foo in ['Calign', 'Calign25', 'Calign50']:\n for i in range(6):\n if i in [2, 5]:\n f_csv.writerow(['{0:.2f}'.format(stats_dict[ref_name, c,\n l][foo][i]) for ref_name in refs for c in coverages for\n l in average_length_list])\n else:\n f_csv.writerow([stats_dict[ref_name, c, l][foo][i] for\n ref_name in refs for c in coverages for l in\n average_length_list])\n\n\nassembly_stats_list = []\nstats_dict = {}\nfor ref_number in [1, 2, 3]:\n for coverage in coverages:\n for length in average_length_list:\n ref_name = references[ref_number - 1]\n dir = ('/home/andreas/GDrive/workspace/sparsedata/ref{}_c{}_l{}/'\n .format(ref_number, coverage, length))\n stats_dict[ref_name, coverage, length] = {'Actual':\n read_fasta_stats_file(dir + 'fasta.stat'), 'Calign':\n read_assembly_file(dir + 'calign.assembly'), 'Calign25':\n read_assembly_file(dir + 'calign_0_{}.assembly'.format(\n length // 4)), 'Calign50': read_assembly_file(dir +\n 'calign_0_{}.assembly'.format(length // 2))}\n\n\ndef write_whole_stats() ->None:\n headers = ['CalignLKH', 'CalignAP', 'CalignALKH', 'CalignAAP',\n 'CalignBLKH', 'CalignBAP']\n vals = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n gaps = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n both = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n atspvsapval = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0,\n 'CalignAAP': 0, 'CalignBLKH': 0, 'CalignBAP': 0}\n atspvsap = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP':\n 0, 'CalignBLKH': 0, 'CalignBAP': 0}\n with open(DIR + 'assembly_stats.csv', 'r') as f:\n f_csv = csv.DictReader(f, delimiter=',')\n for row in f_csv:\n for elem in headers:\n if row['ActualValue'] == row[elem + 'Value']:\n vals[elem] += 1\n if row['ActualGaps'] == row[elem + 'Gaps']:\n gaps[elem] += 1\n if row['ActualValue'] == row[elem + 'Value'] and row[\n 'ActualGaps'] == row[elem + 'Gaps']:\n both[elem] += 1\n if row['CalignLKHValue'] == row['CalignAPValue']:\n atspvsapval['CalignLKH'] += 1\n atspvsapval['CalignAP'] += 1\n if row['CalignALKHValue'] == row['CalignAAPValue']:\n atspvsapval['CalignALKH'] += 1\n atspvsapval['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue']:\n atspvsapval['CalignBLKH'] += 1\n atspvsapval['CalignBAP'] += 1\n if row['CalignLKHValue'] == row['CalignAPValue'] and row[\n 'CalignLKHGaps'] == row['CalignAPGaps']:\n atspvsap['CalignLKH'] += 1\n atspvsap['CalignAP'] += 1\n if row['CalignALKHValue'] == row['CalignAAPValue'] and row[\n 'CalignALKHGaps'] == row['CalignAAPGaps']:\n atspvsap['CalignALKH'] += 1\n 
atspvsap['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue'] and row[\n 'CalignBLKHGaps'] == row['CalignBAPGaps']:\n atspvsap['CalignBLKH'] += 1\n atspvsap['CalignBAP'] += 1\n with open(DIR + 'complete_stats.csv', 'w') as g:\n g_csv = csv.DictWriter(g, delimiter='&', fieldnames=headers)\n g_csv.writeheader()\n g_csv.writerow(vals)\n g_csv.writerow(gaps)\n g_csv.writerow(both)\n g_csv.writerow(atspvsapval)\n g_csv.writerow(atspvsap)\n\n\nwrite_assembly_stats(stats_dict)\nwrite_assembly_stats2(stats_dict)\nwrite_assembly_stats_tex(stats_dict)\nwrite_whole_stats()\n",
"step-4": "import csv\nimport os\nfrom collections import namedtuple\nfrom typing import List, Dict\nfrom config import *\nHEADER = ['File', 'LKHContigs', 'LKHValue', 'LKHTime', 'APContigs',\n 'APValue', 'APTime', 'ActualObjectiveValue']\nAssembly_Stats = namedtuple('Assembly_Stats', HEADER)\ndir = (\n '/home/andreas/GDrive/workspace/sparsedata/ref1shuffled_c5_l700/calign.assembly'\n )\n\n\ndef read_assembly_file(file: str) ->List:\n if not os.path.isfile(file):\n return [-1, -1, -1, -1, -1, -1]\n with open(file, 'r') as f:\n file_content_string = f.read()\n if 'LKH_Contigs:\\nLKH_Objective' in file_content_string:\n lkh_gaps = -1\n else:\n lkh_gaps = len(file_content_string.split('LKH_Contigs:\\n')[1].\n split('\\nLKH_Objective')[0].split('\\n')) - 1\n lkh_value = int(file_content_string.split('LKH_Objective_Value: ')[\n 1].split('\\n')[0])\n lkh_time = float(file_content_string.split('LKH_Time: ')[1].split(\n '\\n')[0])\n if 'AP_Contigs:\\nAP_Objective' in file_content_string:\n ap_gaps = -1\n else:\n ap_gaps = len(file_content_string.split('AP_Contigs:\\n')[1].\n split('\\nAP_Objective')[0].split('\\n')) - 1\n ap_value = int(file_content_string.split('AP_Objective_Value: ')[1]\n .split('\\n')[0])\n ap_time = float(file_content_string.split('AP_Time: ')[1].split(\n '\\n')[0])\n return [lkh_value, lkh_gaps, lkh_time, ap_value, ap_gaps, ap_time]\n\n\ndef read_fasta_stats_file(file: str) ->Dict:\n with open(file, 'r') as f:\n file_content_string = f.read()\n actual_objective_value = int(file_content_string.split(\n 'Objective function value: ')[1].split('\\n')[0])\n actual_gaps = int(file_content_string.split('Actual gaps: ')[1].\n split('\\n')[0])\n no_of_reads = int(file_content_string.split('Number of reads: ')[1]\n .split('\\n')[0])\n return [no_of_reads, actual_objective_value, actual_gaps]\n\n\ndef write_assembly_stats(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n f_csv.writerow(['Genome', 'Coverage', 'AvgLength', 'Reads',\n 'ActualValue', 'ActualGaps', 'CalignLKHValue', 'CalignLKHGaps',\n 'CalignLKHTime', 'CalignAPValue', 'CalignAPGaps',\n 'CalignAPTime', 'CalignALKHValue', 'CalignALKHGaps',\n 'CalignALKHTime', 'CalignAAPValue', 'CalignAAPGaps',\n 'CalignAAPTime', 'CalignBLKHValue', 'CalignBLKHGaps',\n 'CalignBLKHTime', 'CalignBAPValue', 'CalignBAPGaps',\n 'CalignBAPTime'])\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n for c in coverages:\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [ref_name, c, length]\n row += val['Actual']\n row += val['Calign']\n row += val['Calign25']\n row += val['Calign50']\n f_csv.writerow(row)\n\n\ndef write_assembly_stats_tex(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.tex',\n 'w') as f:\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n if ref1_name == ref_name:\n dashline_active = ''\n else:\n dashline_active = '\\\\hdashline\\n'\n f.write('{}\\\\bfseries {}\\\\\\\\\\n'.format(dashline_active, ref_name))\n for c in coverages:\n f.write('$c = {}$\\\\\\\\\\n'.format(c))\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [length]\n row += [val['Actual'][0]]\n row += ['']\n row += val['Actual'][1:]\n row += ['']\n row += [*val['Calign'][0:2], '{0:.2f}'.format(val[\n 'Calign'][2]), *val['Calign'][3:5], '{0:.2f}'.\n format(val['Calign'][5])]\n row += ['']\n row += [*val['Calign25'][0:2], '{0:.2f}'.format(val[\n 
'Calign25'][2]), *val['Calign25'][3:5], '{0:.2f}'.\n format(val['Calign25'][5])]\n row += ['']\n row += [*val['Calign50'][0:2], '{0:.2f}'.format(val[\n 'Calign50'][2]), *val['Calign50'][3:5], '{0:.2f}'.\n format(val['Calign50'][5])]\n f.write(' & '.join([str(x) for x in row]) + '\\\\\\\\\\n')\n\n\ndef write_assembly_stats2(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats2.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n refs = [ref1_name, ref2_name]\n f_csv.writerow(range(len(refs) * 9))\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][0] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][1] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][2] for\n ref_name in refs for c in coverages for l in average_length_list])\n for foo in ['Calign', 'Calign25', 'Calign50']:\n for i in range(6):\n if i in [2, 5]:\n f_csv.writerow(['{0:.2f}'.format(stats_dict[ref_name, c,\n l][foo][i]) for ref_name in refs for c in coverages for\n l in average_length_list])\n else:\n f_csv.writerow([stats_dict[ref_name, c, l][foo][i] for\n ref_name in refs for c in coverages for l in\n average_length_list])\n\n\nassembly_stats_list = []\nstats_dict = {}\nfor ref_number in [1, 2, 3]:\n for coverage in coverages:\n for length in average_length_list:\n ref_name = references[ref_number - 1]\n dir = ('/home/andreas/GDrive/workspace/sparsedata/ref{}_c{}_l{}/'\n .format(ref_number, coverage, length))\n stats_dict[ref_name, coverage, length] = {'Actual':\n read_fasta_stats_file(dir + 'fasta.stat'), 'Calign':\n read_assembly_file(dir + 'calign.assembly'), 'Calign25':\n read_assembly_file(dir + 'calign_0_{}.assembly'.format(\n length // 4)), 'Calign50': read_assembly_file(dir +\n 'calign_0_{}.assembly'.format(length // 2))}\n\n\ndef write_whole_stats() ->None:\n headers = ['CalignLKH', 'CalignAP', 'CalignALKH', 'CalignAAP',\n 'CalignBLKH', 'CalignBAP']\n vals = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n gaps = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n both = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n atspvsapval = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0,\n 'CalignAAP': 0, 'CalignBLKH': 0, 'CalignBAP': 0}\n atspvsap = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP':\n 0, 'CalignBLKH': 0, 'CalignBAP': 0}\n with open(DIR + 'assembly_stats.csv', 'r') as f:\n f_csv = csv.DictReader(f, delimiter=',')\n for row in f_csv:\n for elem in headers:\n if row['ActualValue'] == row[elem + 'Value']:\n vals[elem] += 1\n if row['ActualGaps'] == row[elem + 'Gaps']:\n gaps[elem] += 1\n if row['ActualValue'] == row[elem + 'Value'] and row[\n 'ActualGaps'] == row[elem + 'Gaps']:\n both[elem] += 1\n if row['CalignLKHValue'] == row['CalignAPValue']:\n atspvsapval['CalignLKH'] += 1\n atspvsapval['CalignAP'] += 1\n if row['CalignALKHValue'] == row['CalignAAPValue']:\n atspvsapval['CalignALKH'] += 1\n atspvsapval['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue']:\n atspvsapval['CalignBLKH'] += 1\n atspvsapval['CalignBAP'] += 1\n if row['CalignLKHValue'] == row['CalignAPValue'] and row[\n 'CalignLKHGaps'] == row['CalignAPGaps']:\n atspvsap['CalignLKH'] += 1\n atspvsap['CalignAP'] += 1\n if row['CalignALKHValue'] == 
row['CalignAAPValue'] and row[\n 'CalignALKHGaps'] == row['CalignAAPGaps']:\n atspvsap['CalignALKH'] += 1\n atspvsap['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue'] and row[\n 'CalignBLKHGaps'] == row['CalignBAPGaps']:\n atspvsap['CalignBLKH'] += 1\n atspvsap['CalignBAP'] += 1\n with open(DIR + 'complete_stats.csv', 'w') as g:\n g_csv = csv.DictWriter(g, delimiter='&', fieldnames=headers)\n g_csv.writeheader()\n g_csv.writerow(vals)\n g_csv.writerow(gaps)\n g_csv.writerow(both)\n g_csv.writerow(atspvsapval)\n g_csv.writerow(atspvsap)\n\n\nwrite_assembly_stats(stats_dict)\nwrite_assembly_stats2(stats_dict)\nwrite_assembly_stats_tex(stats_dict)\nwrite_whole_stats()\n",
"step-5": "import csv\nimport os\nfrom collections import namedtuple\nfrom typing import List, Dict\n\nfrom config import *\n\nHEADER = ['File', 'LKHContigs', 'LKHValue', 'LKHTime', 'APContigs', 'APValue', 'APTime', 'ActualObjectiveValue']\nAssembly_Stats = namedtuple('Assembly_Stats', HEADER)\n\ndir = '/home/andreas/GDrive/workspace/sparsedata/ref1shuffled_c5_l700/calign.assembly'\n\n\ndef read_assembly_file(file: str) -> List:\n if not os.path.isfile(file):\n return [-1, -1, -1, -1, -1, -1]\n with open(file, 'r') as f:\n file_content_string = f.read()\n if 'LKH_Contigs:\\nLKH_Objective' in file_content_string:\n lkh_gaps = -1\n else:\n lkh_gaps = len(file_content_string.split('LKH_Contigs:\\n')[1].split('\\nLKH_Objective')[0].split('\\n')) - 1\n lkh_value = int(file_content_string.split('LKH_Objective_Value: ')[1].split('\\n')[0])\n lkh_time = float(file_content_string.split('LKH_Time: ')[1].split('\\n')[0])\n if 'AP_Contigs:\\nAP_Objective' in file_content_string:\n ap_gaps = -1\n else:\n ap_gaps = len(file_content_string.split('AP_Contigs:\\n')[1].split('\\nAP_Objective')[0].split('\\n')) - 1\n ap_value = int(file_content_string.split('AP_Objective_Value: ')[1].split('\\n')[0])\n ap_time = float(file_content_string.split('AP_Time: ')[1].split('\\n')[0])\n\n return [lkh_value, lkh_gaps, lkh_time, ap_value, ap_gaps, ap_time]\n\n\ndef read_fasta_stats_file(file: str) -> Dict:\n with open(file, 'r') as f:\n file_content_string = f.read()\n actual_objective_value = int(file_content_string.split('Objective function value: ')[1].split('\\n')[0])\n actual_gaps = int(file_content_string.split('Actual gaps: ')[1].split('\\n')[0])\n no_of_reads = int(file_content_string.split('Number of reads: ')[1].split('\\n')[0])\n return [no_of_reads, actual_objective_value, actual_gaps]\n\n\n# def write_assembly_stats(assembly_stats_list: List[Assembly_Stats]) -> None:\n# with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv', 'w') as f:\n# f_csv = csv.writer(f, delimiter=',')\n# f_csv.writerow(\n# ['File', 'LKHContigs', 'LKHValue', 'LKHTime', 'APContigs', 'APValue', 'APTime', 'ActualObjectiveValue'])\n# for elem in assembly_stats_list:\n# f_csv.writerow(elem)\n\ndef write_assembly_stats(statsdict: Dict) -> None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv', 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n f_csv.writerow(\n ['Genome', 'Coverage', 'AvgLength', 'Reads', 'ActualValue', 'ActualGaps',\n 'CalignLKHValue', 'CalignLKHGaps', 'CalignLKHTime',\n 'CalignAPValue', 'CalignAPGaps', 'CalignAPTime',\n 'CalignALKHValue', 'CalignALKHGaps', 'CalignALKHTime',\n 'CalignAAPValue', 'CalignAAPGaps', 'CalignAAPTime',\n 'CalignBLKHValue', 'CalignBLKHGaps', 'CalignBLKHTime',\n 'CalignBAPValue', 'CalignBAPGaps', 'CalignBAPTime',\n ])\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n for c in coverages:\n for length in average_length_list:\n val = stats_dict[(ref_name, c, length)]\n row = [ref_name, c, length]\n row += val['Actual']\n row += val['Calign']\n row += val['Calign25']\n row += val['Calign50']\n\n f_csv.writerow(row)\n\n\ndef write_assembly_stats_tex(statsdict: Dict) -> None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.tex', 'w') as f:\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n if ref1_name == ref_name:\n dashline_active = ''\n else:\n dashline_active = '\\\\hdashline\\n'\n f.write('{}\\\\bfseries {}\\\\\\\\\\n'.format(dashline_active, ref_name))\n for c in coverages:\n f.write('$c = {}$\\\\\\\\\\n'.format(c))\n for 
length in average_length_list:\n val = stats_dict[(ref_name, c, length)]\n row = [length]\n row += [val['Actual'][0]]\n row += ['']\n row += val['Actual'][1:]\n row += ['']\n row += [*val['Calign'][0:2], '{0:.2f}'.format(val['Calign'][2]), *val['Calign'][3:5],\n '{0:.2f}'.format(val['Calign'][5])]\n row += ['']\n row += [*val['Calign25'][0:2], '{0:.2f}'.format(val['Calign25'][2]), *val['Calign25'][3:5],\n '{0:.2f}'.format(val['Calign25'][5])]\n row += ['']\n row += [*val['Calign50'][0:2], '{0:.2f}'.format(val['Calign50'][2]), *val['Calign50'][3:5],\n '{0:.2f}'.format(val['Calign50'][5])]\n f.write(' & '.join([str(x) for x in row]) + '\\\\\\\\\\n')\n\n\ndef write_assembly_stats2(statsdict: Dict) -> None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats2.csv', 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n refs = [ref1_name, ref2_name]\n f_csv.writerow(range(len(refs) * 9))\n\n f_csv.writerow(\n [stats_dict[(ref_name, c, l)]['Actual'][0] for ref_name in refs for c in\n coverages for l in average_length_list])\n f_csv.writerow(\n [stats_dict[(ref_name, c, l)]['Actual'][1] for ref_name in refs for c in\n coverages for l\n in average_length_list])\n f_csv.writerow(\n [stats_dict[(ref_name, c, l)]['Actual'][2] for ref_name in refs for c in\n coverages for l\n in average_length_list])\n for foo in ['Calign', 'Calign25', 'Calign50']:\n for i in range(6):\n if i in [2, 5]:\n f_csv.writerow(\n ['{0:.2f}'.format(stats_dict[(ref_name, c, l)][foo][i]) for ref_name in refs for c in\n coverages\n for l in average_length_list])\n else:\n f_csv.writerow(\n [stats_dict[(ref_name, c, l)][foo][i] for ref_name in refs for c in\n coverages\n for l in average_length_list])\n\n\nassembly_stats_list = []\nstats_dict = {}\n# for dir in sorted(glob.glob('/home/andreas/GDrive/workspace/sparsedata/ref[1,2,3]_c[5,20,40]*/')):\nfor ref_number in [1, 2, 3]:\n for coverage in coverages:\n for length in average_length_list:\n # file_sub_dir = dir.split('/')[-2] # example ref1_c5_l100\n # ref_number = int(file_sub_dir.split('ref')[1].split('_')[0])\n ref_name = references[ref_number - 1]\n # coverage = int(file_sub_dir.split('_c')[1].split('_')[0])\n # length = int(file_sub_dir.split('_l')[1])\n dir = '/home/andreas/GDrive/workspace/sparsedata/ref{}_c{}_l{}/'.format(ref_number, coverage, length)\n stats_dict[(ref_name, coverage, length)] = {'Actual': read_fasta_stats_file(dir + 'fasta.stat'),\n 'Calign': read_assembly_file(dir + 'calign.assembly'),\n 'Calign25': read_assembly_file(\n dir + 'calign_0_{}.assembly'.format(length // 4)),\n 'Calign50': read_assembly_file(\n dir + 'calign_0_{}.assembly'.format(length // 2))}\n\n\n # dir = '{}-{}-{}'.format(references[ref_number - 1], coverage, length)\n # assembly_stats_list.append(\n # Assembly_Stats(dir, len(lkh_contigs), lkh_value, lkh_time, len(ap_contigs), ap_value, ap_time,\n # actual_Objective_value))\n\n\ndef write_whole_stats() -> None:\n headers = ['CalignLKH', 'CalignAP', 'CalignALKH', 'CalignAAP', 'CalignBLKH',\n 'CalignBAP']\n vals = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,\n 'CalignBAP': 0}\n gaps = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,\n 'CalignBAP': 0}\n both = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,\n 'CalignBAP': 0}\n atspvsapval = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,\n 'CalignBAP': 0}\n atspvsap = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 
'CalignBLKH': 0,\n 'CalignBAP': 0}\n with open(DIR + 'assembly_stats.csv', 'r') as f:\n f_csv = csv.DictReader(f, delimiter=',')\n for row in f_csv:\n for elem in headers:\n if row['ActualValue'] == row[elem + 'Value']:\n vals[elem] += 1\n if row['ActualGaps'] == row[elem + 'Gaps']:\n gaps[elem] += 1\n if row['ActualValue'] == row[elem + 'Value'] and row['ActualGaps'] == row[elem + 'Gaps']:\n both[elem] += 1\n if row['CalignLKHValue'] == row['CalignAPValue']:\n atspvsapval['CalignLKH'] += 1\n atspvsapval['CalignAP'] += 1\n if row['CalignALKHValue'] == row['CalignAAPValue']:\n atspvsapval['CalignALKH'] += 1\n atspvsapval['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue']:\n atspvsapval['CalignBLKH'] += 1\n atspvsapval['CalignBAP'] += 1\n if row['CalignLKHValue'] == row['CalignAPValue'] and row['CalignLKHGaps'] == row['CalignAPGaps']:\n atspvsap['CalignLKH'] += 1\n atspvsap['CalignAP'] += 1\n if row['CalignALKHValue'] == row['CalignAAPValue'] and row['CalignALKHGaps'] == row['CalignAAPGaps']:\n atspvsap['CalignALKH'] += 1\n atspvsap['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue'] and row['CalignBLKHGaps'] == row['CalignBAPGaps']:\n atspvsap['CalignBLKH'] += 1\n atspvsap['CalignBAP'] += 1\n with open(DIR + 'complete_stats.csv', 'w') as g:\n g_csv = csv.DictWriter(g, delimiter='&', fieldnames=headers)\n g_csv.writeheader()\n g_csv.writerow(vals)\n g_csv.writerow(gaps)\n g_csv.writerow(both)\n g_csv.writerow(atspvsapval)\n g_csv.writerow(atspvsap)\n\n\nwrite_assembly_stats(stats_dict)\nwrite_assembly_stats2(stats_dict)\nwrite_assembly_stats_tex(stats_dict)\nwrite_whole_stats()\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
class UtilTestCase(TestCase):
<|reserved_special_token_0|>
# coding=utf-8
from django.test import TestCase

from django_mptt_admin.util import get_tree_queryset, get_javascript_value

from ..models import Country

from .utils import read_testdata


class UtilTestCase(TestCase):
    def setUp(self):
        super(UtilTestCase, self).setUp()

        read_testdata()

    def test_get_tree_queryset(self):
        # get default queryset
        qs = get_tree_queryset(Country)
        self.assertEqual(len(qs), 257)
        self.assertEqual(qs[0].name, 'root')

        # subtree
        qs = get_tree_queryset(Country, node_id=Country.objects.get(name='Europe').id)
        self.assertEqual(len(qs), 50)
        self.assertEqual(qs[0].name, u'Åland Islands')

        # max_level 1
        qs = get_tree_queryset(Country, max_level=1)
        self.assertEqual(len(qs), 8)
        self.assertEqual(qs[0].name, 'root')

        # max_level True
        qs = get_tree_queryset(Country, max_level=True)
        self.assertEqual(len(qs), 8)

        # exclude root
        qs = get_tree_queryset(Country, include_root=False)
        self.assertEqual(len(qs), 256)
        self.assertEqual(qs[0].name, 'Africa')

    def test_get_javascript_value(self):
        self.assertEqual(get_javascript_value(True), 'true')
        self.assertEqual(get_javascript_value(False), 'false')
        self.assertEqual(get_javascript_value(10), '10')
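
# A minimal sketch of what django_mptt_admin.util.get_javascript_value could
# look like, inferred only from the assertions in test_get_javascript_value
# above; the library's actual implementation may differ, so this hypothetical
# helper uses a distinct name.
def get_javascript_value_sketch(value):
    # Booleans map to JavaScript literals; everything else is stringified.
    if value is True:
        return 'true'
    if value is False:
        return 'false'
    return str(value)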
#program_skeleton.py
#import load_json_files as bm
import write
import merge as m
import load_df as ldf
import load_vars as lv
import log as log
import clean_df as clean
import download as dl
import gc
import confirm_drcts as cfs
import fix_files as ff
import readwrite as rw
import df_filter as df_f
import realtor_scraper_sheets_3 as scraper
import get_creds as creds
import goog_sheets as sheets
from pprint import pprint
import google_drive as drive
import batch_download as download
import rew_scraper as rew_scraper
import rew_scraper3 as rew3
def program_skeleton(dictionary: dict):
## Batch merge creates a backup of contacts from CSV in batches of no more than 500 contacts per document. Can be expanded. Keeps files from getting too large.
if dictionary['tasks']['environmental_vars']['run'] == True:
dictionary['tasks']['environmental_vars']['log']['environmental_vars_set'] = lv.set_environmental_vars(dictionary['tasks'])
dictionary['tasks']['environmental_vars']['goog_creds'] = creds.get_creds()
dictionary['tasks']['environmental_vars']['sheets_service'] = sheets.get_sheet_service(dictionary['tasks']['environmental_vars']['goog_creds'])
dictionary['tasks']['environmental_vars']['drive_service'] = drive.get_drive_service(dictionary['tasks']['environmental_vars']['goog_creds'])
dictionary['tasks']['environmental_vars']['criteria_sheet_meta'] = sheets.confirm_sheet_ids(dictionary['tasks']['environmental_vars']['criteria_sheet_ids'],dictionary['tasks']['environmental_vars']['sheets_service'])
#dictionary['tasks']['environmental_vars']['output_sheet_meta'] = drive.add_spreadsheet_to_folder(dictionary['tasks']['environmental_vars']['drive_service'],dictionary['tasks']['environmental_vars']['output_folder_id'],dictionary['tasks']['environmental_vars']['date']['datetime'])
#dictionary['tasks']['environmental_vars']['dfs']['cities_search'] = goog_sheets.
#pprint(dictionary['tasks']['environmental_vars']['sheet_meta'])
lv.batchify(dictionary['tasks']['environmental_vars']['criteria_sheet_meta'],dictionary['tasks']['environmental_vars']['batch_size'])
dictionary['tasks']['environmental_vars']['dnn'] = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['dnn'],dictionary['tasks']['environmental_vars']['sheets_service'],True)
#sheets.batch_download(dictionary['tasks']['environmental_vars'])
#print(dictionary['tasks']['environmental_vars']['directories']['log_directory'])
#log.json_dump(dictionary['tasks'])
#log.csv_dump(dictionary['tasks'])
#print(dictionary)
if dictionary['tasks']['scrape_web_data_rew']['run'] == True:
#if dictionary['tasks']['scrape_web_data_sheets']['input_list']['run'] == True:
#pprint(dictionary['tasks']['environmental_vars']['criteria_sheet_meta'])
#input_df = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)
dictionary['tasks']['environmental_vars']['input_list'] = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)
#pprint(dictionary['tasks']['environmental_vars']['sheets_service'])
rew3.initial(dictionary['tasks']['environmental_vars']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'])
#rew_scraper.scrape("agents/areas/toronto-on",dictionary['tasks']['environmental_vars']['sheets_service'],2,2)
#print('true')
if dictionary['tasks']['scrape_web_data_sheets']['run'] == True:
if dictionary['tasks']['scrape_web_data_sheets']['input_list']['run'] == True:
#pprint(dictionary['tasks']['environmental_vars']['criteria_sheet_meta'])
#input_df = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)
dictionary['tasks']['environmental_vars']['input_list'] = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)
scraper.scrape(dictionary['tasks']['environmental_vars']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],dictionary['tasks']['environmental_vars']['drive_service'],dictionary['tasks']['environmental_vars']['output_folder_id'])
#print('true')
#download.batch_download(dictionary['tasks']['environmental_vars'])
if dictionary['tasks']['confirm_folder_structure']['run'] == True:
dictionary['tasks']['confirm_folder_structure']['log']['folder_structure_confirmed'] = cfs.confirm_folder_structure(dictionary)
        #ff.fix_files(dictionary) # fix files if necessary; corrects an earlier mistake on my end
if dictionary['tasks']['scrape_web_data']['run'] == True:
dictionary['tasks']['scrape_web_data']['log']['cities'] = rw.file_list(dictionary['tasks']['environmental_vars']['directories']['cities'])
df = dictionary['tasks']['environmental_vars']['dfs']['cities'] = m.merge_zip_data(dictionary['tasks']['scrape_web_data']['log']['cities'])
df_f.filter_state_data(df,'ct')
#dictionary['tasks']['environmental_vars']['dfs']['cities']['directory'] = df. apply dictionary['tasks']['environmental_vars']['sep'].join((dictionary['tasks']['environmental_vars']['directories']['to_merge'], dictionary['tasks']['environmental_vars']['dfs']['cities'].state_name,dictionary['tasks']['environmental_vars']['dfs']['cities'].city))
df['to_merge'] = dictionary['tasks']['environmental_vars']['directories']['to_merge']
df['directory'] = df[['to_merge','state_name', 'city']].apply(lambda x: dictionary['tasks']['environmental_vars']['sep'].join(x), axis=1)
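        # Example (hypothetical values): with sep='/', a row holding
        # to_merge='data/to_merge', state_name='connecticut', city='hartford'
        # ends up with directory='data/to_merge/connecticut/hartford'.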
#df['period'] = df[['Year', 'quarter']].apply(lambda x: ''.join(x), axis=1)
#print(dictionary['tasks']['environmental_vars']['dfs']['cities'].directory)
scraper.scrape(df)
#dictionary['tasks']['environmental_vars']['dfs'][''] = m.merge_zip_data(dictionary['tasks']['scrape_web_data']['log']['zip_codes'])
#dictionary['tasks']['environmental_vars']['dfs']['zip_codes'] = rw.file_list(dictionary['tasks']['environmental_vars']['files']['zip_database'])
if dictionary['tasks']['merge_data']['run'] == True:
dictionary['tasks']['merge_data']['log']['files_to_merge'] = rw.file_list_walk(dictionary['tasks']['environmental_vars']['directories']['to_merge'])
dictionary['tasks']['environmental_vars']['dfs']['master_merge'] = m.merge_agent_data(dictionary['tasks']['merge_data']['log']['files_to_merge'])
#rw.df_toJson(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['master_merge'],dictionary['tasks']['environmental_vars']['dfs']['master_merge'],dictionary['tasks']['environmental_vars']['directories']['merged_data'])
rw.df_toCsv(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_raw'],dictionary['tasks']['environmental_vars']['dfs']['master_merge'],dictionary['tasks']['environmental_vars']['directories']['merged_data'])
rw.df_toJson(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_raw'],dictionary['tasks']['environmental_vars']['dfs']['master_merge'],dictionary['tasks']['environmental_vars']['directories']['merged_data'])
#print(dictionary['tasks']['environmental_vars']['dfs']['master_merge'])
if dictionary['tasks']['filter_data']['run'] == True:
print('filtering_data')
dictionary['tasks']['filter_data']['log']['files_to_filter'] = rw.file_list(dictionary['tasks']['environmental_vars']['directories']['merged_data'])
dictionary['tasks']['filter_data']['log']['dnn_filter'] = rw.file_list(dictionary['tasks']['environmental_vars']['directories']['dnn'])
df = dictionary['tasks']['environmental_vars']['dfs']['dnn'] = m.merge_csv(dictionary['tasks']['filter_data']['log']['dnn_filter'])
df["first_name"] = df["first_name"].str.lower()
df["last_name"] = df["last_name"].str.lower()
        ## Check whether the df is already in memory; build it if not.
try:
if dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'].empty:
                #if the lookup succeeds and the df is empty, fill it anyway
dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'] = m.merge_json(dictionary['tasks']['filter_data']['log']['files_to_filter'])
else:
                #if it already exists, move on
print('The Df already exists')
pass
#do something
        except (KeyError, AttributeError):
            #if the df has not been created yet, the lookup raises; create it
            print('The Df does not exist')
dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'] = m.merge_json(dictionary['tasks']['filter_data']['log']['files_to_filter'])
df_f.clean_realtor_data(dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'])
df_f.filter_realtor_data(dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'],df,800000,3)
rw.df_toCsv(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_mapped'],dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'],dictionary['tasks']['environmental_vars']['directories']['mapped_data'])
rw.df_toJson(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_mapped'],dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'],dictionary['tasks']['environmental_vars']['directories']['mapped_data'])
#if dictionary['tasks']['extract_agent_data']['run'] == True:
# dictionary['tasks']['environmental_vars']['dfs']['agent_data'] = m.merge_agent_data(dictionary['tasks'])
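

# A minimal sketch (all values hypothetical) of the task dictionary that
# program_skeleton expects. Only the 'run' switches and the nested 'log'/'dfs'
# containers read above are shown; a real config also needs the directories,
# criteria_sheet_ids, file_names, batch_size, sep and output_folder_id keys.
example_tasks = {
    'tasks': {
        'environmental_vars': {'run': False, 'log': {}, 'dfs': {}},
        'scrape_web_data_rew': {'run': False},
        'scrape_web_data_sheets': {'run': False, 'input_list': {'run': False}},
        'confirm_folder_structure': {'run': False, 'log': {}},
        'scrape_web_data': {'run': False, 'log': {}},
        'merge_data': {'run': False, 'log': {}},
        'filter_data': {'run': False, 'log': {}},
    }
}
# program_skeleton(example_tasks)  # with every run flag False, all stages are skipped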
from urllib.error import URLError
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import pymysql
import ssl
from pymysql import Error
def decode_page(page_bytes, charsets=('utf-8',)):
"""通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)"""
page_html = None
for charset in charsets:
try:
page_html = page_bytes.decode(charset)
break
except UnicodeDecodeError:
pass
# logging.error('Decode:', error)
return page_html
def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):
"""获取页面的HTML代码(通过递归实现指定次数的重试操作)"""
page_html = None
try:
page_html = decode_page(urlopen(seed_url).read(), charsets)
except URLError:
# logging.error('URL:', error)
if retry_times > 0:
return get_page_html(seed_url, retry_times=retry_times - 1,
charsets=charsets)
return page_html
def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
"""从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)"""
soup = BeautifulSoup(page_html, 'html.parser')
for h1 in soup.find_all('h1'):
return h1.get_text()
def get_link_list(page_html):
soup = BeautifulSoup(page_html, 'html.parser')
list = []
for a_link in soup.find_all('a'):
link = a_link['href']
if ('https://' in link) or ('http://' in link):
list.append(link)
# print(page_html)
#print(list)
return list
def start_crawl(seed_url, match_pattern, *, max_depth=-1):
"""开始执行爬虫程序并对指定的数据进行持久化操作"""
# conn = pymysql.connect(host='localhost', port=3306,
# database='crawler', user='root',
# password='Huhaohao@123', charset='utf8')
conn = pymysql.connect(host='localhost', port=3306,
user='root', password='Huhaohao@123', charset='utf8')
with conn.cursor() as cursor:
#cursor.execute("create database crawler if not exists;")
cursor.execute('use crawler')
cursor.execute(
"CREATE TABLE IF NOT EXISTS tb_result " +
"(" +
"title TEXT NOT NULL," +
"link TEXT NOT NULL" +
")"
)
try:
with conn.cursor() as cursor:
url_list = [seed_url]
# 通过下面的字典避免重复抓取并控制抓取深度
visited_url_list = {seed_url: 0}
while url_list:
current_url = url_list.pop(0)
depth = visited_url_list[current_url]
if depth != max_depth:
# 尝试用utf-8/gbk/gb2312三种字符集进行页面解码
page_html = get_page_html(current_url, charsets=('utf-8', 'gbk', 'gb2312'))
links_list = get_link_list(page_html)
param_list = []
for link in links_list:
if link not in visited_url_list:
visited_url_list[link] = depth + 1
page_html = get_page_html(link, charsets=('utf-8', 'gbk', 'gb2312'))
headings = get_matched_parts(page_html, r'<h1>(.*)<span')
if headings:
param_list.append((headings, link))
cursor.executemany('insert into tb_result(title, link) values(%s, %s)',
param_list)
conn.commit()
except Error:
pass
# logging.error('SQL:', error)
finally:
conn.close()
def main():
"""主函数"""
ssl._create_default_https_context = ssl._create_unverified_context
start_crawl('http://sports.sohu.com/nba_a.shtml',
r'<a[^>]*href=["\'](.*?)["\']',
max_depth=2)
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "53fae0103168f4074ba0645c33e4640fcefdfc96",
"index": 731,
"step-1": "<mask token>\n\n\ndef decode_page(page_bytes, charsets=('utf-8',)):\n \"\"\"通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)\"\"\"\n page_html = None\n for charset in charsets:\n try:\n page_html = page_bytes.decode(charset)\n break\n except UnicodeDecodeError:\n pass\n return page_html\n\n\ndef get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):\n \"\"\"获取页面的HTML代码(通过递归实现指定次数的重试操作)\"\"\"\n page_html = None\n try:\n page_html = decode_page(urlopen(seed_url).read(), charsets)\n except URLError:\n if retry_times > 0:\n return get_page_html(seed_url, retry_times=retry_times - 1,\n charsets=charsets)\n return page_html\n\n\ndef get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):\n \"\"\"从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)\"\"\"\n soup = BeautifulSoup(page_html, 'html.parser')\n for h1 in soup.find_all('h1'):\n return h1.get_text()\n\n\ndef get_link_list(page_html):\n soup = BeautifulSoup(page_html, 'html.parser')\n list = []\n for a_link in soup.find_all('a'):\n link = a_link['href']\n if 'https://' in link or 'http://' in link:\n list.append(link)\n return list\n\n\ndef start_crawl(seed_url, match_pattern, *, max_depth=-1):\n \"\"\"开始执行爬虫程序并对指定的数据进行持久化操作\"\"\"\n conn = pymysql.connect(host='localhost', port=3306, user='root',\n password='Huhaohao@123', charset='utf8')\n with conn.cursor() as cursor:\n cursor.execute('use crawler')\n cursor.execute('CREATE TABLE IF NOT EXISTS tb_result ' + '(' +\n 'title TEXT NOT NULL,' + 'link TEXT NOT NULL' + ')')\n try:\n with conn.cursor() as cursor:\n url_list = [seed_url]\n visited_url_list = {seed_url: 0}\n while url_list:\n current_url = url_list.pop(0)\n depth = visited_url_list[current_url]\n if depth != max_depth:\n page_html = get_page_html(current_url, charsets=(\n 'utf-8', 'gbk', 'gb2312'))\n links_list = get_link_list(page_html)\n param_list = []\n for link in links_list:\n if link not in visited_url_list:\n visited_url_list[link] = depth + 1\n page_html = get_page_html(link, charsets=(\n 'utf-8', 'gbk', 'gb2312'))\n headings = get_matched_parts(page_html,\n '<h1>(.*)<span')\n if headings:\n param_list.append((headings, link))\n cursor.executemany(\n 'insert into tb_result(title, link) values(%s, %s)',\n param_list)\n conn.commit()\n except Error:\n pass\n finally:\n conn.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef decode_page(page_bytes, charsets=('utf-8',)):\n \"\"\"通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)\"\"\"\n page_html = None\n for charset in charsets:\n try:\n page_html = page_bytes.decode(charset)\n break\n except UnicodeDecodeError:\n pass\n return page_html\n\n\ndef get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):\n \"\"\"获取页面的HTML代码(通过递归实现指定次数的重试操作)\"\"\"\n page_html = None\n try:\n page_html = decode_page(urlopen(seed_url).read(), charsets)\n except URLError:\n if retry_times > 0:\n return get_page_html(seed_url, retry_times=retry_times - 1,\n charsets=charsets)\n return page_html\n\n\ndef get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):\n \"\"\"从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)\"\"\"\n soup = BeautifulSoup(page_html, 'html.parser')\n for h1 in soup.find_all('h1'):\n return h1.get_text()\n\n\ndef get_link_list(page_html):\n soup = BeautifulSoup(page_html, 'html.parser')\n list = []\n for a_link in soup.find_all('a'):\n link = a_link['href']\n if 'https://' in link or 'http://' in link:\n list.append(link)\n return list\n\n\ndef start_crawl(seed_url, match_pattern, *, max_depth=-1):\n \"\"\"开始执行爬虫程序并对指定的数据进行持久化操作\"\"\"\n conn = pymysql.connect(host='localhost', port=3306, user='root',\n password='Huhaohao@123', charset='utf8')\n with conn.cursor() as cursor:\n cursor.execute('use crawler')\n cursor.execute('CREATE TABLE IF NOT EXISTS tb_result ' + '(' +\n 'title TEXT NOT NULL,' + 'link TEXT NOT NULL' + ')')\n try:\n with conn.cursor() as cursor:\n url_list = [seed_url]\n visited_url_list = {seed_url: 0}\n while url_list:\n current_url = url_list.pop(0)\n depth = visited_url_list[current_url]\n if depth != max_depth:\n page_html = get_page_html(current_url, charsets=(\n 'utf-8', 'gbk', 'gb2312'))\n links_list = get_link_list(page_html)\n param_list = []\n for link in links_list:\n if link not in visited_url_list:\n visited_url_list[link] = depth + 1\n page_html = get_page_html(link, charsets=(\n 'utf-8', 'gbk', 'gb2312'))\n headings = get_matched_parts(page_html,\n '<h1>(.*)<span')\n if headings:\n param_list.append((headings, link))\n cursor.executemany(\n 'insert into tb_result(title, link) values(%s, %s)',\n param_list)\n conn.commit()\n except Error:\n pass\n finally:\n conn.close()\n\n\ndef main():\n \"\"\"主函数\"\"\"\n ssl._create_default_https_context = ssl._create_unverified_context\n start_crawl('http://sports.sohu.com/nba_a.shtml',\n '<a[^>]*href=[\"\\\\\\'](.*?)[\"\\\\\\']', max_depth=2)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef decode_page(page_bytes, charsets=('utf-8',)):\n \"\"\"通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)\"\"\"\n page_html = None\n for charset in charsets:\n try:\n page_html = page_bytes.decode(charset)\n break\n except UnicodeDecodeError:\n pass\n return page_html\n\n\ndef get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):\n \"\"\"获取页面的HTML代码(通过递归实现指定次数的重试操作)\"\"\"\n page_html = None\n try:\n page_html = decode_page(urlopen(seed_url).read(), charsets)\n except URLError:\n if retry_times > 0:\n return get_page_html(seed_url, retry_times=retry_times - 1,\n charsets=charsets)\n return page_html\n\n\ndef get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):\n \"\"\"从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)\"\"\"\n soup = BeautifulSoup(page_html, 'html.parser')\n for h1 in soup.find_all('h1'):\n return h1.get_text()\n\n\ndef get_link_list(page_html):\n soup = BeautifulSoup(page_html, 'html.parser')\n list = []\n for a_link in soup.find_all('a'):\n link = a_link['href']\n if 'https://' in link or 'http://' in link:\n list.append(link)\n return list\n\n\ndef start_crawl(seed_url, match_pattern, *, max_depth=-1):\n \"\"\"开始执行爬虫程序并对指定的数据进行持久化操作\"\"\"\n conn = pymysql.connect(host='localhost', port=3306, user='root',\n password='Huhaohao@123', charset='utf8')\n with conn.cursor() as cursor:\n cursor.execute('use crawler')\n cursor.execute('CREATE TABLE IF NOT EXISTS tb_result ' + '(' +\n 'title TEXT NOT NULL,' + 'link TEXT NOT NULL' + ')')\n try:\n with conn.cursor() as cursor:\n url_list = [seed_url]\n visited_url_list = {seed_url: 0}\n while url_list:\n current_url = url_list.pop(0)\n depth = visited_url_list[current_url]\n if depth != max_depth:\n page_html = get_page_html(current_url, charsets=(\n 'utf-8', 'gbk', 'gb2312'))\n links_list = get_link_list(page_html)\n param_list = []\n for link in links_list:\n if link not in visited_url_list:\n visited_url_list[link] = depth + 1\n page_html = get_page_html(link, charsets=(\n 'utf-8', 'gbk', 'gb2312'))\n headings = get_matched_parts(page_html,\n '<h1>(.*)<span')\n if headings:\n param_list.append((headings, link))\n cursor.executemany(\n 'insert into tb_result(title, link) values(%s, %s)',\n param_list)\n conn.commit()\n except Error:\n pass\n finally:\n conn.close()\n\n\ndef main():\n \"\"\"主函数\"\"\"\n ssl._create_default_https_context = ssl._create_unverified_context\n start_crawl('http://sports.sohu.com/nba_a.shtml',\n '<a[^>]*href=[\"\\\\\\'](.*?)[\"\\\\\\']', max_depth=2)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from urllib.error import URLError\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport pymysql\nimport ssl\nfrom pymysql import Error\n\n\ndef decode_page(page_bytes, charsets=('utf-8',)):\n \"\"\"通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)\"\"\"\n page_html = None\n for charset in charsets:\n try:\n page_html = page_bytes.decode(charset)\n break\n except UnicodeDecodeError:\n pass\n return page_html\n\n\ndef get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):\n \"\"\"获取页面的HTML代码(通过递归实现指定次数的重试操作)\"\"\"\n page_html = None\n try:\n page_html = decode_page(urlopen(seed_url).read(), charsets)\n except URLError:\n if retry_times > 0:\n return get_page_html(seed_url, retry_times=retry_times - 1,\n charsets=charsets)\n return page_html\n\n\ndef get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):\n \"\"\"从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)\"\"\"\n soup = BeautifulSoup(page_html, 'html.parser')\n for h1 in soup.find_all('h1'):\n return h1.get_text()\n\n\ndef get_link_list(page_html):\n soup = BeautifulSoup(page_html, 'html.parser')\n list = []\n for a_link in soup.find_all('a'):\n link = a_link['href']\n if 'https://' in link or 'http://' in link:\n list.append(link)\n return list\n\n\ndef start_crawl(seed_url, match_pattern, *, max_depth=-1):\n \"\"\"开始执行爬虫程序并对指定的数据进行持久化操作\"\"\"\n conn = pymysql.connect(host='localhost', port=3306, user='root',\n password='Huhaohao@123', charset='utf8')\n with conn.cursor() as cursor:\n cursor.execute('use crawler')\n cursor.execute('CREATE TABLE IF NOT EXISTS tb_result ' + '(' +\n 'title TEXT NOT NULL,' + 'link TEXT NOT NULL' + ')')\n try:\n with conn.cursor() as cursor:\n url_list = [seed_url]\n visited_url_list = {seed_url: 0}\n while url_list:\n current_url = url_list.pop(0)\n depth = visited_url_list[current_url]\n if depth != max_depth:\n page_html = get_page_html(current_url, charsets=(\n 'utf-8', 'gbk', 'gb2312'))\n links_list = get_link_list(page_html)\n param_list = []\n for link in links_list:\n if link not in visited_url_list:\n visited_url_list[link] = depth + 1\n page_html = get_page_html(link, charsets=(\n 'utf-8', 'gbk', 'gb2312'))\n headings = get_matched_parts(page_html,\n '<h1>(.*)<span')\n if headings:\n param_list.append((headings, link))\n cursor.executemany(\n 'insert into tb_result(title, link) values(%s, %s)',\n param_list)\n conn.commit()\n except Error:\n pass\n finally:\n conn.close()\n\n\ndef main():\n \"\"\"主函数\"\"\"\n ssl._create_default_https_context = ssl._create_unverified_context\n start_crawl('http://sports.sohu.com/nba_a.shtml',\n '<a[^>]*href=[\"\\\\\\'](.*?)[\"\\\\\\']', max_depth=2)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from urllib.error import URLError\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport pymysql\nimport ssl\nfrom pymysql import Error\n\ndef decode_page(page_bytes, charsets=('utf-8',)):\n \"\"\"通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)\"\"\"\n page_html = None\n for charset in charsets:\n try:\n page_html = page_bytes.decode(charset)\n break\n except UnicodeDecodeError:\n pass\n # logging.error('Decode:', error)\n return page_html\n\n\ndef get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):\n \"\"\"获取页面的HTML代码(通过递归实现指定次数的重试操作)\"\"\"\n page_html = None\n try:\n page_html = decode_page(urlopen(seed_url).read(), charsets)\n except URLError:\n # logging.error('URL:', error)\n if retry_times > 0:\n return get_page_html(seed_url, retry_times=retry_times - 1,\n charsets=charsets)\n return page_html\n\n\ndef get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):\n \"\"\"从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)\"\"\"\n soup = BeautifulSoup(page_html, 'html.parser')\n for h1 in soup.find_all('h1'):\n return h1.get_text()\n\n\ndef get_link_list(page_html):\n soup = BeautifulSoup(page_html, 'html.parser')\n list = []\n for a_link in soup.find_all('a'):\n link = a_link['href']\n if ('https://' in link) or ('http://' in link):\n list.append(link)\n # print(page_html)\n #print(list)\n return list\n\n\ndef start_crawl(seed_url, match_pattern, *, max_depth=-1):\n \"\"\"开始执行爬虫程序并对指定的数据进行持久化操作\"\"\"\n # conn = pymysql.connect(host='localhost', port=3306,\n # database='crawler', user='root',\n # password='Huhaohao@123', charset='utf8')\n\n conn = pymysql.connect(host='localhost', port=3306,\n user='root', password='Huhaohao@123', charset='utf8')\n\n with conn.cursor() as cursor:\n #cursor.execute(\"create database crawler if not exists;\")\n cursor.execute('use crawler')\n cursor.execute(\n \"CREATE TABLE IF NOT EXISTS tb_result \" +\n \"(\" +\n \"title TEXT NOT NULL,\" +\n \"link TEXT NOT NULL\" +\n \")\"\n )\n\n\n\n\n try:\n with conn.cursor() as cursor:\n url_list = [seed_url]\n # 通过下面的字典避免重复抓取并控制抓取深度\n visited_url_list = {seed_url: 0}\n while url_list:\n current_url = url_list.pop(0)\n depth = visited_url_list[current_url]\n if depth != max_depth:\n # 尝试用utf-8/gbk/gb2312三种字符集进行页面解码\n page_html = get_page_html(current_url, charsets=('utf-8', 'gbk', 'gb2312'))\n links_list = get_link_list(page_html)\n param_list = []\n for link in links_list:\n if link not in visited_url_list:\n visited_url_list[link] = depth + 1\n page_html = get_page_html(link, charsets=('utf-8', 'gbk', 'gb2312'))\n headings = get_matched_parts(page_html, r'<h1>(.*)<span')\n if headings:\n param_list.append((headings, link))\n cursor.executemany('insert into tb_result(title, link) values(%s, %s)',\n param_list)\n conn.commit()\n except Error:\n pass\n # logging.error('SQL:', error)\n finally:\n conn.close()\n\ndef main():\n \"\"\"主函数\"\"\"\n ssl._create_default_https_context = ssl._create_unverified_context\n start_crawl('http://sports.sohu.com/nba_a.shtml',\n r'<a[^>]*href=[\"\\'](.*?)[\"\\']',\n max_depth=2)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import socket
from threading import Thread
clients = {}
addresses = {}
host = '127.0.0.1'
port = 5678
active = []
addr = (host, port)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(addr)
groups = []
def broadcast(msg, prefix=""): # prefix is for name identification.
"""Broadcasts a message to all the clients."""
for sock in clients:
sock.send(bytes(prefix, "utf8")+msg)
def broadcast_file(msg):
    """Relay raw bytes (e.g. file data) to every connected client."""
    for sock in clients:
        sock.send(msg)
def private_message(address, message):
    """Send a message only to the client whose address string matches."""
    message = '<private>' + message
    # Find the client whose socket repr contains the "(host, port)" string
    receiver = list(filter(lambda x: address in str(x), clients))[0]
    receiver.send(bytes(message, 'utf-8'))
def accept_incoming_connections():
"""Sets up handling for incoming clients."""
while True:
client, client_address = server.accept()
print(str(client_address[0]) + ":" + str(client_address[1]) + " has connected.")
addresses[client] = client_address
Thread(target=handle_client, args=(client,)).start()
def handle_client(client): # Takes client socket as argument.
"""Handles a single client connection."""
name = client.recv(2048).decode("utf8")
welcome = 'Welcome %s! Enter {quit} to exit.' % name
try:
client.send(bytes(welcome, "utf8"))
msg = "%s: has joined the chat!" % name
broadcast(bytes(msg, "utf8"))
clients[client] = name
temp_client = {'Address':addresses[client],'Name':clients[client]}
active.append(temp_client)
broadcast(bytes(str(active),'utf-8'))
while True:
msg = client.recv(2048)
try:
if '(' in msg.decode('utf-8') and ')' in msg.decode('utf-8'):
temp = msg.decode('utf-8').split(')')
address = temp[0] + ')'
private_message(address,temp[1])
elif msg != bytes("{quit}", "utf8"):
broadcast(msg, "<global>" + name + ": ")
print(client)
else:
#client.send(bytes("{quit}", "utf8"))
client.close()
active.remove({'Address':addresses[client],'Name':clients[client]})
del clients[client]
broadcast(bytes("%s has left the chat." % name, "utf8"))
broadcast(bytes(str(active),'utf-8'))
break
            except:
                # Non-text payloads (e.g. file transfers) fail to decode as
                # UTF-8; relay the raw bytes to everyone instead.
                print(msg)
                broadcast_file(msg)
except Exception as e:
print(e)
if __name__ == "__main__":
    server.listen(5)  # Backlog of 5: at most 5 pending connections may queue.
print("Waiting for connection...")
accept_clients_thread = Thread(target=accept_incoming_connections)
accept_clients_thread.start() # Starts the infinite loop.
accept_clients_thread.join()
server.close()
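
# Hedged companion sketch (added): a minimal client for the server above. The
# protocol is inferred from handle_client -- the first payload is the user's
# name, "{quit}" disconnects, and a message prefixed with a "(host, port)"
# tuple string is routed as a private message. Names and timings here are
# illustrative assumptions.
import time


def demo_client(name='alice', host='127.0.0.1', port=5678):
    """Connect, introduce ourselves, say hello, then leave."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    sock.send(bytes(name, 'utf8'))           # server reads the name first
    print(sock.recv(2048).decode('utf8'))    # welcome banner
    sock.send(bytes('hello everyone', 'utf8'))
    time.sleep(0.5)  # crude pacing; TCP may otherwise coalesce the sends
    sock.send(bytes('{quit}', 'utf8'))       # ask the server to drop us
    sock.close()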

# === end of sample (blob_id 9f02313b6f91f83e3a8b4af8d9447b1d8f3558f6) ===

# Gunicorn configuration: bind address and worker count.
import os

bind = '0.0.0.0:8000'
# Environment variables are strings; coerce explicitly so a bad value fails fast.
workers = int(os.environ['GET_KEYS_ACCOUNTS_WORKERS'])
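
# Hedged usage note (added): one way this config might be launched; the file
# name and the WSGI target "myservice.wsgi:app" are assumptions, since the
# original does not name them.
#
#   GET_KEYS_ACCOUNTS_WORKERS=4 gunicorn -c gunicorn_conf.py myservice.wsgi:app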

# === end of sample (blob_id d84a7e16471c604283c81412653e037ecdb19102) ===

"""
Django settings for geobombay project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
try:
    SECRET_KEY
except NameError:
    SECRET_FILE = os.path.join(BASE_DIR, 'secret.txt')
    try:
        SECRET_KEY = open(SECRET_FILE).read().strip()
    except IOError:
        try:
            from random import choice
            SECRET_KEY = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
            # open() instead of the Python 2-only file(); persist the key for reuse
            with open(SECRET_FILE, 'w') as secret:
                secret.write(SECRET_KEY)
        except IOError:
            # The original built this Exception without raising it; raise it so
            # the misconfiguration is actually reported.
            raise Exception('Please create a %s file with random characters to generate your secret key!' % SECRET_FILE)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
#'suit', #Django Suit, skin for the admin
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'leaflet',
'cts',
'wards',
'bmc',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'geobombay.urls'
WSGI_APPLICATION = 'geobombay.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'geobombay'
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'assets', 'collected-static')
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'assets', 'static'),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
'django.core.context_processors.request',
)
#Global map / leaflet settings (for django-leaflet plugin we use for admin)
LEAFLET_CONFIG = {
'DEFAULT_CENTER': (19, 72.85521,),
'DEFAULT_ZOOM': 11,
}
try:
    from local_settings import *
except ImportError:
    # No local_settings module present; fall back to the defaults above.
    pass
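
# Hedged companion sketch (added): a minimal local_settings.py that the
# import-star hook above would pick up. Every value below is an illustrative
# assumption, not taken from the original project.
#
#   # local_settings.py
#   DEBUG = False
#   TEMPLATE_DEBUG = False
#   ALLOWED_HOSTS = ['geobombay.example.org']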

# === end of sample (blob_id 32ca107fde4c98b61d85f6648f30c7601b31c7f3) ===

from flask_restful import Resource
from flask import jsonify, make_response, request
from ..models.Users import UsersModel
from ..models.Incidents import IncidentsModel
from app.api.validations.validations import Validations
class UsersView(Resource):
def __init__(self):
self.db = UsersModel()
def post(self):
data = request.get_json()
resp = Validations().validate_user_inputs(data)
username = data['username']
user = self.db.register_users(username)
        if user:  # a non-empty lookup result means the username is taken
            return make_response(jsonify({
                'Message': 'Username already exists'
            }), 202)
        elif resp == str(resp):  # validations return an error string on failure
            return make_response(jsonify({
                "Message": resp
            }), 201)
else:
self.db.save(resp)
return make_response(jsonify({
"Message": "User Registered. Please login"
}), 201)
def get(self):
access_token = Validations().get_access_token()
if not access_token:
return jsonify({"Message": "Token needed. Please login"})
else:
users = self.db.get_users()
return make_response(jsonify({
"Users": users,
"Message": "All Users"
}), 200)
class LoginView(Resource):
def __init__(self):
self.db = UsersModel()
self.user_db = IncidentsModel()
def post(self):
data = request.get_json()
username = data['username']
password = data['password']
auth = self.db.authenticate(username, password)
return auth
class UserView(Resource):
def __init__(self):
self.db = UsersModel()
def get(self, id):
access_token = Validations().get_access_token()
if not access_token:
return jsonify({"Message": "Token needed. Please login"})
else:
res = self.db.get_single_user(id)
return make_response(jsonify({
'Response': res
}), 201)
def delete(self, id):
access_token = Validations().get_access_token()
user = self.db.check_user_id(id)
if not access_token:
return jsonify({"Message": "Token needed. Please login"})
elif not user:
return jsonify({"Message": "User ID does not exist"})
else:
self.db.delete_user(id)
return {
"Message": "User Deleted"
}
def put(self, id):
access_token = Validations().get_access_token()
user = self.db.check_user_id(id)
if not access_token:
return jsonify({"Message": "Token needed. Please login"})
elif not user:
return jsonify({"Message": "User ID does not exist"})
if access_token:
data = request.get_json()
resp = Validations().validate_user_inputs(data)
            if resp == str(resp):  # validation error string
return make_response(jsonify({
"Message": resp
}), 201)
else:
self.db.update_user(id, resp)
return make_response(jsonify({
'Message': 'User Details Updated'
}), 201)
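
# Hedged wiring sketch (added): how these resources might be registered with
# Flask-RESTful. The app setup and URL prefixes are assumptions, since the
# original module does not include any routing.
#
#   from flask import Flask
#   from flask_restful import Api
#
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(UsersView, '/api/v1/users')
#   api.add_resource(LoginView, '/api/v1/auth/login')
#   api.add_resource(UserView, '/api/v1/users/<int:id>')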

# === end of sample (blob_id 0188355f84054143bd4ff9da63f1128e9eb5b23b) ===

import os.path as path
from googleapiclient.discovery import build
from google.oauth2 import service_account
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
# The ID of the sample spreadsheet (ranges are defined per function below).
SAMPLE_SPREADSHEET_ID = '1FSMATLJUNCbV8-XYM8h7yHoWRSGA8JFsaECOZy_i2T8'
def main():
service_account_json = path.join(path.dirname(
path.abspath(__file__)), 'service_account.json')
credentials = service_account.Credentials.from_service_account_file(
service_account_json, scopes=SCOPES)
service = build('sheets', 'v4', credentials=credentials)
sheet_service = service.spreadsheets()
print('Getting pie chart information')
get_pie_chart_info(sheet_service)
print('Getting line chart information')
get_line_chart_info(sheet_service)
print('Getting boolean information')
get_bool_info(sheet_service)
def get_pie_chart_info(sheet_service):
sample_range_name = 'data!F:G'
result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
range=sample_range_name).execute()
values = result.get('values', [])
if not values:
print('No data found.')
else:
print('Race, Breakdown:')
for row in values:
            # Print the two returned columns (spreadsheet columns F and G).
print('%s, %s' % (row[0], row[1]))
def get_line_chart_info(sheet_service):
sample_range_name = 'data!D:D'
result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
range=sample_range_name).execute()
values = result.get('values', [])
if not values:
print('No data found.')
else:
print('Time series information:')
for row in values:
print('%s' % row[0])
def get_bool_info(sheet_service):
sample_range_name = 'data!B1'
result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
range=sample_range_name).execute()
values = result.get('values', [])
if not values:
print('No data found.')
else:
        print('Boolean information:')
for row in values:
print(row[0] == 'TRUE')
if __name__ == '__main__':
main()
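# Setup note (editorial assumption based on the imports above, not stated in
# the script): this needs the google-api-python-client and google-auth
# packages, a service_account.json key file next to this script, and the
# spreadsheet shared with the service account's email address.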
|
normal
|
{
"blob_id": "f9261c1844cc629c91043d1221d0b76f6e22fef6",
"index": 6157,
"step-1": "<mask token>\n\n\ndef main():\n service_account_json = path.join(path.dirname(path.abspath(__file__)),\n 'service_account.json')\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json, scopes=SCOPES)\n service = build('sheets', 'v4', credentials=credentials)\n sheet_service = service.spreadsheets()\n print('Getting pie chart information')\n get_pie_chart_info(sheet_service)\n print('Getting line chart information')\n get_line_chart_info(sheet_service)\n print('Getting boolean information')\n get_bool_info(sheet_service)\n\n\ndef get_pie_chart_info(sheet_service):\n sample_range_name = 'data!F:G'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Race, Breakdown:')\n for row in values:\n print('%s, %s' % (row[0], row[1]))\n\n\ndef get_line_chart_info(sheet_service):\n sample_range_name = 'data!D:D'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print('%s' % row[0])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n service_account_json = path.join(path.dirname(path.abspath(__file__)),\n 'service_account.json')\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json, scopes=SCOPES)\n service = build('sheets', 'v4', credentials=credentials)\n sheet_service = service.spreadsheets()\n print('Getting pie chart information')\n get_pie_chart_info(sheet_service)\n print('Getting line chart information')\n get_line_chart_info(sheet_service)\n print('Getting boolean information')\n get_bool_info(sheet_service)\n\n\ndef get_pie_chart_info(sheet_service):\n sample_range_name = 'data!F:G'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Race, Breakdown:')\n for row in values:\n print('%s, %s' % (row[0], row[1]))\n\n\ndef get_line_chart_info(sheet_service):\n sample_range_name = 'data!D:D'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print('%s' % row[0])\n\n\ndef get_bool_info(sheet_service):\n sample_range_name = 'data!B1'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print(row[0] == 'TRUE')\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nSCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']\nSAMPLE_SPREADSHEET_ID = '1FSMATLJUNCbV8-XYM8h7yHoWRSGA8JFsaECOZy_i2T8'\n\n\ndef main():\n service_account_json = path.join(path.dirname(path.abspath(__file__)),\n 'service_account.json')\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json, scopes=SCOPES)\n service = build('sheets', 'v4', credentials=credentials)\n sheet_service = service.spreadsheets()\n print('Getting pie chart information')\n get_pie_chart_info(sheet_service)\n print('Getting line chart information')\n get_line_chart_info(sheet_service)\n print('Getting boolean information')\n get_bool_info(sheet_service)\n\n\ndef get_pie_chart_info(sheet_service):\n sample_range_name = 'data!F:G'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Race, Breakdown:')\n for row in values:\n print('%s, %s' % (row[0], row[1]))\n\n\ndef get_line_chart_info(sheet_service):\n sample_range_name = 'data!D:D'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print('%s' % row[0])\n\n\ndef get_bool_info(sheet_service):\n sample_range_name = 'data!B1'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print(row[0] == 'TRUE')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os.path as path\nfrom googleapiclient.discovery import build\nfrom google.oauth2 import service_account\nSCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']\nSAMPLE_SPREADSHEET_ID = '1FSMATLJUNCbV8-XYM8h7yHoWRSGA8JFsaECOZy_i2T8'\n\n\ndef main():\n service_account_json = path.join(path.dirname(path.abspath(__file__)),\n 'service_account.json')\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json, scopes=SCOPES)\n service = build('sheets', 'v4', credentials=credentials)\n sheet_service = service.spreadsheets()\n print('Getting pie chart information')\n get_pie_chart_info(sheet_service)\n print('Getting line chart information')\n get_line_chart_info(sheet_service)\n print('Getting boolean information')\n get_bool_info(sheet_service)\n\n\ndef get_pie_chart_info(sheet_service):\n sample_range_name = 'data!F:G'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Race, Breakdown:')\n for row in values:\n print('%s, %s' % (row[0], row[1]))\n\n\ndef get_line_chart_info(sheet_service):\n sample_range_name = 'data!D:D'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print('%s' % row[0])\n\n\ndef get_bool_info(sheet_service):\n sample_range_name = 'data!B1'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print(row[0] == 'TRUE')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import os.path as path\nfrom googleapiclient.discovery import build\nfrom google.oauth2 import service_account\n\n# If modifying these scopes, delete the file token.pickle.\nSCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']\n\n# The ID and range of a sample spreadsheet.\nSAMPLE_SPREADSHEET_ID = '1FSMATLJUNCbV8-XYM8h7yHoWRSGA8JFsaECOZy_i2T8'\n\n\ndef main():\n service_account_json = path.join(path.dirname(\n path.abspath(__file__)), 'service_account.json')\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json, scopes=SCOPES)\n service = build('sheets', 'v4', credentials=credentials)\n sheet_service = service.spreadsheets()\n\n print('Getting pie chart information')\n get_pie_chart_info(sheet_service)\n\n print('Getting line chart information')\n get_line_chart_info(sheet_service)\n\n print('Getting boolean information')\n get_bool_info(sheet_service)\n\n\ndef get_pie_chart_info(sheet_service):\n sample_range_name = 'data!F:G'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n\n if not values:\n print('No data found.')\n else:\n print('Race, Breakdown:')\n for row in values:\n # Print columns A and E, which correspond to indices 0 and 4.\n print('%s, %s' % (row[0], row[1]))\n\n\ndef get_line_chart_info(sheet_service):\n sample_range_name = 'data!D:D'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print('%s' % row[0])\n\n\ndef get_bool_info(sheet_service):\n sample_range_name = 'data!B1'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print(row[0] == 'TRUE')\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
import numpy as np
import random
import argparse
import networkx as nx
from gensim.models import Word2Vec
from utils import read_node_label, plot_embeddings
class node2vec_walk():
def __init__(self, nx_G, is_directed, p, q):
self.G = nx_G
self.is_directed = is_directed
self.p = p
self.q = q
def node2vec_walk(self, walk_length, start_node):
G = self.G
alias_nodes = self.alias_nodes
alias_edges = self.alias_edges
walk = [start_node]
while len(walk) < walk_length:
curr = walk[-1]
cur_nbrs = sorted(G.neighbors(curr))
if len(cur_nbrs) > 0:
if len(walk) == 1:
walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0], alias_nodes[curr][1])])
else:
prev = walk[-2]
next = cur_nbrs[alias_draw(alias_edges[(prev, curr)][0], alias_edges[(prev, curr)][1])]
walk.append(next)
else:
break
return walk
def simulate_walks(self, num_walks, walk_length):
G = self.G
walks = []
nodes = list(G.nodes())
print("Walk iteration...")
for walk_iter in range(num_walks):
print(f"{walk_iter + 1}/{num_walks}")
random.shuffle(nodes)
for node in nodes:
walks.append(self.node2vec_walk(walk_length, node))
return walks
def get_alias_edge(self, src, dst):
G = self.G
p = self.p
q = self.q
unnormalized_probs = []
for dst_nbr in sorted(G.neighbors(dst)):
if dst_nbr == src:
unnormalized_probs.append(G[dst][dst_nbr]["weight"] / p)
elif G.has_edge(dst_nbr, src):
unnormalized_probs.append(G[dst][dst_nbr]["weight"])
else:
unnormalized_probs.append(G[dst][dst_nbr]["weight"] / q)
norm_cost = sum(unnormalized_probs)
normalized_probs = [float(v) / norm_cost for v in unnormalized_probs]
return alias_setup(normalized_probs)
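    # Editorial note: per the node2vec paper, p is the "return" parameter
    # (likelihood of revisiting src) and q the "in-out" parameter (BFS- vs
    # DFS-like exploration); the edge weights are divided accordingly above.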
def preprocess_transition_probs(self):
        # Preprocess the transition probabilities for all nodes and edges
G = self.G
is_directed = self.is_directed
alias_nodes = {}
for node in G.nodes():
unnormalized_probs = [G[node][nbr]["weight"] for nbr in sorted(G.neighbors(node))]
norm_const = sum(unnormalized_probs)
normalized_probs = [float(v) / norm_const for v in unnormalized_probs]
alias_nodes[node] = alias_setup(normalized_probs)
alias_edges = {}
if is_directed:
for edge in G.edges():
alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])
else:
for edge in G.edges():
alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])
alias_edges[(edge[1], edge[0])] = self.get_alias_edge(edge[1], edge[0])
self.alias_nodes = alias_nodes
self.alias_edges = alias_edges
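# Editorial note: the alias tables built above give O(1) sampling per walk
# step (see alias_setup/alias_draw below) after linear-time preprocessing.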
def alias_setup(probs):
K = len(probs)
q = np.zeros(K)
    J = np.zeros(K, dtype=np.int64)  # np.int was removed from NumPy (1.24+); use an explicit dtype
smaller = []
larger = []
for kk, prob in enumerate(probs):
q[kk] = K * prob
        # Partition indices: scaled probabilities above the uniform share (1.0) go to 'larger', the rest to 'smaller'
if q[kk] > 1.0:
larger.append(kk)
else:
smaller.append(kk)
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
        # Record the alias index for the small bucket
J[small] = large
        # After topping the small bucket up to 1, compute the large bucket's leftover probability
q[large] = q[small] + q[large] - 1
        # If q[large] is still not exactly 1, push it back into smaller/larger and keep iterating
if q[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
return J, q
def alias_draw(J, q):
    # Sample from the non-uniform distribution using the alias tables
K = len(J)
kk = int(np.floor(np.random.rand() * K))
if np.random.rand() < q[kk]:
return kk
else:
return J[kk]
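# Illustrative sanity check for the alias method above (not part of the
# original script): tables built for [0.5, 0.3, 0.2] should reproduce those
# frequencies over many draws, e.g.
#   J, q = alias_setup([0.5, 0.3, 0.2])
#   draws = [alias_draw(J, q) for _ in range(10000)]
#   print(np.bincount(draws, minlength=3) / 10000)  # roughly [0.5, 0.3, 0.2]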
def parse_args():
parser = argparse.ArgumentParser(description="Run node2vec.")
parser.add_argument('--input', nargs='?', default='./data/Wiki_edgelist.txt', help='Input graph path')
parser.add_argument('--output', nargs='?', default='emb/node2vec_wiki.emb', help='Embeddings path')
parser.add_argument('--label_file', nargs='?', default='data/wiki_labels.txt', help='Labels path')
parser.add_argument('--dimensions', type=int, default=128, help='Number of dimensions. Default is 128.')
parser.add_argument('--walk-length', type=int, default=80, help='Length of walk per source. Default is 80.')
    parser.add_argument('--num-walks', type=int, default=20, help='Number of walks per source. Default is 20.')
parser.add_argument('--window-size', type=int, default=10, help='Context size for optimization. Default is 10.')
parser.add_argument('--iter', default=2, type=int, help='Number of epochs in SGD')
parser.add_argument('--workers', type=int, default=8, help='Number of parallel workers. Default is 8.')
parser.add_argument('--p', type=float, default=1, help='Return hyperparameter. Default is 1.')
parser.add_argument('--q', type=float, default=1, help='Inout hyperparameter. Default is 1.')
parser.add_argument('--weighted', dest='weighted', action='store_true', help='Boolean specifying (un)weighted. Default is unweighted.')
parser.add_argument('--unweighted', dest='unweighted', action='store_false')
parser.set_defaults(weighted=False)
parser.add_argument('--directed', dest='directed', action='store_true', help='Graph is (un)directed. Default is undirected.')
parser.add_argument('--undirected', dest='undirected', action='store_false')
parser.set_defaults(directed=False)
return parser.parse_args()
def read_graph():
if args.weighted:
G = nx.read_edgelist(args.input, nodetype=int, data=(('weight', float), ), create_using=nx.DiGraph)
else:
G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.DiGraph())
for edge in G.edges():
G[edge[0]][edge[1]]['weight'] = 1
if not args.directed:
G = G.to_undirected()
return G
def learning_walks(walks):
walks = [list(map(str, walk)) for walk in walks]
    # Note: this uses the pre-4.0 gensim API; gensim >= 4.0 renamed size/iter to vector_size/epochs.
    model = Word2Vec(walks, size=args.dimensions, window=args.window_size, min_count=0, sg=1, workers=args.workers, iter=args.iter)
model.wv.save_word2vec_format(args.output)
return model
def main(args):
nx_G = read_graph()
G = node2vec_walk(nx_G, args.directed, args.p, args.q)
G.preprocess_transition_probs()
walks = G.simulate_walks(args.num_walks, args.walk_length)
model = learning_walks(walks)
_embeddings = {}
for v in nx_G.nodes():
_embeddings[str(v)] = model.wv[str(v)]
plot_embeddings(_embeddings, args.label_file)
if __name__ == "__main__":
args = parse_args()
main(args)
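# Example invocation (editorial; the filename is a placeholder, the paths are
# the script's own defaults):
#   python node2vec.py --input ./data/Wiki_edgelist.txt --num-walks 20 --walk-length 80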
|
normal
|
{
"blob_id": "fc2748d766ebce8c9577f1eebc8435e2aa58ae25",
"index": 8605,
"step-1": "<mask token>\n\n\nclass node2vec_walk:\n\n def __init__(self, nx_G, is_directed, p, q):\n self.G = nx_G\n self.is_directed = is_directed\n self.p = p\n self.q = q\n\n def node2vec_walk(self, walk_length, start_node):\n G = self.G\n alias_nodes = self.alias_nodes\n alias_edges = self.alias_edges\n walk = [start_node]\n while len(walk) < walk_length:\n curr = walk[-1]\n cur_nbrs = sorted(G.neighbors(curr))\n if len(cur_nbrs) > 0:\n if len(walk) == 1:\n walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0],\n alias_nodes[curr][1])])\n else:\n prev = walk[-2]\n next = cur_nbrs[alias_draw(alias_edges[prev, curr][0],\n alias_edges[prev, curr][1])]\n walk.append(next)\n else:\n break\n return walk\n\n def simulate_walks(self, num_walks, walk_length):\n G = self.G\n walks = []\n nodes = list(G.nodes())\n print('Walk iteration...')\n for walk_iter in range(num_walks):\n print(f'{walk_iter + 1}/{num_walks}')\n random.shuffle(nodes)\n for node in nodes:\n walks.append(self.node2vec_walk(walk_length, node))\n return walks\n\n def get_alias_edge(self, src, dst):\n G = self.G\n p = self.p\n q = self.q\n unnormalized_probs = []\n for dst_nbr in sorted(G.neighbors(dst)):\n if dst_nbr == src:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / p)\n elif G.has_edge(dst_nbr, src):\n unnormalized_probs.append(G[dst][dst_nbr]['weight'])\n else:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / q)\n norm_cost = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_cost) for v in unnormalized_probs]\n return alias_setup(normalized_probs)\n\n def preprocess_transition_probs(self):\n G = self.G\n is_directed = self.is_directed\n alias_nodes = {}\n for node in G.nodes():\n unnormalized_probs = [G[node][nbr]['weight'] for nbr in sorted(\n G.neighbors(node))]\n norm_const = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_const) for v in\n unnormalized_probs]\n alias_nodes[node] = alias_setup(normalized_probs)\n alias_edges = {}\n if is_directed:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n else:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n alias_edges[edge[1], edge[0]] = self.get_alias_edge(edge[1],\n edge[0])\n self.alias_nodes = alias_nodes\n self.alias_edges = alias_edges\n\n\n<mask token>\n\n\ndef read_graph():\n if args.weighted:\n G = nx.read_edgelist(args.input, nodetype=int, data=(('weight',\n float),), create_using=nx.DiGraph)\n else:\n G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.\n DiGraph())\n for edge in G.edges():\n G[edge[0]][edge[1]]['weight'] = 1\n if not args.directed:\n G = G.to_undirected()\n return G\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass node2vec_walk:\n\n def __init__(self, nx_G, is_directed, p, q):\n self.G = nx_G\n self.is_directed = is_directed\n self.p = p\n self.q = q\n\n def node2vec_walk(self, walk_length, start_node):\n G = self.G\n alias_nodes = self.alias_nodes\n alias_edges = self.alias_edges\n walk = [start_node]\n while len(walk) < walk_length:\n curr = walk[-1]\n cur_nbrs = sorted(G.neighbors(curr))\n if len(cur_nbrs) > 0:\n if len(walk) == 1:\n walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0],\n alias_nodes[curr][1])])\n else:\n prev = walk[-2]\n next = cur_nbrs[alias_draw(alias_edges[prev, curr][0],\n alias_edges[prev, curr][1])]\n walk.append(next)\n else:\n break\n return walk\n\n def simulate_walks(self, num_walks, walk_length):\n G = self.G\n walks = []\n nodes = list(G.nodes())\n print('Walk iteration...')\n for walk_iter in range(num_walks):\n print(f'{walk_iter + 1}/{num_walks}')\n random.shuffle(nodes)\n for node in nodes:\n walks.append(self.node2vec_walk(walk_length, node))\n return walks\n\n def get_alias_edge(self, src, dst):\n G = self.G\n p = self.p\n q = self.q\n unnormalized_probs = []\n for dst_nbr in sorted(G.neighbors(dst)):\n if dst_nbr == src:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / p)\n elif G.has_edge(dst_nbr, src):\n unnormalized_probs.append(G[dst][dst_nbr]['weight'])\n else:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / q)\n norm_cost = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_cost) for v in unnormalized_probs]\n return alias_setup(normalized_probs)\n\n def preprocess_transition_probs(self):\n G = self.G\n is_directed = self.is_directed\n alias_nodes = {}\n for node in G.nodes():\n unnormalized_probs = [G[node][nbr]['weight'] for nbr in sorted(\n G.neighbors(node))]\n norm_const = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_const) for v in\n unnormalized_probs]\n alias_nodes[node] = alias_setup(normalized_probs)\n alias_edges = {}\n if is_directed:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n else:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n alias_edges[edge[1], edge[0]] = self.get_alias_edge(edge[1],\n edge[0])\n self.alias_nodes = alias_nodes\n self.alias_edges = alias_edges\n\n\n<mask token>\n\n\ndef read_graph():\n if args.weighted:\n G = nx.read_edgelist(args.input, nodetype=int, data=(('weight',\n float),), create_using=nx.DiGraph)\n else:\n G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.\n DiGraph())\n for edge in G.edges():\n G[edge[0]][edge[1]]['weight'] = 1\n if not args.directed:\n G = G.to_undirected()\n return G\n\n\n<mask token>\n\n\ndef main(args):\n nx_G = read_graph()\n G = node2vec_walk(nx_G, args.directed, args.p, args.q)\n G.preprocess_transition_probs()\n walks = G.simulate_walks(args.num_walks, args.walk_length)\n model = learning_walks(walks)\n _embeddings = {}\n for v in nx_G.nodes():\n _embeddings[str(v)] = model.wv[str(v)]\n plot_embeddings(_embeddings, args.label_file)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass node2vec_walk:\n\n def __init__(self, nx_G, is_directed, p, q):\n self.G = nx_G\n self.is_directed = is_directed\n self.p = p\n self.q = q\n\n def node2vec_walk(self, walk_length, start_node):\n G = self.G\n alias_nodes = self.alias_nodes\n alias_edges = self.alias_edges\n walk = [start_node]\n while len(walk) < walk_length:\n curr = walk[-1]\n cur_nbrs = sorted(G.neighbors(curr))\n if len(cur_nbrs) > 0:\n if len(walk) == 1:\n walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0],\n alias_nodes[curr][1])])\n else:\n prev = walk[-2]\n next = cur_nbrs[alias_draw(alias_edges[prev, curr][0],\n alias_edges[prev, curr][1])]\n walk.append(next)\n else:\n break\n return walk\n\n def simulate_walks(self, num_walks, walk_length):\n G = self.G\n walks = []\n nodes = list(G.nodes())\n print('Walk iteration...')\n for walk_iter in range(num_walks):\n print(f'{walk_iter + 1}/{num_walks}')\n random.shuffle(nodes)\n for node in nodes:\n walks.append(self.node2vec_walk(walk_length, node))\n return walks\n\n def get_alias_edge(self, src, dst):\n G = self.G\n p = self.p\n q = self.q\n unnormalized_probs = []\n for dst_nbr in sorted(G.neighbors(dst)):\n if dst_nbr == src:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / p)\n elif G.has_edge(dst_nbr, src):\n unnormalized_probs.append(G[dst][dst_nbr]['weight'])\n else:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / q)\n norm_cost = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_cost) for v in unnormalized_probs]\n return alias_setup(normalized_probs)\n\n def preprocess_transition_probs(self):\n G = self.G\n is_directed = self.is_directed\n alias_nodes = {}\n for node in G.nodes():\n unnormalized_probs = [G[node][nbr]['weight'] for nbr in sorted(\n G.neighbors(node))]\n norm_const = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_const) for v in\n unnormalized_probs]\n alias_nodes[node] = alias_setup(normalized_probs)\n alias_edges = {}\n if is_directed:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n else:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n alias_edges[edge[1], edge[0]] = self.get_alias_edge(edge[1],\n edge[0])\n self.alias_nodes = alias_nodes\n self.alias_edges = alias_edges\n\n\ndef alias_setup(probs):\n K = len(probs)\n q = np.zeros(K)\n J = np.zeros(K, dtype=np.int)\n smaller = []\n larger = []\n for kk, prob in enumerate(probs):\n q[kk] = K * prob\n if q[kk] > 1.0:\n larger.append(kk)\n else:\n smaller.append(kk)\n while len(smaller) > 0 and len(larger) > 0:\n small = smaller.pop()\n large = larger.pop()\n J[small] = large\n q[large] = q[small] + q[large] - 1\n if q[large] < 1.0:\n smaller.append(large)\n else:\n larger.append(large)\n return J, q\n\n\ndef alias_draw(J, q):\n K = len(J)\n kk = int(np.floor(np.random.rand() * K))\n if np.random.rand() < q[kk]:\n return kk\n else:\n return J[kk]\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Run node2vec.')\n parser.add_argument('--input', nargs='?', default=\n './data/Wiki_edgelist.txt', help='Input graph path')\n parser.add_argument('--output', nargs='?', default=\n 'emb/node2vec_wiki.emb', help='Embeddings path')\n parser.add_argument('--label_file', nargs='?', default=\n 'data/wiki_labels.txt', help='Labels path')\n parser.add_argument('--dimensions', type=int, default=128, help=\n 'Number of dimensions. 
Default is 128.')\n parser.add_argument('--walk-length', type=int, default=80, help=\n 'Length of walk per source. Default is 80.')\n parser.add_argument('--num-walks', type=int, default=20, help=\n 'Number of walks per source. Default is 10.')\n parser.add_argument('--window-size', type=int, default=10, help=\n 'Context size for optimization. Default is 10.')\n parser.add_argument('--iter', default=2, type=int, help=\n 'Number of epochs in SGD')\n parser.add_argument('--workers', type=int, default=8, help=\n 'Number of parallel workers. Default is 8.')\n parser.add_argument('--p', type=float, default=1, help=\n 'Return hyperparameter. Default is 1.')\n parser.add_argument('--q', type=float, default=1, help=\n 'Inout hyperparameter. Default is 1.')\n parser.add_argument('--weighted', dest='weighted', action='store_true',\n help='Boolean specifying (un)weighted. Default is unweighted.')\n parser.add_argument('--unweighted', dest='unweighted', action='store_false'\n )\n parser.set_defaults(weighted=False)\n parser.add_argument('--directed', dest='directed', action='store_true',\n help='Graph is (un)directed. Default is undirected.')\n parser.add_argument('--undirected', dest='undirected', action='store_false'\n )\n parser.set_defaults(directed=False)\n return parser.parse_args()\n\n\ndef read_graph():\n if args.weighted:\n G = nx.read_edgelist(args.input, nodetype=int, data=(('weight',\n float),), create_using=nx.DiGraph)\n else:\n G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.\n DiGraph())\n for edge in G.edges():\n G[edge[0]][edge[1]]['weight'] = 1\n if not args.directed:\n G = G.to_undirected()\n return G\n\n\ndef learning_walks(walks):\n walks = [list(map(str, walk)) for walk in walks]\n model = Word2Vec(walks, size=args.dimensions, window=args.window_size,\n min_count=0, sg=1, workers=args.workers, iter=args.iter)\n model.wv.save_word2vec_format(args.output)\n return model\n\n\ndef main(args):\n nx_G = read_graph()\n G = node2vec_walk(nx_G, args.directed, args.p, args.q)\n G.preprocess_transition_probs()\n walks = G.simulate_walks(args.num_walks, args.walk_length)\n model = learning_walks(walks)\n _embeddings = {}\n for v in nx_G.nodes():\n _embeddings[str(v)] = model.wv[str(v)]\n plot_embeddings(_embeddings, args.label_file)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass node2vec_walk:\n\n def __init__(self, nx_G, is_directed, p, q):\n self.G = nx_G\n self.is_directed = is_directed\n self.p = p\n self.q = q\n\n def node2vec_walk(self, walk_length, start_node):\n G = self.G\n alias_nodes = self.alias_nodes\n alias_edges = self.alias_edges\n walk = [start_node]\n while len(walk) < walk_length:\n curr = walk[-1]\n cur_nbrs = sorted(G.neighbors(curr))\n if len(cur_nbrs) > 0:\n if len(walk) == 1:\n walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0],\n alias_nodes[curr][1])])\n else:\n prev = walk[-2]\n next = cur_nbrs[alias_draw(alias_edges[prev, curr][0],\n alias_edges[prev, curr][1])]\n walk.append(next)\n else:\n break\n return walk\n\n def simulate_walks(self, num_walks, walk_length):\n G = self.G\n walks = []\n nodes = list(G.nodes())\n print('Walk iteration...')\n for walk_iter in range(num_walks):\n print(f'{walk_iter + 1}/{num_walks}')\n random.shuffle(nodes)\n for node in nodes:\n walks.append(self.node2vec_walk(walk_length, node))\n return walks\n\n def get_alias_edge(self, src, dst):\n G = self.G\n p = self.p\n q = self.q\n unnormalized_probs = []\n for dst_nbr in sorted(G.neighbors(dst)):\n if dst_nbr == src:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / p)\n elif G.has_edge(dst_nbr, src):\n unnormalized_probs.append(G[dst][dst_nbr]['weight'])\n else:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / q)\n norm_cost = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_cost) for v in unnormalized_probs]\n return alias_setup(normalized_probs)\n\n def preprocess_transition_probs(self):\n G = self.G\n is_directed = self.is_directed\n alias_nodes = {}\n for node in G.nodes():\n unnormalized_probs = [G[node][nbr]['weight'] for nbr in sorted(\n G.neighbors(node))]\n norm_const = sum(unnormalized_probs)\n normalized_probs = [(float(v) / norm_const) for v in\n unnormalized_probs]\n alias_nodes[node] = alias_setup(normalized_probs)\n alias_edges = {}\n if is_directed:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n else:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n alias_edges[edge[1], edge[0]] = self.get_alias_edge(edge[1],\n edge[0])\n self.alias_nodes = alias_nodes\n self.alias_edges = alias_edges\n\n\ndef alias_setup(probs):\n K = len(probs)\n q = np.zeros(K)\n J = np.zeros(K, dtype=np.int)\n smaller = []\n larger = []\n for kk, prob in enumerate(probs):\n q[kk] = K * prob\n if q[kk] > 1.0:\n larger.append(kk)\n else:\n smaller.append(kk)\n while len(smaller) > 0 and len(larger) > 0:\n small = smaller.pop()\n large = larger.pop()\n J[small] = large\n q[large] = q[small] + q[large] - 1\n if q[large] < 1.0:\n smaller.append(large)\n else:\n larger.append(large)\n return J, q\n\n\ndef alias_draw(J, q):\n K = len(J)\n kk = int(np.floor(np.random.rand() * K))\n if np.random.rand() < q[kk]:\n return kk\n else:\n return J[kk]\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Run node2vec.')\n parser.add_argument('--input', nargs='?', default=\n './data/Wiki_edgelist.txt', help='Input graph path')\n parser.add_argument('--output', nargs='?', default=\n 'emb/node2vec_wiki.emb', help='Embeddings path')\n parser.add_argument('--label_file', nargs='?', default=\n 'data/wiki_labels.txt', help='Labels path')\n parser.add_argument('--dimensions', type=int, default=128, help=\n 'Number of dimensions. 
Default is 128.')\n parser.add_argument('--walk-length', type=int, default=80, help=\n 'Length of walk per source. Default is 80.')\n parser.add_argument('--num-walks', type=int, default=20, help=\n 'Number of walks per source. Default is 10.')\n parser.add_argument('--window-size', type=int, default=10, help=\n 'Context size for optimization. Default is 10.')\n parser.add_argument('--iter', default=2, type=int, help=\n 'Number of epochs in SGD')\n parser.add_argument('--workers', type=int, default=8, help=\n 'Number of parallel workers. Default is 8.')\n parser.add_argument('--p', type=float, default=1, help=\n 'Return hyperparameter. Default is 1.')\n parser.add_argument('--q', type=float, default=1, help=\n 'Inout hyperparameter. Default is 1.')\n parser.add_argument('--weighted', dest='weighted', action='store_true',\n help='Boolean specifying (un)weighted. Default is unweighted.')\n parser.add_argument('--unweighted', dest='unweighted', action='store_false'\n )\n parser.set_defaults(weighted=False)\n parser.add_argument('--directed', dest='directed', action='store_true',\n help='Graph is (un)directed. Default is undirected.')\n parser.add_argument('--undirected', dest='undirected', action='store_false'\n )\n parser.set_defaults(directed=False)\n return parser.parse_args()\n\n\ndef read_graph():\n if args.weighted:\n G = nx.read_edgelist(args.input, nodetype=int, data=(('weight',\n float),), create_using=nx.DiGraph)\n else:\n G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.\n DiGraph())\n for edge in G.edges():\n G[edge[0]][edge[1]]['weight'] = 1\n if not args.directed:\n G = G.to_undirected()\n return G\n\n\ndef learning_walks(walks):\n walks = [list(map(str, walk)) for walk in walks]\n model = Word2Vec(walks, size=args.dimensions, window=args.window_size,\n min_count=0, sg=1, workers=args.workers, iter=args.iter)\n model.wv.save_word2vec_format(args.output)\n return model\n\n\ndef main(args):\n nx_G = read_graph()\n G = node2vec_walk(nx_G, args.directed, args.p, args.q)\n G.preprocess_transition_probs()\n walks = G.simulate_walks(args.num_walks, args.walk_length)\n model = learning_walks(walks)\n _embeddings = {}\n for v in nx_G.nodes():\n _embeddings[str(v)] = model.wv[str(v)]\n plot_embeddings(_embeddings, args.label_file)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n",
"step-5": "\n\nimport numpy as np\nimport random\n\nimport argparse\nimport networkx as nx\nfrom gensim.models import Word2Vec\n\nfrom utils import read_node_label, plot_embeddings\n\nclass node2vec_walk():\n\n def __init__(self, nx_G, is_directed, p, q):\n self.G = nx_G\n self.is_directed = is_directed\n self.p = p\n self.q = q\n\n def node2vec_walk(self, walk_length, start_node):\n G = self.G\n alias_nodes = self.alias_nodes\n alias_edges = self.alias_edges\n\n walk = [start_node]\n\n while len(walk) < walk_length:\n curr = walk[-1]\n cur_nbrs = sorted(G.neighbors(curr))\n if len(cur_nbrs) > 0:\n if len(walk) == 1:\n walk.append(cur_nbrs[alias_draw(alias_nodes[curr][0], alias_nodes[curr][1])])\n else:\n prev = walk[-2]\n next = cur_nbrs[alias_draw(alias_edges[(prev, curr)][0], alias_edges[(prev, curr)][1])]\n walk.append(next)\n else:\n break\n return walk\n\n def simulate_walks(self, num_walks, walk_length):\n G = self.G\n walks = []\n nodes = list(G.nodes())\n\n print(\"Walk iteration...\")\n\n for walk_iter in range(num_walks):\n print(f\"{walk_iter + 1}/{num_walks}\")\n random.shuffle(nodes)\n for node in nodes:\n walks.append(self.node2vec_walk(walk_length, node))\n return walks\n\n def get_alias_edge(self, src, dst):\n G = self.G\n p = self.p\n q = self.q\n unnormalized_probs = []\n for dst_nbr in sorted(G.neighbors(dst)):\n if dst_nbr == src:\n unnormalized_probs.append(G[dst][dst_nbr][\"weight\"] / p)\n elif G.has_edge(dst_nbr, src):\n unnormalized_probs.append(G[dst][dst_nbr][\"weight\"])\n else:\n unnormalized_probs.append(G[dst][dst_nbr][\"weight\"] / q)\n norm_cost = sum(unnormalized_probs)\n normalized_probs = [float(v) / norm_cost for v in unnormalized_probs]\n return alias_setup(normalized_probs)\n\n def preprocess_transition_probs(self):\n # 预处理转移概率\n G = self.G\n is_directed = self.is_directed\n\n alias_nodes = {}\n for node in G.nodes():\n unnormalized_probs = [G[node][nbr][\"weight\"] for nbr in sorted(G.neighbors(node))]\n norm_const = sum(unnormalized_probs)\n normalized_probs = [float(v) / norm_const for v in unnormalized_probs]\n alias_nodes[node] = alias_setup(normalized_probs)\n\n alias_edges = {}\n\n if is_directed:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n else:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n alias_edges[(edge[1], edge[0])] = self.get_alias_edge(edge[1], edge[0])\n\n\n self.alias_nodes = alias_nodes\n self.alias_edges = alias_edges\n\n\n\ndef alias_setup(probs):\n K = len(probs)\n q = np.zeros(K)\n J = np.zeros(K, dtype=np.int)\n\n smaller = []\n larger = []\n for kk, prob in enumerate(probs):\n q[kk] = K * prob\n # 记录小于均匀分布概率的Index\n if q[kk] > 1.0:\n larger.append(kk)\n else:\n smaller.append(kk)\n\n while len(smaller) > 0 and len(larger) > 0:\n small = smaller.pop()\n large = larger.pop()\n\n # 记录index\n J[small] = large\n # 将small的补充满1后,算出剩余large的概率\n q[large] = q[small] + q[large] - 1\n # 若q[large]不等于1,则继续放入smaller和larger的数组中进行迭代\n if q[large] < 1.0:\n smaller.append(large)\n else:\n larger.append(large)\n\n return J, q\n\ndef alias_draw(J, q):\n # 非均匀分布进行采样\n K = len(J)\n\n kk = int(np.floor(np.random.rand() * K))\n if np.random.rand() < q[kk]:\n return kk\n else:\n return J[kk]\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Run node2vec.\")\n parser.add_argument('--input', nargs='?', default='./data/Wiki_edgelist.txt', help='Input graph path')\n parser.add_argument('--output', nargs='?', default='emb/node2vec_wiki.emb', 
help='Embeddings path')\n parser.add_argument('--label_file', nargs='?', default='data/wiki_labels.txt', help='Labels path')\n parser.add_argument('--dimensions', type=int, default=128, help='Number of dimensions. Default is 128.')\n parser.add_argument('--walk-length', type=int, default=80, help='Length of walk per source. Default is 80.')\n parser.add_argument('--num-walks', type=int, default=20, help='Number of walks per source. Default is 10.')\n parser.add_argument('--window-size', type=int, default=10, help='Context size for optimization. Default is 10.')\n parser.add_argument('--iter', default=2, type=int, help='Number of epochs in SGD')\n parser.add_argument('--workers', type=int, default=8, help='Number of parallel workers. Default is 8.')\n parser.add_argument('--p', type=float, default=1, help='Return hyperparameter. Default is 1.')\n parser.add_argument('--q', type=float, default=1, help='Inout hyperparameter. Default is 1.')\n parser.add_argument('--weighted', dest='weighted', action='store_true', help='Boolean specifying (un)weighted. Default is unweighted.')\n parser.add_argument('--unweighted', dest='unweighted', action='store_false')\n parser.set_defaults(weighted=False)\n parser.add_argument('--directed', dest='directed', action='store_true', help='Graph is (un)directed. Default is undirected.')\n parser.add_argument('--undirected', dest='undirected', action='store_false')\n parser.set_defaults(directed=False)\n return parser.parse_args()\n\ndef read_graph():\n if args.weighted:\n G = nx.read_edgelist(args.input, nodetype=int, data=(('weight', float), ), create_using=nx.DiGraph)\n else:\n G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.DiGraph())\n for edge in G.edges():\n G[edge[0]][edge[1]]['weight'] = 1\n\n if not args.directed:\n G = G.to_undirected()\n\n return G\n\ndef learning_walks(walks):\n walks = [list(map(str, walk)) for walk in walks]\n model = Word2Vec(walks, size=args.dimensions, window=args.window_size, min_count=0, sg=1, workers=args.workers, iter=args.iter)\n model.wv.save_word2vec_format(args.output)\n return model\n\ndef main(args):\n nx_G = read_graph()\n G = node2vec_walk(nx_G, args.directed, args.p, args.q)\n G.preprocess_transition_probs()\n walks = G.simulate_walks(args.num_walks, args.walk_length)\n model = learning_walks(walks)\n\n _embeddings = {}\n for v in nx_G.nodes():\n _embeddings[str(v)] = model.wv[str(v)]\n\n plot_embeddings(_embeddings, args.label_file)\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args)\n\n\n",
"step-ids": [
7,
8,
12,
13,
15
]
}
|
[
7,
8,
12,
13,
15
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('IncomeHome/', views.IncomeHome, name='IncomeHome'),
path('IncomeCreate/', views.IncomeCreate.as_view(), name='IncomeCreate'
), path('IncomeUpdate/<int:pk>', views.IncomeUpdate.as_view(), name=
'IncomeUpdate'), path('IncomeDelete/<int:pk>', views.IncomeDelete.
as_view(), name='IncomeDelete'), path('Income/', views.IncomeView.
as_view(), name='Income')]
<|reserved_special_token_1|>
from django.urls import path, include
from Income import views
urlpatterns = [path('IncomeHome/', views.IncomeHome, name='IncomeHome'),
path('IncomeCreate/', views.IncomeCreate.as_view(), name='IncomeCreate'
), path('IncomeUpdate/<int:pk>', views.IncomeUpdate.as_view(), name=
'IncomeUpdate'), path('IncomeDelete/<int:pk>', views.IncomeDelete.
as_view(), name='IncomeDelete'), path('Income/', views.IncomeView.
as_view(), name='Income')]
<|reserved_special_token_1|>
from django.urls import path,include
from Income import views
urlpatterns = [
path('IncomeHome/',views.IncomeHome,name='IncomeHome'),
path('IncomeCreate/',views.IncomeCreate.as_view(),name='IncomeCreate'),
path('IncomeUpdate/<int:pk>',views.IncomeUpdate.as_view(),name='IncomeUpdate'),
path('IncomeDelete/<int:pk>',views.IncomeDelete.as_view(),name='IncomeDelete'),
path('Income/',views.IncomeView.as_view(),name='Income'),
]
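# Editorial usage note (not part of the original urlconf): the route names
# above can be reversed elsewhere in the project, e.g.
#   from django.urls import reverse
#   reverse('IncomeUpdate', args=[42])  # resolves relative to wherever this urlconf is included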
|
flexible
|
{
"blob_id": "ad3a7221883a847fc9d26097c3801973cbbda38e",
"index": 355,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('IncomeHome/', views.IncomeHome, name='IncomeHome'),\n path('IncomeCreate/', views.IncomeCreate.as_view(), name='IncomeCreate'\n ), path('IncomeUpdate/<int:pk>', views.IncomeUpdate.as_view(), name=\n 'IncomeUpdate'), path('IncomeDelete/<int:pk>', views.IncomeDelete.\n as_view(), name='IncomeDelete'), path('Income/', views.IncomeView.\n as_view(), name='Income')]\n",
"step-3": "from django.urls import path, include\nfrom Income import views\nurlpatterns = [path('IncomeHome/', views.IncomeHome, name='IncomeHome'),\n path('IncomeCreate/', views.IncomeCreate.as_view(), name='IncomeCreate'\n ), path('IncomeUpdate/<int:pk>', views.IncomeUpdate.as_view(), name=\n 'IncomeUpdate'), path('IncomeDelete/<int:pk>', views.IncomeDelete.\n as_view(), name='IncomeDelete'), path('Income/', views.IncomeView.\n as_view(), name='Income')]\n",
"step-4": "\nfrom django.urls import path,include\n\nfrom Income import views\n\nurlpatterns = [\n path('IncomeHome/',views.IncomeHome,name='IncomeHome'),\n path('IncomeCreate/',views.IncomeCreate.as_view(),name='IncomeCreate'),\n path('IncomeUpdate/<int:pk>',views.IncomeUpdate.as_view(),name='IncomeUpdate'),\n path('IncomeDelete/<int:pk>',views.IncomeDelete.as_view(),name='IncomeDelete'),\n path('Income/',views.IncomeView.as_view(),name='Income'),\n\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
providers = {
'provider-1': {
'name': 'provider-1',
'roles': ['licensor', 'producer'],
'description': 'This is a full description of the provider',
'url': 'https://www.provider.com'
},
'provider-2': {
'name': 'provider-2',
'roles': ['licensor'],
'description': 'This is a full description of a second provider',
'url': 'https://www.provider.com/provider-2'
},
'provider-3': {
'name': 'provider-3',
}
}
providers_invalid = {
'provider-invalid': {
'name': 'provider invalid ', 'roles': ['Test'], 'url': 'This is not an url'
},
}
links = {
'link-1': {
'rel': 'describedBy',
'href': 'https://www.example.com/described-by',
'title': 'This is an extra link',
'link_type': 'description'
}
}
links_invalid = {
'link-invalid': {
'rel': 'invalid relation',
'href': 'not a url',
}
}
collections = {
'collection-1': {
'name': 'collection-1',
'description': 'This a collection description',
'title': 'My collection 1',
'license': 'proprietary',
'providers': providers.values(),
'links': links.values()
},
'collection-2': {
'name': 'collection-2',
'description': 'This a second open source collection description',
'title': 'My collection 2',
'license': 'MIT',
'providers': [providers['provider-2']]
},
'collection-3': {
'name': 'collection-3',
'description': 'This a third open source collection description',
'title': 'My collection 3',
'license': 'MIT',
'links': [links['link-1']]
},
'collection-4': {
'name': 'collection-3',
'description': 'This a fourth open source collection description',
'title': 'My collection 4',
'license': 'MIT'
},
'collection-invalid': {
'name': 'collection invalid name',
'description': 45,
'title': 34,
'license': ['proprietary'],
},
'collection-missing-mandatory-fields': {
'name': 'collection-missing-mandatory-fields'
},
'collection-invalid-links': {
'name': 'collection-invalid-link',
'description': 'This is a collection with invalid user link',
'license': 'proprietary',
'links': [links_invalid['link-invalid']]
},
'collection-invalid-providers': {
'name': 'collection-invalid-provider',
'description': 'This is a collection with invalid provider',
'license': 'proprietary',
'providers': providers_invalid.values()
},
}
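# Editorial note (not part of the original fixtures): providers.values() and
# links.values() are live dict views rather than lists; they iterate fine in
# tests, but serializing would need an explicit list, e.g.
#   import json
#   json.dumps({'providers': list(providers.values())})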
|
normal
|
{
"blob_id": "7801676df91a7ded6f123113acc62f3955dfe6cb",
"index": 7113,
"step-1": "<mask token>\n",
"step-2": "providers = {'provider-1': {'name': 'provider-1', 'roles': ['licensor',\n 'producer'], 'description':\n 'This is a full description of the provider', 'url':\n 'https://www.provider.com'}, 'provider-2': {'name': 'provider-2',\n 'roles': ['licensor'], 'description':\n 'This is a full description of a second provider', 'url':\n 'https://www.provider.com/provider-2'}, 'provider-3': {'name':\n 'provider-3'}}\nproviders_invalid = {'provider-invalid': {'name': 'provider invalid ',\n 'roles': ['Test'], 'url': 'This is not an url'}}\nlinks = {'link-1': {'rel': 'describedBy', 'href':\n 'https://www.example.com/described-by', 'title':\n 'This is an extra link', 'link_type': 'description'}}\nlinks_invalid = {'link-invalid': {'rel': 'invalid relation', 'href':\n 'not a url'}}\ncollections = {'collection-1': {'name': 'collection-1', 'description':\n 'This a collection description', 'title': 'My collection 1', 'license':\n 'proprietary', 'providers': providers.values(), 'links': links.values()\n }, 'collection-2': {'name': 'collection-2', 'description':\n 'This a second open source collection description', 'title':\n 'My collection 2', 'license': 'MIT', 'providers': [providers[\n 'provider-2']]}, 'collection-3': {'name': 'collection-3', 'description':\n 'This a third open source collection description', 'title':\n 'My collection 3', 'license': 'MIT', 'links': [links['link-1']]},\n 'collection-4': {'name': 'collection-3', 'description':\n 'This a fourth open source collection description', 'title':\n 'My collection 4', 'license': 'MIT'}, 'collection-invalid': {'name':\n 'collection invalid name', 'description': 45, 'title': 34, 'license': [\n 'proprietary']}, 'collection-missing-mandatory-fields': {'name':\n 'collection-missing-mandatory-fields'}, 'collection-invalid-links': {\n 'name': 'collection-invalid-link', 'description':\n 'This is a collection with invalid user link', 'license': 'proprietary',\n 'links': [links_invalid['link-invalid']]},\n 'collection-invalid-providers': {'name': 'collection-invalid-provider',\n 'description': 'This is a collection with invalid provider', 'license':\n 'proprietary', 'providers': providers_invalid.values()}}\n",
"step-3": "providers = {\n 'provider-1': {\n 'name': 'provider-1',\n 'roles': ['licensor', 'producer'],\n 'description': 'This is a full description of the provider',\n 'url': 'https://www.provider.com'\n },\n 'provider-2': {\n 'name': 'provider-2',\n 'roles': ['licensor'],\n 'description': 'This is a full description of a second provider',\n 'url': 'https://www.provider.com/provider-2'\n },\n 'provider-3': {\n 'name': 'provider-3',\n }\n}\n\nproviders_invalid = {\n 'provider-invalid': {\n 'name': 'provider invalid ', 'roles': ['Test'], 'url': 'This is not an url'\n },\n}\n\nlinks = {\n 'link-1': {\n 'rel': 'describedBy',\n 'href': 'https://www.example.com/described-by',\n 'title': 'This is an extra link',\n 'link_type': 'description'\n }\n}\n\nlinks_invalid = {\n 'link-invalid': {\n 'rel': 'invalid relation',\n 'href': 'not a url',\n }\n}\n\ncollections = {\n 'collection-1': {\n 'name': 'collection-1',\n 'description': 'This a collection description',\n 'title': 'My collection 1',\n 'license': 'proprietary',\n 'providers': providers.values(),\n 'links': links.values()\n },\n 'collection-2': {\n 'name': 'collection-2',\n 'description': 'This a second open source collection description',\n 'title': 'My collection 2',\n 'license': 'MIT',\n 'providers': [providers['provider-2']]\n },\n 'collection-3': {\n 'name': 'collection-3',\n 'description': 'This a third open source collection description',\n 'title': 'My collection 3',\n 'license': 'MIT',\n 'links': [links['link-1']]\n },\n 'collection-4': {\n 'name': 'collection-3',\n 'description': 'This a fourth open source collection description',\n 'title': 'My collection 4',\n 'license': 'MIT'\n },\n 'collection-invalid': {\n 'name': 'collection invalid name',\n 'description': 45,\n 'title': 34,\n 'license': ['proprietary'],\n },\n 'collection-missing-mandatory-fields': {\n 'name': 'collection-missing-mandatory-fields'\n },\n 'collection-invalid-links': {\n 'name': 'collection-invalid-link',\n 'description': 'This is a collection with invalid user link',\n 'license': 'proprietary',\n 'links': [links_invalid['link-invalid']]\n },\n 'collection-invalid-providers': {\n 'name': 'collection-invalid-provider',\n 'description': 'This is a collection with invalid provider',\n 'license': 'proprietary',\n 'providers': providers_invalid.values()\n },\n}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# encoding=utf-8
######
# Genetic algorithm applied to the Traveling Salesman Problem (TSP)
# Python 3.6
# https://morvanzhou.github.io/tutorials/machine-learning/evolutionary-algorithm/2-03-genetic-algorithm-travel-sales-problem/
######
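
# The file above is only a header pointing at the linked tutorial. Below is a
# minimal editorial sketch of the approach the header names (a GA for TSP),
# not the tutorial's actual code; every name and parameter here is an
# illustrative assumption: permutation-encoded tours, truncation selection,
# and swap mutation.
import numpy as np

def tour_length(order, coords):
    # Total length of the closed tour visiting coords in the given order.
    pts = coords[order]
    return np.linalg.norm(pts - np.roll(pts, -1, axis=0), axis=1).sum()

def ga_tsp(coords, pop_size=100, generations=300, seed=0):
    rng = np.random.default_rng(seed)
    n = len(coords)
    pop = np.array([rng.permutation(n) for _ in range(pop_size)])
    for _ in range(generations):
        lengths = np.array([tour_length(ind, coords) for ind in pop])
        elite = pop[np.argsort(lengths)[:pop_size // 2]]  # keep the shorter half
        children = elite.copy()
        for child in children:  # swap mutation: exchange two random cities
            i, j = rng.integers(n, size=2)
            child[[i, j]] = child[[j, i]]
        pop = np.vstack([elite, children])
    best = min(pop, key=lambda ind: tour_length(ind, coords))
    return best, tour_length(best, coords)

if __name__ == '__main__':
    cities = np.random.default_rng(1).random((20, 2))
    order, length = ga_tsp(cities)
    print(f'best tour length: {length:.3f}')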
|
normal
|
{
"blob_id": "e79e4eb1640d5ad6e360dfb18430fbf261cf9d3b",
"index": 6675,
"step-1": "# encoding=utf-8\n\n######\n# 遗传算法应用于旅行商问题(TSP)\n# Python 3.6\n# https://morvanzhou.github.io/tutorials/machine-learning/evolutionary-algorithm/2-03-genetic-algorithm-travel-sales-problem/\n######\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
#!/usr/bin/env python3
import sys
class Parse:
data = []
def __parseLine(line):
"""Parse the given line"""
# extract name
name_len = line.index(" ")
name = line[:name_len]
line = line[name_len + 3:]
# array-ize 'electron' val
elec_pos = line.index("electron") + 9
line = line[:elec_pos] + '[' + line[elec_pos:].replace(' ', ',') + ']'
# quote 'small' val
line = line.replace(' ', '')
line = line.replace('small:', 'small:"').replace(',molar', '",molar')
# quote all keys
for i in ["position", "number", "small", "molar", "electron"]:
line = line.replace(i, '"' + i + '"')
return eval('{"name":"' + name + '",' + line + '}')
def parseFile(filename):
"""Parse the given file"""
Parse.data = []
with open(filename, "r") as f:
for line in f:
Parse.data += [Parse.__parseLine(line)]
return Parse.data
class Write:
def __writeHeader(fd):
"""Write html header"""
print(
"<!DOCTYPE html>",
"<html>",
" <head>",
" <title>Super Tableau 3000</title>",
" <meta charset='utf-8' />",
" <style>", # ty alex for css!
" table { border-collapse: collapse; }",
" td { border: solid; }",
" h4, li { font-size:10px; }",
" .empty { border: 0px; }",
" </style>",
" </head>",
" <body>",
" <table>",
sep="\n",
file=fd
)
def __writeFooter(fd):
"""Write html footer"""
print(
" </table>",
" </body>",
"</html>",
sep="\n",
file=fd
)
def __openRow(fd):
"""Write opening html table row"""
print(" <tr>", file=fd)
def __closeRow(fd):
"""Write closing html table row"""
print(" </tr>", file=fd)
def __writeElement(fd, elm):
"""Write html table cell"""
print(
" <td>",
" <h4>" + elm["name"] + "</h4>",
" <ul>",
" <li>" + str(elm["number"]) + "</li>",
" <li>" + elm["small"] + "</li>",
" <li>" + str(elm["molar"]) + "</li>",
" </ul>",
" </td>",
sep="\n",
file=fd
)
def __writeEmptyElement(fd):
"""Write html empty table cell"""
print(" <td class='empty'></td>", file=fd)
def writeFile(filename):
"""Write our awesome html file"""
with open(filename, "w") as f:
Write.__writeHeader(f)
Write.__openRow(f)
i = 0
for elm in Parse.data:
while i != elm["position"]:
Write.__writeEmptyElement(f)
i += 1
Write.__writeElement(f, elm)
i += 1
if elm["position"] == 17:
i = 0
Write.__closeRow(f)
if elm["number"] != 118:
Write.__openRow(f)
Write.__writeFooter(f)
def doTheJob(input_file):
"""Do all we need"""
Parse.parseFile(input_file)
Write.writeFile(input_file.replace(".txt", ".html"))
if __name__ == '__main__':
if len(sys.argv) == 2:
doTheJob(sys.argv[1])
else:
doTheJob("./ex07/periodic_table.txt")
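# Editorial note (not part of the original script): Parse.__parseLine builds
# each element dict by rewriting the line into a Python literal and eval()-ing
# it, which assumes trusted, well-formed input lines of the shape
#   Name = position:P, number:N, small:S, molar:M, electron:E1 E2 ...
# On untrusted data, ast.literal_eval over the same rewritten string would be
# a safer drop-in, since the constructed text is a plain dict literal.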
|
normal
|
{
"blob_id": "cb77696a90716acdee83a1cf6162a8f42c524e11",
"index": 7612,
"step-1": "<mask token>\n\n\nclass Write:\n\n def __writeHeader(fd):\n \"\"\"Write html header\"\"\"\n print('<!DOCTYPE html>', '<html>', ' <head>',\n ' <title>Super Tableau 3000</title>',\n \" <meta charset='utf-8' />\", ' <style>',\n ' table { border-collapse: collapse; }',\n ' td { border: solid; }', ' h4, li { font-size:10px; }',\n ' .empty { border: 0px; }', ' </style>', ' </head>',\n ' <body>', ' <table>', sep='\\n', file=fd)\n\n def __writeFooter(fd):\n \"\"\"Write html footer\"\"\"\n print(' </table>', ' </body>', '</html>', sep='\\n', file=fd)\n\n def __openRow(fd):\n \"\"\"Write opening html table row\"\"\"\n print(' <tr>', file=fd)\n\n def __closeRow(fd):\n \"\"\"Write closing html table row\"\"\"\n print(' </tr>', file=fd)\n\n def __writeElement(fd, elm):\n \"\"\"Write html table cell\"\"\"\n print(' <td>', ' <h4>' + elm['name'] + '</h4>', ' <ul>',\n ' <li>' + str(elm['number']) + '</li>', ' <li>' + elm\n ['small'] + '</li>', ' <li>' + str(elm['molar']) + '</li>',\n ' </ul>', ' </td>', sep='\\n', file=fd)\n\n def __writeEmptyElement(fd):\n \"\"\"Write html empty table cell\"\"\"\n print(\" <td class='empty'></td>\", file=fd)\n\n def writeFile(filename):\n \"\"\"Write our awesome html file\"\"\"\n with open(filename, 'w') as f:\n Write.__writeHeader(f)\n Write.__openRow(f)\n i = 0\n for elm in Parse.data:\n while i != elm['position']:\n Write.__writeEmptyElement(f)\n i += 1\n Write.__writeElement(f, elm)\n i += 1\n if elm['position'] == 17:\n i = 0\n Write.__closeRow(f)\n if elm['number'] != 118:\n Write.__openRow(f)\n Write.__writeFooter(f)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Parse:\n <mask token>\n\n def __parseLine(line):\n \"\"\"Parse the given line\"\"\"\n name_len = line.index(' ')\n name = line[:name_len]\n line = line[name_len + 3:]\n elec_pos = line.index('electron') + 9\n line = line[:elec_pos] + '[' + line[elec_pos:].replace(' ', ',') + ']'\n line = line.replace(' ', '')\n line = line.replace('small:', 'small:\"').replace(',molar', '\",molar')\n for i in ['position', 'number', 'small', 'molar', 'electron']:\n line = line.replace(i, '\"' + i + '\"')\n return eval('{\"name\":\"' + name + '\",' + line + '}')\n\n def parseFile(filename):\n \"\"\"Parse the given file\"\"\"\n Parse.data = []\n with open(filename, 'r') as f:\n for line in f:\n Parse.data += [Parse.__parseLine(line)]\n return Parse.data\n\n\nclass Write:\n\n def __writeHeader(fd):\n \"\"\"Write html header\"\"\"\n print('<!DOCTYPE html>', '<html>', ' <head>',\n ' <title>Super Tableau 3000</title>',\n \" <meta charset='utf-8' />\", ' <style>',\n ' table { border-collapse: collapse; }',\n ' td { border: solid; }', ' h4, li { font-size:10px; }',\n ' .empty { border: 0px; }', ' </style>', ' </head>',\n ' <body>', ' <table>', sep='\\n', file=fd)\n\n def __writeFooter(fd):\n \"\"\"Write html footer\"\"\"\n print(' </table>', ' </body>', '</html>', sep='\\n', file=fd)\n\n def __openRow(fd):\n \"\"\"Write opening html table row\"\"\"\n print(' <tr>', file=fd)\n\n def __closeRow(fd):\n \"\"\"Write closing html table row\"\"\"\n print(' </tr>', file=fd)\n\n def __writeElement(fd, elm):\n \"\"\"Write html table cell\"\"\"\n print(' <td>', ' <h4>' + elm['name'] + '</h4>', ' <ul>',\n ' <li>' + str(elm['number']) + '</li>', ' <li>' + elm\n ['small'] + '</li>', ' <li>' + str(elm['molar']) + '</li>',\n ' </ul>', ' </td>', sep='\\n', file=fd)\n\n def __writeEmptyElement(fd):\n \"\"\"Write html empty table cell\"\"\"\n print(\" <td class='empty'></td>\", file=fd)\n\n def writeFile(filename):\n \"\"\"Write our awesome html file\"\"\"\n with open(filename, 'w') as f:\n Write.__writeHeader(f)\n Write.__openRow(f)\n i = 0\n for elm in Parse.data:\n while i != elm['position']:\n Write.__writeEmptyElement(f)\n i += 1\n Write.__writeElement(f, elm)\n i += 1\n if elm['position'] == 17:\n i = 0\n Write.__closeRow(f)\n if elm['number'] != 118:\n Write.__openRow(f)\n Write.__writeFooter(f)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Parse:\n data = []\n\n def __parseLine(line):\n \"\"\"Parse the given line\"\"\"\n name_len = line.index(' ')\n name = line[:name_len]\n line = line[name_len + 3:]\n elec_pos = line.index('electron') + 9\n line = line[:elec_pos] + '[' + line[elec_pos:].replace(' ', ',') + ']'\n line = line.replace(' ', '')\n line = line.replace('small:', 'small:\"').replace(',molar', '\",molar')\n for i in ['position', 'number', 'small', 'molar', 'electron']:\n line = line.replace(i, '\"' + i + '\"')\n return eval('{\"name\":\"' + name + '\",' + line + '}')\n\n def parseFile(filename):\n \"\"\"Parse the given file\"\"\"\n Parse.data = []\n with open(filename, 'r') as f:\n for line in f:\n Parse.data += [Parse.__parseLine(line)]\n return Parse.data\n\n\nclass Write:\n\n def __writeHeader(fd):\n \"\"\"Write html header\"\"\"\n print('<!DOCTYPE html>', '<html>', ' <head>',\n ' <title>Super Tableau 3000</title>',\n \" <meta charset='utf-8' />\", ' <style>',\n ' table { border-collapse: collapse; }',\n ' td { border: solid; }', ' h4, li { font-size:10px; }',\n ' .empty { border: 0px; }', ' </style>', ' </head>',\n ' <body>', ' <table>', sep='\\n', file=fd)\n\n def __writeFooter(fd):\n \"\"\"Write html footer\"\"\"\n print(' </table>', ' </body>', '</html>', sep='\\n', file=fd)\n\n def __openRow(fd):\n \"\"\"Write opening html table row\"\"\"\n print(' <tr>', file=fd)\n\n def __closeRow(fd):\n \"\"\"Write closing html table row\"\"\"\n print(' </tr>', file=fd)\n\n def __writeElement(fd, elm):\n \"\"\"Write html table cell\"\"\"\n print(' <td>', ' <h4>' + elm['name'] + '</h4>', ' <ul>',\n ' <li>' + str(elm['number']) + '</li>', ' <li>' + elm\n ['small'] + '</li>', ' <li>' + str(elm['molar']) + '</li>',\n ' </ul>', ' </td>', sep='\\n', file=fd)\n\n def __writeEmptyElement(fd):\n \"\"\"Write html empty table cell\"\"\"\n print(\" <td class='empty'></td>\", file=fd)\n\n def writeFile(filename):\n \"\"\"Write our awesome html file\"\"\"\n with open(filename, 'w') as f:\n Write.__writeHeader(f)\n Write.__openRow(f)\n i = 0\n for elm in Parse.data:\n while i != elm['position']:\n Write.__writeEmptyElement(f)\n i += 1\n Write.__writeElement(f, elm)\n i += 1\n if elm['position'] == 17:\n i = 0\n Write.__closeRow(f)\n if elm['number'] != 118:\n Write.__openRow(f)\n Write.__writeFooter(f)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Parse:\n data = []\n\n def __parseLine(line):\n \"\"\"Parse the given line\"\"\"\n name_len = line.index(' ')\n name = line[:name_len]\n line = line[name_len + 3:]\n elec_pos = line.index('electron') + 9\n line = line[:elec_pos] + '[' + line[elec_pos:].replace(' ', ',') + ']'\n line = line.replace(' ', '')\n line = line.replace('small:', 'small:\"').replace(',molar', '\",molar')\n for i in ['position', 'number', 'small', 'molar', 'electron']:\n line = line.replace(i, '\"' + i + '\"')\n return eval('{\"name\":\"' + name + '\",' + line + '}')\n\n def parseFile(filename):\n \"\"\"Parse the given file\"\"\"\n Parse.data = []\n with open(filename, 'r') as f:\n for line in f:\n Parse.data += [Parse.__parseLine(line)]\n return Parse.data\n\n\nclass Write:\n\n def __writeHeader(fd):\n \"\"\"Write html header\"\"\"\n print('<!DOCTYPE html>', '<html>', ' <head>',\n ' <title>Super Tableau 3000</title>',\n \" <meta charset='utf-8' />\", ' <style>',\n ' table { border-collapse: collapse; }',\n ' td { border: solid; }', ' h4, li { font-size:10px; }',\n ' .empty { border: 0px; }', ' </style>', ' </head>',\n ' <body>', ' <table>', sep='\\n', file=fd)\n\n def __writeFooter(fd):\n \"\"\"Write html footer\"\"\"\n print(' </table>', ' </body>', '</html>', sep='\\n', file=fd)\n\n def __openRow(fd):\n \"\"\"Write opening html table row\"\"\"\n print(' <tr>', file=fd)\n\n def __closeRow(fd):\n \"\"\"Write closing html table row\"\"\"\n print(' </tr>', file=fd)\n\n def __writeElement(fd, elm):\n \"\"\"Write html table cell\"\"\"\n print(' <td>', ' <h4>' + elm['name'] + '</h4>', ' <ul>',\n ' <li>' + str(elm['number']) + '</li>', ' <li>' + elm\n ['small'] + '</li>', ' <li>' + str(elm['molar']) + '</li>',\n ' </ul>', ' </td>', sep='\\n', file=fd)\n\n def __writeEmptyElement(fd):\n \"\"\"Write html empty table cell\"\"\"\n print(\" <td class='empty'></td>\", file=fd)\n\n def writeFile(filename):\n \"\"\"Write our awesome html file\"\"\"\n with open(filename, 'w') as f:\n Write.__writeHeader(f)\n Write.__openRow(f)\n i = 0\n for elm in Parse.data:\n while i != elm['position']:\n Write.__writeEmptyElement(f)\n i += 1\n Write.__writeElement(f, elm)\n i += 1\n if elm['position'] == 17:\n i = 0\n Write.__closeRow(f)\n if elm['number'] != 118:\n Write.__openRow(f)\n Write.__writeFooter(f)\n\n\ndef doTheJob(input_file):\n \"\"\"Do all we need\"\"\"\n Parse.parseFile(input_file)\n Write.writeFile(input_file.replace('.txt', '.html'))\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n\nimport sys\n\n\nclass Parse:\n data = []\n\n def __parseLine(line):\n \"\"\"Parse the given line\"\"\"\n\n # extract name\n name_len = line.index(\" \")\n name = line[:name_len]\n line = line[name_len + 3:]\n\n # array-ize 'electron' val\n elec_pos = line.index(\"electron\") + 9\n line = line[:elec_pos] + '[' + line[elec_pos:].replace(' ', ',') + ']'\n\n # quote 'small' val\n line = line.replace(' ', '')\n line = line.replace('small:', 'small:\"').replace(',molar', '\",molar')\n\n # quote all keys\n for i in [\"position\", \"number\", \"small\", \"molar\", \"electron\"]:\n line = line.replace(i, '\"' + i + '\"')\n\n return eval('{\"name\":\"' + name + '\",' + line + '}')\n\n def parseFile(filename):\n \"\"\"Parse the given file\"\"\"\n\n Parse.data = []\n with open(filename, \"r\") as f:\n for line in f:\n Parse.data += [Parse.__parseLine(line)]\n return Parse.data\n\n\nclass Write:\n def __writeHeader(fd):\n \"\"\"Write html header\"\"\"\n\n print(\n \"<!DOCTYPE html>\",\n \"<html>\",\n \" <head>\",\n \" <title>Super Tableau 3000</title>\",\n \" <meta charset='utf-8' />\",\n \" <style>\", # ty alex for css!\n \" table { border-collapse: collapse; }\",\n \" td { border: solid; }\",\n \" h4, li { font-size:10px; }\",\n \" .empty { border: 0px; }\",\n \" </style>\",\n \" </head>\",\n \" <body>\",\n \" <table>\",\n sep=\"\\n\",\n file=fd\n )\n\n def __writeFooter(fd):\n \"\"\"Write html footer\"\"\"\n\n print(\n \" </table>\",\n \" </body>\",\n \"</html>\",\n sep=\"\\n\",\n file=fd\n )\n\n def __openRow(fd):\n \"\"\"Write opening html table row\"\"\"\n\n print(\" <tr>\", file=fd)\n\n def __closeRow(fd):\n \"\"\"Write closing html table row\"\"\"\n\n print(\" </tr>\", file=fd)\n\n def __writeElement(fd, elm):\n \"\"\"Write html table cell\"\"\"\n\n print(\n \" <td>\",\n \" <h4>\" + elm[\"name\"] + \"</h4>\",\n \" <ul>\",\n \" <li>\" + str(elm[\"number\"]) + \"</li>\",\n \" <li>\" + elm[\"small\"] + \"</li>\",\n \" <li>\" + str(elm[\"molar\"]) + \"</li>\",\n \" </ul>\",\n \" </td>\",\n sep=\"\\n\",\n file=fd\n )\n\n def __writeEmptyElement(fd):\n \"\"\"Write html empty table cell\"\"\"\n\n print(\" <td class='empty'></td>\", file=fd)\n\n def writeFile(filename):\n \"\"\"Write our awesome html file\"\"\"\n\n with open(filename, \"w\") as f:\n Write.__writeHeader(f)\n\n Write.__openRow(f)\n i = 0\n for elm in Parse.data:\n while i != elm[\"position\"]:\n Write.__writeEmptyElement(f)\n i += 1\n\n Write.__writeElement(f, elm)\n i += 1\n\n if elm[\"position\"] == 17:\n i = 0\n Write.__closeRow(f)\n if elm[\"number\"] != 118:\n Write.__openRow(f)\n\n Write.__writeFooter(f)\n\n\ndef doTheJob(input_file):\n \"\"\"Do all we need\"\"\"\n\n Parse.parseFile(input_file)\n Write.writeFile(input_file.replace(\".txt\", \".html\"))\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 2:\n doTheJob(sys.argv[1])\n else:\n doTheJob(\"./ex07/periodic_table.txt\")\n",
"step-ids": [
8,
11,
12,
13,
16
]
}
|
[
8,
11,
12,
13,
16
] |
from database import db
from database import ma
from datetime import datetime
from sqlalchemy import ForeignKeyConstraint
from models.admin import Admin, admin_limited_schema
from models.event_status import EventStatus, event_status_schema
from models.org_unit import org_unit_schema
class Event(db.Model):
# class corresponding to the event table in the database
__tablename__ = 'event'
__table_args__ = (
ForeignKeyConstraint(['status_id'], ['event_status.id']),
ForeignKeyConstraint(['org_id'], ['org_unit.id']),
ForeignKeyConstraint(['created_by'], ['admin.id']),
)
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(45), nullable=False)
venue = db.Column(db.String(45), nullable=False)
location_lat = db.Column(db.Float, nullable=False)
location_long = db.Column(db.Float, nullable=False)
date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)
start_time = db.Column(db.DateTime, nullable=False)
duration = db.Column(db.Float, nullable=False)
coordinator_name = db.Column(db.String(45), nullable=False)
coordinator_contact = db.Column(db.Integer, nullable=False)
status_id = db.Column(db.Integer, nullable=False)
org_id = db.Column(db.Integer, nullable=False)
created_by = db.Column(db.Integer, nullable=False)
description = db.Column(db.String(500), nullable=False)
def __init__(self, name, venue, location_lat, location_long, start_time, duration, coordinator_name, coordinator_contact, status_id, org_id, created_by, description):
self.name = name
self.venue = venue
self.location_lat = location_lat
self.location_long = location_long
self.start_time = start_time
self.duration = duration
self.coordinator_name = coordinator_name
self.coordinator_contact = coordinator_contact
self.status_id = status_id
self.org_id = org_id
self.created_by = created_by
self.description = description
class EventSchema(ma.Schema):
class Meta:
fields = ('id', 'name', 'venue', 'location_lat', 'location_long', 'date_created', 'start_time',
'duration', 'coordinator_name', 'coordinator_contact', 'status_id', 'org_id', 'description')
# init schema
event_schema = EventSchema()
events_schema = EventSchema(many=True)
class EventFullInfoSchema(ma.Schema):
event = ma.Nested(event_schema)
admin = ma.Nested(admin_limited_schema)
status = ma.Nested(event_status_schema)
org_unit = ma.Nested(org_unit_schema)
event_with_full_schema = EventFullInfoSchema()
events_with_full_schema = EventFullInfoSchema(many=True)
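
# Minimal usage sketch (an illustration, not part of the original module):
# assumes a Flask app context and the objects defined above; every field
# value below is made up.
if __name__ == '__main__':
    demo = Event(name='Blood Drive', venue='Main Hall',
                 location_lat=12.97, location_long=77.59,
                 start_time=datetime(2021, 1, 15, 10, 0), duration=2.5,
                 coordinator_name='A. Kumar', coordinator_contact=987654321,
                 status_id=1, org_id=1, created_by=1,
                 description='Annual blood donation camp')
    print(event_schema.dump(demo))  # serialize the unsaved instance to a dict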
|
normal
|
{
"blob_id": "f3167d8f1a806c38fb10672605d8e94265d2fc9c",
"index": 723,
"step-1": "<mask token>\n\n\nclass Event(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass EventSchema(ma.Schema):\n\n\n class Meta:\n fields = ('id', 'name', 'venue', 'location_lat', 'location_long',\n 'date_created', 'start_time', 'duration', 'coordinator_name',\n 'coordinator_contact', 'status_id', 'org_id', 'description')\n\n\n<mask token>\n\n\nclass EventFullInfoSchema(ma.Schema):\n event = ma.Nested(event_schema)\n admin = ma.Nested(admin_limited_schema)\n status = ma.Nested(event_status_schema)\n org_unit = ma.Nested(org_unit_schema)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Event(db.Model):\n __tablename__ = 'event'\n __table_args__ = ForeignKeyConstraint(['status_id'], ['event_status.id']\n ), ForeignKeyConstraint(['org_id'], ['org_unit.id']\n ), ForeignKeyConstraint(['created_by'], ['admin.id'])\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(45), nullable=False)\n venue = db.Column(db.String(45), nullable=False)\n location_lat = db.Column(db.Float, nullable=False)\n location_long = db.Column(db.Float, nullable=False)\n date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)\n start_time = db.Column(db.DateTime, nullable=False)\n duration = db.Column(db.Float, nullable=False)\n coordinator_name = db.Column(db.String(45), nullable=False)\n coordinator_contact = db.Column(db.Integer, nullable=False)\n status_id = db.Column(db.Integer, nullable=False)\n org_id = db.Column(db.Integer, nullable=False)\n created_by = db.Column(db.Integer, nullable=False)\n description = db.Column(db.String(500), nullable=False)\n\n def __init__(self, name, venue, location_lat, location_long, start_time,\n duration, coordinator_name, coordinator_contact, status_id, org_id,\n created_by, description):\n self.name = name\n self.venue = venue\n self.location_lat = location_lat\n self.location_long = location_long\n self.start_time = start_time\n self.duration = duration\n self.coordinator_name = coordinator_name\n self.coordinator_contact = coordinator_contact\n self.status_id = status_id\n self.org_id = org_id\n self.created_by = created_by\n self.description = description\n\n\nclass EventSchema(ma.Schema):\n\n\n class Meta:\n fields = ('id', 'name', 'venue', 'location_lat', 'location_long',\n 'date_created', 'start_time', 'duration', 'coordinator_name',\n 'coordinator_contact', 'status_id', 'org_id', 'description')\n\n\n<mask token>\n\n\nclass EventFullInfoSchema(ma.Schema):\n event = ma.Nested(event_schema)\n admin = ma.Nested(admin_limited_schema)\n status = ma.Nested(event_status_schema)\n org_unit = ma.Nested(org_unit_schema)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Event(db.Model):\n __tablename__ = 'event'\n __table_args__ = ForeignKeyConstraint(['status_id'], ['event_status.id']\n ), ForeignKeyConstraint(['org_id'], ['org_unit.id']\n ), ForeignKeyConstraint(['created_by'], ['admin.id'])\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(45), nullable=False)\n venue = db.Column(db.String(45), nullable=False)\n location_lat = db.Column(db.Float, nullable=False)\n location_long = db.Column(db.Float, nullable=False)\n date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)\n start_time = db.Column(db.DateTime, nullable=False)\n duration = db.Column(db.Float, nullable=False)\n coordinator_name = db.Column(db.String(45), nullable=False)\n coordinator_contact = db.Column(db.Integer, nullable=False)\n status_id = db.Column(db.Integer, nullable=False)\n org_id = db.Column(db.Integer, nullable=False)\n created_by = db.Column(db.Integer, nullable=False)\n description = db.Column(db.String(500), nullable=False)\n\n def __init__(self, name, venue, location_lat, location_long, start_time,\n duration, coordinator_name, coordinator_contact, status_id, org_id,\n created_by, description):\n self.name = name\n self.venue = venue\n self.location_lat = location_lat\n self.location_long = location_long\n self.start_time = start_time\n self.duration = duration\n self.coordinator_name = coordinator_name\n self.coordinator_contact = coordinator_contact\n self.status_id = status_id\n self.org_id = org_id\n self.created_by = created_by\n self.description = description\n\n\nclass EventSchema(ma.Schema):\n\n\n class Meta:\n fields = ('id', 'name', 'venue', 'location_lat', 'location_long',\n 'date_created', 'start_time', 'duration', 'coordinator_name',\n 'coordinator_contact', 'status_id', 'org_id', 'description')\n\n\nevent_schema = EventSchema()\nevents_schema = EventSchema(many=True)\n\n\nclass EventFullInfoSchema(ma.Schema):\n event = ma.Nested(event_schema)\n admin = ma.Nested(admin_limited_schema)\n status = ma.Nested(event_status_schema)\n org_unit = ma.Nested(org_unit_schema)\n\n\nevent_with_full_schema = EventFullInfoSchema()\nevents_with_full_schema = EventFullInfoSchema(many=True)\n",
"step-4": "from database import db\nfrom database import ma\nfrom datetime import datetime\nfrom sqlalchemy import ForeignKeyConstraint\nfrom models.admin import Admin, admin_limited_schema\nfrom models.event_status import EventStatus, event_status_schema\nfrom models.org_unit import org_unit_schema\n\n\nclass Event(db.Model):\n __tablename__ = 'event'\n __table_args__ = ForeignKeyConstraint(['status_id'], ['event_status.id']\n ), ForeignKeyConstraint(['org_id'], ['org_unit.id']\n ), ForeignKeyConstraint(['created_by'], ['admin.id'])\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(45), nullable=False)\n venue = db.Column(db.String(45), nullable=False)\n location_lat = db.Column(db.Float, nullable=False)\n location_long = db.Column(db.Float, nullable=False)\n date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)\n start_time = db.Column(db.DateTime, nullable=False)\n duration = db.Column(db.Float, nullable=False)\n coordinator_name = db.Column(db.String(45), nullable=False)\n coordinator_contact = db.Column(db.Integer, nullable=False)\n status_id = db.Column(db.Integer, nullable=False)\n org_id = db.Column(db.Integer, nullable=False)\n created_by = db.Column(db.Integer, nullable=False)\n description = db.Column(db.String(500), nullable=False)\n\n def __init__(self, name, venue, location_lat, location_long, start_time,\n duration, coordinator_name, coordinator_contact, status_id, org_id,\n created_by, description):\n self.name = name\n self.venue = venue\n self.location_lat = location_lat\n self.location_long = location_long\n self.start_time = start_time\n self.duration = duration\n self.coordinator_name = coordinator_name\n self.coordinator_contact = coordinator_contact\n self.status_id = status_id\n self.org_id = org_id\n self.created_by = created_by\n self.description = description\n\n\nclass EventSchema(ma.Schema):\n\n\n class Meta:\n fields = ('id', 'name', 'venue', 'location_lat', 'location_long',\n 'date_created', 'start_time', 'duration', 'coordinator_name',\n 'coordinator_contact', 'status_id', 'org_id', 'description')\n\n\nevent_schema = EventSchema()\nevents_schema = EventSchema(many=True)\n\n\nclass EventFullInfoSchema(ma.Schema):\n event = ma.Nested(event_schema)\n admin = ma.Nested(admin_limited_schema)\n status = ma.Nested(event_status_schema)\n org_unit = ma.Nested(org_unit_schema)\n\n\nevent_with_full_schema = EventFullInfoSchema()\nevents_with_full_schema = EventFullInfoSchema(many=True)\n",
"step-5": "from database import db\nfrom database import ma\nfrom datetime import datetime\nfrom sqlalchemy import ForeignKeyConstraint\nfrom models.admin import Admin, admin_limited_schema\nfrom models.event_status import EventStatus, event_status_schema\nfrom models.org_unit import org_unit_schema\n\nclass Event(db.Model):\n # class corresponding to the event table in the database\n __tablename__ = 'event'\n __table_args__ = (\n ForeignKeyConstraint(['status_id'], ['event_status.id']),\n ForeignKeyConstraint(['org_id'], ['org_unit.id']),\n ForeignKeyConstraint(['created_by'], ['admin.id']),\n )\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(45), nullable=False)\n venue = db.Column(db.String(45), nullable=False)\n location_lat = db.Column(db.Float, nullable=False)\n location_long = db.Column(db.Float, nullable=False)\n date_created = db.Column(db.DateTime, nullable=False, default=datetime.now)\n start_time = db.Column(db.DateTime, nullable=False)\n duration = db.Column(db.Float, nullable=False)\n coordinator_name = db.Column(db.String(45), nullable=False)\n coordinator_contact = db.Column(db.Integer, nullable=False)\n status_id = db.Column(db.Integer, nullable=False)\n org_id = db.Column(db.Integer, nullable=False)\n created_by = db.Column(db.Integer, nullable=False)\n description = db.Column(db.String(500), nullable=False)\n\n def __init__(self, name, venue, location_lat, location_long, start_time, duration, coordinator_name, coordinator_contact, status_id, org_id, created_by, description):\n self.name = name\n self.venue = venue\n self.location_lat = location_lat\n self.location_long = location_long\n self.start_time = start_time\n self.duration = duration\n self.coordinator_name = coordinator_name\n self.coordinator_contact = coordinator_contact\n self.status_id = status_id\n self.org_id = org_id\n self.created_by = created_by\n self.description = description\n\n\nclass EventSchema(ma.Schema):\n class Meta:\n fields = ('id', 'name', 'venue', 'location_lat', 'location_long', 'date_created', 'start_time',\n 'duration', 'coordinator_name', 'coordinator_contact', 'status_id', 'org_id', 'description')\n\n\n# init schema\nevent_schema = EventSchema()\nevents_schema = EventSchema(many=True)\n\n\nclass EventFullInfoSchema(ma.Schema):\n event = ma.Nested(event_schema)\n admin = ma.Nested(admin_limited_schema)\n status = ma.Nested(event_status_schema)\n org_unit = ma.Nested(org_unit_schema)\n\n\n\nevent_with_full_schema = EventFullInfoSchema()\nevents_with_full_schema = EventFullInfoSchema(many=True)",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
#------------------------------------------------------------------------
#
# @Author : EV2 CHEVALLIER
#
# @Date : 16.09.20
# @Location : École Navale / Chaire de Cyberdéfense des systèmes navals
# @Project     : Final-year project (Projet de Fin d'Études)
# @Subject : # Real time detection of cyber anomalies upon a NMEA network by using machine learning methods
#
#------------------------------------------------------------------------
# @Title : Training
#------------------------------------------------------------------------
# @Description : # This program reads the training dataset, extracts the interesting features (mean and standard deviation of the variations of latitude,
#                   longitude, heading and distance),
#                   puts them in a Python dictionary and saves it to a binary file with the pickle module.
#------------------------------------------------------------------------
import traitement as tr
import pickle as pk
import model as md
def training(data):
    model = {}
    model["µ"] = {}
    model["sigma"] = {}

    for x in data:  # loop over speeds
        model["µ"][x] = {}
        model["sigma"][x] = {}

        for y in data[x]:  # loop over headings

            model["µ"][x][y] = {}
            model["sigma"][x][y] = {}

            doc = tr.load(data[x][y])  # open the json file

            phi_l = doc[0]
            g_l = doc[1]  # get the lists of phi, g, t
            t_l = doc[2]

            dphi_l = tr.delta(phi_l, t_l)  # compute the differences
            dg_l = tr.delta(g_l, t_l)
            dheading_l = tr.delta(tr.heading(phi_l, g_l), t_l)
            d_distance = tr.delta_distance(phi_l, g_l)

# we build a model with the statistical values of the features: variation of latitude, longitude, heading and distance
model["µ"][x][y]["phi"] = tr.parameters(dphi_l)["mean"]
model["µ"][x][y]["g"] = tr.parameters(dg_l)["mean"] # met à jour le modele
model["sigma"][x][y]["phi"] = tr.parameters(dphi_l)["standard_deviation"]
model["sigma"][x][y]["g"] = tr.parameters(g_l)["standard_deviation"]
model["µ"][x][y]["heading"] = tr.parameters(dheading_l)["mean"]
model["µ"][x][y]["distance"] = tr.parameters(d_distance)["mean"]
model["sigma"][x][y]["heading"] = tr.parameters(dheading_l)["standard_deviation"]
model["sigma"][x][y]["distance"] = tr.parameters(d_distance)["standard_deviation"]
    with open('model.sauv', 'wb') as model_sauv_file:
        pk.dump(model, model_sauv_file)  # save the model in a binary file
return model
training(md.model())
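
# Minimal sketch of reading the trained model back (mirrors the pickle dump
# above; 'model.sauv' is the file written by training()):
def load_model(path='model.sauv'):
    with open(path, 'rb') as model_sauv_file:
        return pk.load(model_sauv_file)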
|
normal
|
{
"blob_id": "6726c8f1b3ef9a0df74c25c1921203af3aaacb12",
"index": 8758,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef training(dict):\n model = {}\n model['µ'] = {}\n model['sigma'] = {}\n for x in dict:\n model['µ'][x] = {}\n model['sigma'][x] = {}\n for y in dict[x]:\n model['µ'][x][y] = {}\n model['sigma'][x][y] = {}\n doc = tr.load(dict[x][y])\n phi_l = doc[0]\n g_l = doc[1]\n t_l = doc[2]\n dphi_l = tr.delta(phi_l, t_l)\n dg_l = tr.delta(g_l, t_l)\n dheading_l = tr.delta(tr.heading(phi_l, g_l), t_l)\n d_distance = tr.delta_distance(phi_l, g_l)\n model['µ'][x][y]['phi'] = tr.parameters(dphi_l)['mean']\n model['µ'][x][y]['g'] = tr.parameters(dg_l)['mean']\n model['sigma'][x][y]['phi'] = tr.parameters(dphi_l)[\n 'standard_deviation']\n model['sigma'][x][y]['g'] = tr.parameters(g_l)['standard_deviation'\n ]\n model['µ'][x][y]['heading'] = tr.parameters(dheading_l)['mean']\n model['µ'][x][y]['distance'] = tr.parameters(d_distance)['mean']\n model['sigma'][x][y]['heading'] = tr.parameters(dheading_l)[\n 'standard_deviation']\n model['sigma'][x][y]['distance'] = tr.parameters(d_distance)[\n 'standard_deviation']\n with open('model.sauv', 'wb') as model_sauv_file:\n pk.dump(model, model_sauv_file)\n return model\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef training(dict):\n model = {}\n model['µ'] = {}\n model['sigma'] = {}\n for x in dict:\n model['µ'][x] = {}\n model['sigma'][x] = {}\n for y in dict[x]:\n model['µ'][x][y] = {}\n model['sigma'][x][y] = {}\n doc = tr.load(dict[x][y])\n phi_l = doc[0]\n g_l = doc[1]\n t_l = doc[2]\n dphi_l = tr.delta(phi_l, t_l)\n dg_l = tr.delta(g_l, t_l)\n dheading_l = tr.delta(tr.heading(phi_l, g_l), t_l)\n d_distance = tr.delta_distance(phi_l, g_l)\n model['µ'][x][y]['phi'] = tr.parameters(dphi_l)['mean']\n model['µ'][x][y]['g'] = tr.parameters(dg_l)['mean']\n model['sigma'][x][y]['phi'] = tr.parameters(dphi_l)[\n 'standard_deviation']\n model['sigma'][x][y]['g'] = tr.parameters(g_l)['standard_deviation'\n ]\n model['µ'][x][y]['heading'] = tr.parameters(dheading_l)['mean']\n model['µ'][x][y]['distance'] = tr.parameters(d_distance)['mean']\n model['sigma'][x][y]['heading'] = tr.parameters(dheading_l)[\n 'standard_deviation']\n model['sigma'][x][y]['distance'] = tr.parameters(d_distance)[\n 'standard_deviation']\n with open('model.sauv', 'wb') as model_sauv_file:\n pk.dump(model, model_sauv_file)\n return model\n\n\ntraining(md.model())\n",
"step-4": "import traitement as tr\nimport pickle as pk\nimport model as md\n\n\ndef training(dict):\n model = {}\n model['µ'] = {}\n model['sigma'] = {}\n for x in dict:\n model['µ'][x] = {}\n model['sigma'][x] = {}\n for y in dict[x]:\n model['µ'][x][y] = {}\n model['sigma'][x][y] = {}\n doc = tr.load(dict[x][y])\n phi_l = doc[0]\n g_l = doc[1]\n t_l = doc[2]\n dphi_l = tr.delta(phi_l, t_l)\n dg_l = tr.delta(g_l, t_l)\n dheading_l = tr.delta(tr.heading(phi_l, g_l), t_l)\n d_distance = tr.delta_distance(phi_l, g_l)\n model['µ'][x][y]['phi'] = tr.parameters(dphi_l)['mean']\n model['µ'][x][y]['g'] = tr.parameters(dg_l)['mean']\n model['sigma'][x][y]['phi'] = tr.parameters(dphi_l)[\n 'standard_deviation']\n model['sigma'][x][y]['g'] = tr.parameters(g_l)['standard_deviation'\n ]\n model['µ'][x][y]['heading'] = tr.parameters(dheading_l)['mean']\n model['µ'][x][y]['distance'] = tr.parameters(d_distance)['mean']\n model['sigma'][x][y]['heading'] = tr.parameters(dheading_l)[\n 'standard_deviation']\n model['sigma'][x][y]['distance'] = tr.parameters(d_distance)[\n 'standard_deviation']\n with open('model.sauv', 'wb') as model_sauv_file:\n pk.dump(model, model_sauv_file)\n return model\n\n\ntraining(md.model())\n",
"step-5": "#------------------------------------------------------------------------\n#\n# @Author : EV2 CHEVALLIER \n#\n# @Date : 16.09.20\n# @Location : École Navale / Chaire de Cyberdéfense des systèmes navals\n# @Project : Projet de Fin d'Études\n# @Subject : # Real time detection of cyber anomalies upon a NMEA network by using machine learning methods\n#\n#------------------------------------------------------------------------\n# @Title : Training\n#------------------------------------------------------------------------\n# @Description : # This programm get the training dataset, extract the interesting features ( mean and standard deviation of variations of latitude, \n# longitude, heading and distance )\n# and put it in a python dictionnary and save it in a binary file with the pickle module.\n\n#------------------------------------------------------------------------\n\n\nimport traitement as tr\nimport pickle as pk\nimport model as md\n\ndef training(dict):\n\n\n model={}\n model[\"µ\"]={}\n model[\"sigma\"]={}\n\n for x in dict: # loop with speed\n model[\"µ\"][x]={}\n model[\"sigma\"][x]={}\n\n for y in dict[x]: # loop with heading\n\n model[\"µ\"][x][y] = {}\n model[\"sigma\"][x][y] = {}\n\n doc=tr.load(dict[x][y]) # open the json file\n\n phi_l=doc[0]\n g_l=doc[1] # get a list of phi,g,t\n t_l=doc[2]\n\n dphi_l=tr.delta(phi_l,t_l) # compute the differences\n dg_l=tr.delta(g_l,t_l)\n dheading_l=tr.delta(tr.heading(phi_l,g_l),t_l)\n d_distance=tr.delta_distance(phi_l,g_l)\n\n# we build a model with the statistical values of the features : variation of latitude, longitude, heading and distance\n\n model[\"µ\"][x][y][\"phi\"] = tr.parameters(dphi_l)[\"mean\"]\n model[\"µ\"][x][y][\"g\"] = tr.parameters(dg_l)[\"mean\"] # met à jour le modele\n\n model[\"sigma\"][x][y][\"phi\"] = tr.parameters(dphi_l)[\"standard_deviation\"]\n model[\"sigma\"][x][y][\"g\"] = tr.parameters(g_l)[\"standard_deviation\"]\n\n\n model[\"µ\"][x][y][\"heading\"] = tr.parameters(dheading_l)[\"mean\"]\n model[\"µ\"][x][y][\"distance\"] = tr.parameters(d_distance)[\"mean\"]\n\n model[\"sigma\"][x][y][\"heading\"] = tr.parameters(dheading_l)[\"standard_deviation\"]\n model[\"sigma\"][x][y][\"distance\"] = tr.parameters(d_distance)[\"standard_deviation\"]\n\n with open('model.sauv','wb' ) as model_sauv_file: \n pk.dump(model, model_sauv_file) # save the model in a binary file\n\n return model\n\ntraining(md.model())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
import json
import os
import io
import shutil
import pytest
from chi_annotator.algo_factory.common import TrainingData
from chi_annotator.task_center.config import AnnotatorConfig
from chi_annotator.task_center.data_loader import load_local_data
from chi_annotator.task_center.model import Interpreter
from chi_annotator.task_center.model import Trainer
from tests.utils.txt_to_json import create_tmp_test_jsonfile, rm_tmp_file
class TestTrainer(object):
@classmethod
def setup_class(cls):
""" setup any state specific to the execution of the given class (which
usually contains tests).
"""
pass
@classmethod
def teardown_class(cls):
""" teardown any state that was previously setup with a call to
setup_class.
"""
pass
"""
test Trainer and Interpreter
"""
def ignore_test_load_local_data(self):
"""
test load local json format data
:return:
"""
tmp_path = create_tmp_test_jsonfile("tmp.json")
train_data = load_local_data(tmp_path)
rm_tmp_file("tmp.json")
assert train_data is not None
assert len(train_data.training_examples) == 1000
assert "text" not in train_data.training_examples[0].data
assert "label" in train_data.training_examples[0].data
def ignore_test_load_config(self):
"""
test load config
:return:
"""
        config = AnnotatorConfig(
            filename="chi_annotator/user_instance/examples/classify/spam_email_classify_config.json")
assert config["name"] == "email_spam_classification"
    def ignore_test_load_default_config(self):
"""
test load default config
:return:
"""
config = AnnotatorConfig()
assert config["config"] == "config.json"
def ignore_test_trainer_init(self):
"""
test trainer
:return:
"""
test_config = "tests/data/test_config/test_config.json"
config = AnnotatorConfig(test_config)
trainer = Trainer(config)
assert len(trainer.pipeline) > 0
def ignore_test_pipeline_flow(self):
"""
test trainer's train func for pipeline
:return:
"""
test_config = "tests/data/test_config/test_config.json"
config = AnnotatorConfig(test_config)
trainer = Trainer(config)
assert len(trainer.pipeline) > 0
# create tmp train set
tmp_path = create_tmp_test_jsonfile("tmp.json")
train_data = load_local_data(tmp_path)
# rm tmp train set
rm_tmp_file("tmp.json")
interpreter = trainer.train(train_data)
assert interpreter is not None
        out1 = interpreter.parse("点连接拿红包啦")
# test persist and load
persisted_path = trainer.persist(config['path'],
config['project'],
config['fixed_model_name'])
interpreter_loaded = Interpreter.load(persisted_path, config)
out2 = interpreter_loaded.parse("点连接拿红包啦")
assert out1.get("classifylabel").get("name") == out2.get("classifylabel").get("name")
# remove tmp models
shutil.rmtree(config['path'], ignore_errors=True)
def ignore_test_trainer_persist(self):
"""
test pipeline persist, metadata will be saved
:return:
"""
test_config = "tests/data/test_config/test_config.json"
config = AnnotatorConfig(test_config)
trainer = Trainer(config)
assert len(trainer.pipeline) > 0
# char_tokenizer component should been created
assert trainer.pipeline[0] is not None
# create tmp train set
tmp_path = create_tmp_test_jsonfile("tmp.json")
train_data = load_local_data(tmp_path)
# rm tmp train set
rm_tmp_file("tmp.json")
trainer.train(train_data)
persisted_path = trainer.persist(config['path'],
config['project'],
config['fixed_model_name'])
# load persisted metadata
metadata_path = os.path.join(persisted_path, 'metadata.json')
with io.open(metadata_path) as f:
metadata = json.load(f)
assert 'trained_at' in metadata
# rm tmp files and dirs
shutil.rmtree(config['path'], ignore_errors=False)
def ignore_test_train_model_empty_pipeline(self):
"""
train model with no component
:return:
"""
test_config = "tests/data/test_config/test_config.json"
config = AnnotatorConfig(test_config)
config['pipeline'] = []
tmp_path = create_tmp_test_jsonfile("tmp.json")
train_data = load_local_data(tmp_path)
rm_tmp_file("tmp.json")
with pytest.raises(ValueError):
trainer = Trainer(config)
trainer.train(train_data)
def ignore_test_handles_pipeline_with_non_existing_component(self):
"""
handle no exist component in pipeline
:return:
"""
test_config = "tests/data/test_config/test_config.json"
config = AnnotatorConfig(test_config)
config['pipeline'].append("unknown_component")
tmp_path = create_tmp_test_jsonfile("tmp.json")
train_data = load_local_data(tmp_path)
rm_tmp_file("tmp.json")
with pytest.raises(Exception) as execinfo:
trainer = Trainer(config)
trainer.train(train_data)
assert "Failed to find component" in str(execinfo.value)
def ignore_test_load_and_persist_without_train(self):
"""
test save and load model without train
:return:
"""
test_config = "tests/data/test_config/test_config.json"
config = AnnotatorConfig(test_config)
trainer = Trainer(config)
assert len(trainer.pipeline) > 0
# create tmp train set
tmp_path = create_tmp_test_jsonfile("tmp.json")
train_data = load_local_data(tmp_path)
# rm tmp train set
rm_tmp_file("tmp.json")
# interpreter = trainer.train(train_data)
# test persist and load
persisted_path = trainer.persist(config['path'],
config['project'],
config['fixed_model_name'])
interpreter_loaded = Interpreter.load(persisted_path, config)
assert interpreter_loaded.pipeline
assert interpreter_loaded.parse("hello") is not None
assert interpreter_loaded.parse("Hello today is Monday, again!") is not None
# remove tmp models
shutil.rmtree(config['path'], ignore_errors=False)
def ignore_test_train_with_empty_data(self):
"""
test train with empty train data
:return:
"""
test_config = "tests/data/test_config/test_config.json"
config = AnnotatorConfig(test_config)
trainer = Trainer(config)
assert len(trainer.pipeline) > 0
# create tmp train set
train_data = TrainingData([])
# rm tmp train set
trainer.train(train_data)
# test persist and load
persisted_path = trainer.persist(config['path'],
config['project'],
config['fixed_model_name'])
interpreter_loaded = Interpreter.load(persisted_path, config)
assert interpreter_loaded.pipeline
assert interpreter_loaded.parse("hello") is not None
assert interpreter_loaded.parse("Hello today is Monday, again!") is not None
# remove tmp models
shutil.rmtree(config['path'], ignore_errors=False)
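
# Note: the ignore_test_* prefix keeps these cases out of pytest's default
# collection (only test_* is picked up). A minimal direct entry point once
# the prefixes are restored (a sketch, not part of the original suite):
if __name__ == '__main__':
    pytest.main(['-v', __file__])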
|
normal
|
{
"blob_id": "192c44540018b9e1ab857bdbfba6fdb39bb74431",
"index": 8769,
"step-1": "<mask token>\n\n\nclass TestTrainer(object):\n <mask token>\n\n @classmethod\n def teardown_class(cls):\n \"\"\" teardown any state that was previously setup with a call to\n setup_class.\n \"\"\"\n pass\n <mask token>\n\n def ignore_test_load_local_data(self):\n \"\"\"\n test load local json format data\n :return:\n \"\"\"\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n assert train_data is not None\n assert len(train_data.training_examples) == 1000\n assert 'text' not in train_data.training_examples[0].data\n assert 'label' in train_data.training_examples[0].data\n\n def ignore_test_load_config(self):\n \"\"\"\n test load config\n :return:\n \"\"\"\n config = AnnotatorConfig(filename=\n 'chi_annotator/user_instance/examples/classify/spam_email_classify_config.json'\n )\n assert config['name'] == 'email_spam_classification'\n\n def ignor_test_load_default_config(self):\n \"\"\"\n test load default config\n :return:\n \"\"\"\n config = AnnotatorConfig()\n assert config['config'] == 'config.json'\n\n def ignore_test_trainer_init(self):\n \"\"\"\n test trainer\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n\n def ignore_test_pipeline_flow(self):\n \"\"\"\n test trainer's train func for pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n interpreter = trainer.train(train_data)\n assert interpreter is not None\n out1 = interpreter.parse('点连接拿红包啦')\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n out2 = interpreter_loaded.parse('点连接拿红包啦')\n assert out1.get('classifylabel').get('name') == out2.get(\n 'classifylabel').get('name')\n shutil.rmtree(config['path'], ignore_errors=True)\n\n def ignore_test_trainer_persist(self):\n \"\"\"\n test pipeline persist, metadata will be saved\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n assert trainer.pipeline[0] is not None\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n metadata_path = os.path.join(persisted_path, 'metadata.json')\n with io.open(metadata_path) as f:\n metadata = json.load(f)\n assert 'trained_at' in metadata\n shutil.rmtree(config['path'], ignore_errors=False)\n <mask token>\n\n def ignore_test_handles_pipeline_with_non_existing_component(self):\n \"\"\"\n handle no exist component in pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n config['pipeline'].append('unknown_component')\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n with pytest.raises(Exception) as execinfo:\n trainer = Trainer(config)\n trainer.train(train_data)\n assert 'Failed to find component' in str(execinfo.value)\n 
<mask token>\n\n def ignore_test_train_with_empty_data(self):\n \"\"\"\n test train with empty train data\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n train_data = TrainingData([])\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse('hello') is not None\n assert interpreter_loaded.parse('Hello today is Monday, again!'\n ) is not None\n shutil.rmtree(config['path'], ignore_errors=False)\n",
"step-2": "<mask token>\n\n\nclass TestTrainer(object):\n\n @classmethod\n def setup_class(cls):\n \"\"\" setup any state specific to the execution of the given class (which\n usually contains tests).\n \"\"\"\n pass\n\n @classmethod\n def teardown_class(cls):\n \"\"\" teardown any state that was previously setup with a call to\n setup_class.\n \"\"\"\n pass\n <mask token>\n\n def ignore_test_load_local_data(self):\n \"\"\"\n test load local json format data\n :return:\n \"\"\"\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n assert train_data is not None\n assert len(train_data.training_examples) == 1000\n assert 'text' not in train_data.training_examples[0].data\n assert 'label' in train_data.training_examples[0].data\n\n def ignore_test_load_config(self):\n \"\"\"\n test load config\n :return:\n \"\"\"\n config = AnnotatorConfig(filename=\n 'chi_annotator/user_instance/examples/classify/spam_email_classify_config.json'\n )\n assert config['name'] == 'email_spam_classification'\n\n def ignor_test_load_default_config(self):\n \"\"\"\n test load default config\n :return:\n \"\"\"\n config = AnnotatorConfig()\n assert config['config'] == 'config.json'\n\n def ignore_test_trainer_init(self):\n \"\"\"\n test trainer\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n\n def ignore_test_pipeline_flow(self):\n \"\"\"\n test trainer's train func for pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n interpreter = trainer.train(train_data)\n assert interpreter is not None\n out1 = interpreter.parse('点连接拿红包啦')\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n out2 = interpreter_loaded.parse('点连接拿红包啦')\n assert out1.get('classifylabel').get('name') == out2.get(\n 'classifylabel').get('name')\n shutil.rmtree(config['path'], ignore_errors=True)\n\n def ignore_test_trainer_persist(self):\n \"\"\"\n test pipeline persist, metadata will be saved\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n assert trainer.pipeline[0] is not None\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n metadata_path = os.path.join(persisted_path, 'metadata.json')\n with io.open(metadata_path) as f:\n metadata = json.load(f)\n assert 'trained_at' in metadata\n shutil.rmtree(config['path'], ignore_errors=False)\n <mask token>\n\n def ignore_test_handles_pipeline_with_non_existing_component(self):\n \"\"\"\n handle no exist component in pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n config['pipeline'].append('unknown_component')\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n with 
pytest.raises(Exception) as execinfo:\n trainer = Trainer(config)\n trainer.train(train_data)\n assert 'Failed to find component' in str(execinfo.value)\n\n def ignore_test_load_and_persist_without_train(self):\n \"\"\"\n test save and load model without train\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse('hello') is not None\n assert interpreter_loaded.parse('Hello today is Monday, again!'\n ) is not None\n shutil.rmtree(config['path'], ignore_errors=False)\n\n def ignore_test_train_with_empty_data(self):\n \"\"\"\n test train with empty train data\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n train_data = TrainingData([])\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse('hello') is not None\n assert interpreter_loaded.parse('Hello today is Monday, again!'\n ) is not None\n shutil.rmtree(config['path'], ignore_errors=False)\n",
"step-3": "<mask token>\n\n\nclass TestTrainer(object):\n\n @classmethod\n def setup_class(cls):\n \"\"\" setup any state specific to the execution of the given class (which\n usually contains tests).\n \"\"\"\n pass\n\n @classmethod\n def teardown_class(cls):\n \"\"\" teardown any state that was previously setup with a call to\n setup_class.\n \"\"\"\n pass\n <mask token>\n\n def ignore_test_load_local_data(self):\n \"\"\"\n test load local json format data\n :return:\n \"\"\"\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n assert train_data is not None\n assert len(train_data.training_examples) == 1000\n assert 'text' not in train_data.training_examples[0].data\n assert 'label' in train_data.training_examples[0].data\n\n def ignore_test_load_config(self):\n \"\"\"\n test load config\n :return:\n \"\"\"\n config = AnnotatorConfig(filename=\n 'chi_annotator/user_instance/examples/classify/spam_email_classify_config.json'\n )\n assert config['name'] == 'email_spam_classification'\n\n def ignor_test_load_default_config(self):\n \"\"\"\n test load default config\n :return:\n \"\"\"\n config = AnnotatorConfig()\n assert config['config'] == 'config.json'\n\n def ignore_test_trainer_init(self):\n \"\"\"\n test trainer\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n\n def ignore_test_pipeline_flow(self):\n \"\"\"\n test trainer's train func for pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n interpreter = trainer.train(train_data)\n assert interpreter is not None\n out1 = interpreter.parse('点连接拿红包啦')\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n out2 = interpreter_loaded.parse('点连接拿红包啦')\n assert out1.get('classifylabel').get('name') == out2.get(\n 'classifylabel').get('name')\n shutil.rmtree(config['path'], ignore_errors=True)\n\n def ignore_test_trainer_persist(self):\n \"\"\"\n test pipeline persist, metadata will be saved\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n assert trainer.pipeline[0] is not None\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n metadata_path = os.path.join(persisted_path, 'metadata.json')\n with io.open(metadata_path) as f:\n metadata = json.load(f)\n assert 'trained_at' in metadata\n shutil.rmtree(config['path'], ignore_errors=False)\n\n def ignore_test_train_model_empty_pipeline(self):\n \"\"\"\n train model with no component\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n config['pipeline'] = []\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n with pytest.raises(ValueError):\n trainer = Trainer(config)\n 
trainer.train(train_data)\n\n def ignore_test_handles_pipeline_with_non_existing_component(self):\n \"\"\"\n handle no exist component in pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n config['pipeline'].append('unknown_component')\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n with pytest.raises(Exception) as execinfo:\n trainer = Trainer(config)\n trainer.train(train_data)\n assert 'Failed to find component' in str(execinfo.value)\n\n def ignore_test_load_and_persist_without_train(self):\n \"\"\"\n test save and load model without train\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse('hello') is not None\n assert interpreter_loaded.parse('Hello today is Monday, again!'\n ) is not None\n shutil.rmtree(config['path'], ignore_errors=False)\n\n def ignore_test_train_with_empty_data(self):\n \"\"\"\n test train with empty train data\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n train_data = TrainingData([])\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse('hello') is not None\n assert interpreter_loaded.parse('Hello today is Monday, again!'\n ) is not None\n shutil.rmtree(config['path'], ignore_errors=False)\n",
"step-4": "<mask token>\n\n\nclass TestTrainer(object):\n\n @classmethod\n def setup_class(cls):\n \"\"\" setup any state specific to the execution of the given class (which\n usually contains tests).\n \"\"\"\n pass\n\n @classmethod\n def teardown_class(cls):\n \"\"\" teardown any state that was previously setup with a call to\n setup_class.\n \"\"\"\n pass\n \"\"\"\n test Trainer and Interpreter\n \"\"\"\n\n def ignore_test_load_local_data(self):\n \"\"\"\n test load local json format data\n :return:\n \"\"\"\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n assert train_data is not None\n assert len(train_data.training_examples) == 1000\n assert 'text' not in train_data.training_examples[0].data\n assert 'label' in train_data.training_examples[0].data\n\n def ignore_test_load_config(self):\n \"\"\"\n test load config\n :return:\n \"\"\"\n config = AnnotatorConfig(filename=\n 'chi_annotator/user_instance/examples/classify/spam_email_classify_config.json'\n )\n assert config['name'] == 'email_spam_classification'\n\n def ignor_test_load_default_config(self):\n \"\"\"\n test load default config\n :return:\n \"\"\"\n config = AnnotatorConfig()\n assert config['config'] == 'config.json'\n\n def ignore_test_trainer_init(self):\n \"\"\"\n test trainer\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n\n def ignore_test_pipeline_flow(self):\n \"\"\"\n test trainer's train func for pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n interpreter = trainer.train(train_data)\n assert interpreter is not None\n out1 = interpreter.parse('点连接拿红包啦')\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n out2 = interpreter_loaded.parse('点连接拿红包啦')\n assert out1.get('classifylabel').get('name') == out2.get(\n 'classifylabel').get('name')\n shutil.rmtree(config['path'], ignore_errors=True)\n\n def ignore_test_trainer_persist(self):\n \"\"\"\n test pipeline persist, metadata will be saved\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n assert trainer.pipeline[0] is not None\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n metadata_path = os.path.join(persisted_path, 'metadata.json')\n with io.open(metadata_path) as f:\n metadata = json.load(f)\n assert 'trained_at' in metadata\n shutil.rmtree(config['path'], ignore_errors=False)\n\n def ignore_test_train_model_empty_pipeline(self):\n \"\"\"\n train model with no component\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n config['pipeline'] = []\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n with pytest.raises(ValueError):\n trainer = 
Trainer(config)\n trainer.train(train_data)\n\n def ignore_test_handles_pipeline_with_non_existing_component(self):\n \"\"\"\n handle no exist component in pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n config['pipeline'].append('unknown_component')\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n with pytest.raises(Exception) as execinfo:\n trainer = Trainer(config)\n trainer.train(train_data)\n assert 'Failed to find component' in str(execinfo.value)\n\n def ignore_test_load_and_persist_without_train(self):\n \"\"\"\n test save and load model without train\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse('hello') is not None\n assert interpreter_loaded.parse('Hello today is Monday, again!'\n ) is not None\n shutil.rmtree(config['path'], ignore_errors=False)\n\n def ignore_test_train_with_empty_data(self):\n \"\"\"\n test train with empty train data\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n train_data = TrainingData([])\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse('hello') is not None\n assert interpreter_loaded.parse('Hello today is Monday, again!'\n ) is not None\n shutil.rmtree(config['path'], ignore_errors=False)\n",
"step-5": "# -*- coding: utf-8 -*-\nimport json\nimport os\nimport io\nimport shutil\n\nimport pytest\n\nfrom chi_annotator.algo_factory.common import TrainingData\nfrom chi_annotator.task_center.config import AnnotatorConfig\nfrom chi_annotator.task_center.data_loader import load_local_data\nfrom chi_annotator.task_center.model import Interpreter\nfrom chi_annotator.task_center.model import Trainer\nfrom tests.utils.txt_to_json import create_tmp_test_jsonfile, rm_tmp_file\n\n\nclass TestTrainer(object):\n\n @classmethod\n def setup_class(cls):\n \"\"\" setup any state specific to the execution of the given class (which\n usually contains tests).\n \"\"\"\n pass\n\n @classmethod\n def teardown_class(cls):\n \"\"\" teardown any state that was previously setup with a call to\n setup_class.\n \"\"\"\n pass\n\n \"\"\"\n test Trainer and Interpreter\n \"\"\"\n def ignore_test_load_local_data(self):\n \"\"\"\n test load local json format data\n :return:\n \"\"\"\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n rm_tmp_file(\"tmp.json\")\n assert train_data is not None\n assert len(train_data.training_examples) == 1000\n assert \"text\" not in train_data.training_examples[0].data\n assert \"label\" in train_data.training_examples[0].data\n\n def ignore_test_load_config(self):\n \"\"\"\n test load config\n :return:\n \"\"\"\n config = AnnotatorConfig(\\\n filename=\"chi_annotator/user_instance/examples/classify/spam_email_classify_config.json\")\n assert config[\"name\"] == \"email_spam_classification\"\n\n def ignor_test_load_default_config(self):\n \"\"\"\n test load default config\n :return:\n \"\"\"\n config = AnnotatorConfig()\n assert config[\"config\"] == \"config.json\"\n\n def ignore_test_trainer_init(self):\n \"\"\"\n test trainer\n :return:\n \"\"\"\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n\n def ignore_test_pipeline_flow(self):\n \"\"\"\n test trainer's train func for pipeline\n :return:\n \"\"\"\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n # create tmp train set\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n # rm tmp train set\n rm_tmp_file(\"tmp.json\")\n\n interpreter = trainer.train(train_data)\n assert interpreter is not None\n out1 = interpreter.parse((\"点连接拿红包啦\"))\n\n # test persist and load\n persisted_path = trainer.persist(config['path'],\n config['project'],\n config['fixed_model_name'])\n\n interpreter_loaded = Interpreter.load(persisted_path, config)\n out2 = interpreter_loaded.parse(\"点连接拿红包啦\")\n assert out1.get(\"classifylabel\").get(\"name\") == out2.get(\"classifylabel\").get(\"name\")\n\n # remove tmp models\n shutil.rmtree(config['path'], ignore_errors=True)\n\n def ignore_test_trainer_persist(self):\n \"\"\"\n test pipeline persist, metadata will be saved\n :return:\n \"\"\"\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n # char_tokenizer component should been created\n assert trainer.pipeline[0] is not None\n # create tmp train set\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n # rm tmp train set\n rm_tmp_file(\"tmp.json\")\n\n trainer.train(train_data)\n 
persisted_path = trainer.persist(config['path'],\n config['project'],\n config['fixed_model_name'])\n # load persisted metadata\n metadata_path = os.path.join(persisted_path, 'metadata.json')\n with io.open(metadata_path) as f:\n metadata = json.load(f)\n assert 'trained_at' in metadata\n # rm tmp files and dirs\n shutil.rmtree(config['path'], ignore_errors=False)\n\n def ignore_test_train_model_empty_pipeline(self):\n \"\"\"\n train model with no component\n :return:\n \"\"\"\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n config['pipeline'] = []\n\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n rm_tmp_file(\"tmp.json\")\n\n with pytest.raises(ValueError):\n trainer = Trainer(config)\n trainer.train(train_data)\n\n def ignore_test_handles_pipeline_with_non_existing_component(self):\n \"\"\"\n handle no exist component in pipeline\n :return:\n \"\"\"\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n config['pipeline'].append(\"unknown_component\")\n\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n rm_tmp_file(\"tmp.json\")\n\n with pytest.raises(Exception) as execinfo:\n trainer = Trainer(config)\n trainer.train(train_data)\n assert \"Failed to find component\" in str(execinfo.value)\n\n def ignore_test_load_and_persist_without_train(self):\n \"\"\"\n test save and load model without train\n :return:\n \"\"\"\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n # create tmp train set\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n # rm tmp train set\n rm_tmp_file(\"tmp.json\")\n\n # interpreter = trainer.train(train_data)\n # test persist and load\n persisted_path = trainer.persist(config['path'],\n config['project'],\n config['fixed_model_name'])\n\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse(\"hello\") is not None\n assert interpreter_loaded.parse(\"Hello today is Monday, again!\") is not None\n # remove tmp models\n shutil.rmtree(config['path'], ignore_errors=False)\n\n def ignore_test_train_with_empty_data(self):\n \"\"\"\n test train with empty train data\n :return:\n \"\"\"\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n # create tmp train set\n\n train_data = TrainingData([])\n # rm tmp train set\n\n trainer.train(train_data)\n # test persist and load\n persisted_path = trainer.persist(config['path'],\n config['project'],\n config['fixed_model_name'])\n\n interpreter_loaded = Interpreter.load(persisted_path, config)\n \n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse(\"hello\") is not None\n assert interpreter_loaded.parse(\"Hello today is Monday, again!\") is not None\n \n # remove tmp models\n shutil.rmtree(config['path'], ignore_errors=False)\n\n\n\n",
"step-ids": [
10,
12,
13,
14,
16
]
}
|
[
10,
12,
13,
14,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def euclidean(p, q):
sumSq = 0.0
for i in range(len(p)):
sumSq += (p[i] - q[i]) ** 2
return sumSq ** 0.5
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Euclidean distance
# http://en.wikipedia.org/wiki/Euclidean_space
# Computes the distance between two points in multidimensional space
def euclidean(p,q):
sumSq=0.0
    # accumulate the squared differences
for i in range(len(p)):
sumSq+=(p[i]-q[i])**2
    # square root
return (sumSq**0.5)
#print euclidean([3,4,5],[4,5,6])
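
# For reference, a NumPy one-liner that computes the same distance (a sketch;
# assumes numpy is installed -- the pure-Python version above does not need it):
import numpy as np

def euclidean_np(p, q):
    # the norm of the difference vector is exactly the Euclidean distance
    return np.linalg.norm(np.asarray(p) - np.asarray(q))

#print euclidean_np([3,4,5],[4,5,6])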
|
flexible
|
{
"blob_id": "11a7ebac3dad1f91a6d46b62f557b51ded8e3d7a",
"index": 1271,
"step-1": "<mask token>\n",
"step-2": "def euclidean(p, q):\n sumSq = 0.0\n for i in range(len(p)):\n sumSq += (p[i] - q[i]) ** 2\n return sumSq ** 0.5\n",
"step-3": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ユークリッド距離\n# http://en.wikipedia.org/wiki/Euclidean_space\n\n# 多次元空間中での 2 点間の距離を探索する\n\ndef euclidean(p,q):\n sumSq=0.0\n # 差の平方を加算\n for i in range(len(p)):\n sumSq+=(p[i]-q[i])**2\n # 平方根\n return (sumSq**0.5)\n\n#print euclidean([3,4,5],[4,5,6])\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# coding: utf-8
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Chrome()
driver.get("https://www.baidu.com")
elem = driver.find_element_by_xpath('//*[@id="kw"]')
elem.send_keys("python selenium", Keys.ENTER)
print(driver.page_source)
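
# Note: find_element_by_xpath was removed in Selenium 4. On current versions
# the same lookup is written with a By locator (a sketch of the equivalent):
# from selenium.webdriver.common.by import By
# elem = driver.find_element(By.XPATH, '//*[@id="kw"]')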
|
normal
|
{
"blob_id": "3c8352ff2fc92ada1b58603df2a1a402e57842be",
"index": 8606,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndriver.get('https://www.baidu.com')\n<mask token>\nelem.send_keys('python selenium', Keys.ENTER)\nprint(driver.page_source)\n",
"step-3": "<mask token>\ndriver = webdriver.Chrome()\ndriver.get('https://www.baidu.com')\nelem = driver.find_element_by_xpath('//*[@id=\"kw\"]')\nelem.send_keys('python selenium', Keys.ENTER)\nprint(driver.page_source)\n",
"step-4": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\ndriver = webdriver.Chrome()\ndriver.get('https://www.baidu.com')\nelem = driver.find_element_by_xpath('//*[@id=\"kw\"]')\nelem.send_keys('python selenium', Keys.ENTER)\nprint(driver.page_source)\n",
"step-5": "# coding: utf-8\r\n\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\ndriver = webdriver.Chrome()\r\ndriver.get(\"https://www.baidu.com\")\r\n\r\nelem = driver.find_element_by_xpath('//*[@id=\"kw\"]')\r\nelem.send_keys(\"python selenium\", Keys.ENTER)\r\n\r\nprint(driver.page_source)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
config = {'numIndividuals': 50, 'maxNumGen': 20, 'eliteProp': 0.1,
'mutantProp': 0.2, 'inheritanceProb': 0.7}
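
# A sketch of how such GA settings are typically consumed (the derived names
# below are illustrative and not part of the original snippet):
num_elite = int(config['numIndividuals'] * config['eliteProp'])       # 5 elites
num_mutants = int(config['numIndividuals'] * config['mutantProp'])    # 10 mutants
num_offspring = config['numIndividuals'] - num_elite - num_mutants    # 35 from crossover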
|
flexible
|
{
"blob_id": "85d1069d85e285bc5c36811f569dabd793b5064b",
"index": 4460,
"step-1": "<mask token>\n",
"step-2": "config = {'numIndividuals': 50, 'maxNumGen': 20, 'eliteProp': 0.1,\n 'mutantProp': 0.2, 'inheritanceProb': 0.7}\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
class Book:
def __init__(self, url):
self.url = url
self.title = ''
self.category = ''
self.upc = ''
self.price_including_tax = ''
self.price_excluding_tax = ''
self.number_available = ''
self.description = ''
self.review_rating = ''
self.image_url = ''
self.tax = ''
def scrap(self):
book = requests.get(self.url)
soup = BeautifulSoup(book.content, 'html.parser')
self.__fill_title(soup)
self.__fill_category(soup)
self.__fill_upc(soup)
self.__fill_price_including_tax(soup)
self.__fill_price_excluding_tax(soup)
self.__fill_number_available(soup)
self.__fill_description(soup)
self.__fill_review_rating(soup)
self.__fill_image_url(soup)
self.__fill_tax(soup)
<|reserved_special_token_0|>
def __fill_category(self, soup):
category = soup.findAll('li')
category2 = category[2].text
self.category = category2.replace('\n', '')
def __fill_upc(self, soup):
tds = soup.findAll('td')
self.upc = tds[0].text
def __fill_price_including_tax(self, soup):
tds = soup.findAll('td')
self.price_including_tax = tds[3].text
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __fill_description(self, soup):
div = soup.find('div', class_='sub-header')
p = div.find_next_sibling()
self.description = p.text
def __fill_review_rating(self, soup):
p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',
class_='star-rating')
rating = str(p['class'])
star = rating[15:-1]
star_rating = eval(star)
        self.review_rating = star_rating
def __fill_image_url(self, soup):
image = soup.find('div', {'class': 'item active'}).find('img')
image_url = image['src']
image_clean_url = image_url.replace('../../',
'http://books.toscrape.com/')
self.image_url = image_clean_url
def __fill_tax(self, soup):
tds = soup.findAll('td')
self.tax = tds[4].text
def __str__(self):
output = f"""url : {self.url}
title : {self.title}
category : {self.category}
upc : {self.upc}
price_including_tax : {self.price_including_tax}
price_excluding_tax : {self.price_excluding_tax}
number_available : {self.number_available}
description : {self.description}
review_rating : {self.review_rating}
image_url : {self.image_url}
tax : {self.tax} """
return output
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Book:
def __init__(self, url):
self.url = url
self.title = ''
self.category = ''
self.upc = ''
self.price_including_tax = ''
self.price_excluding_tax = ''
self.number_available = ''
self.description = ''
self.review_rating = ''
self.image_url = ''
self.tax = ''
def scrap(self):
book = requests.get(self.url)
soup = BeautifulSoup(book.content, 'html.parser')
self.__fill_title(soup)
self.__fill_category(soup)
self.__fill_upc(soup)
self.__fill_price_including_tax(soup)
self.__fill_price_excluding_tax(soup)
self.__fill_number_available(soup)
self.__fill_description(soup)
self.__fill_review_rating(soup)
self.__fill_image_url(soup)
self.__fill_tax(soup)
<|reserved_special_token_0|>
def __fill_category(self, soup):
category = soup.findAll('li')
category2 = category[2].text
self.category = category2.replace('\n', '')
def __fill_upc(self, soup):
tds = soup.findAll('td')
self.upc = tds[0].text
def __fill_price_including_tax(self, soup):
tds = soup.findAll('td')
self.price_including_tax = tds[3].text
<|reserved_special_token_0|>
def __fill_number_available(self, soup):
tds = soup.findAll('td')
self.number_available = tds[5].text
def __fill_description(self, soup):
div = soup.find('div', class_='sub-header')
p = div.find_next_sibling()
self.description = p.text
def __fill_review_rating(self, soup):
p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',
class_='star-rating')
rating = str(p['class'])
star = rating[15:-1]
star_rating = eval(star)
        self.review_rating = star_rating
def __fill_image_url(self, soup):
image = soup.find('div', {'class': 'item active'}).find('img')
image_url = image['src']
image_clean_url = image_url.replace('../../',
'http://books.toscrape.com/')
self.image_url = image_clean_url
def __fill_tax(self, soup):
tds = soup.findAll('td')
self.tax = tds[4].text
def __str__(self):
output = f"""url : {self.url}
title : {self.title}
category : {self.category}
upc : {self.upc}
price_including_tax : {self.price_including_tax}
price_excluding_tax : {self.price_excluding_tax}
number_available : {self.number_available}
description : {self.description}
review_rating : {self.review_rating}
image_url : {self.image_url}
tax : {self.tax} """
return output
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Book:
def __init__(self, url):
self.url = url
self.title = ''
self.category = ''
self.upc = ''
self.price_including_tax = ''
self.price_excluding_tax = ''
self.number_available = ''
self.description = ''
self.review_rating = ''
self.image_url = ''
self.tax = ''
def scrap(self):
book = requests.get(self.url)
soup = BeautifulSoup(book.content, 'html.parser')
self.__fill_title(soup)
self.__fill_category(soup)
self.__fill_upc(soup)
self.__fill_price_including_tax(soup)
self.__fill_price_excluding_tax(soup)
self.__fill_number_available(soup)
self.__fill_description(soup)
self.__fill_review_rating(soup)
self.__fill_image_url(soup)
self.__fill_tax(soup)
<|reserved_special_token_0|>
def __fill_category(self, soup):
category = soup.findAll('li')
category2 = category[2].text
self.category = category2.replace('\n', '')
def __fill_upc(self, soup):
tds = soup.findAll('td')
self.upc = tds[0].text
def __fill_price_including_tax(self, soup):
tds = soup.findAll('td')
self.price_including_tax = tds[3].text
def __fill_price_excluding_tax(self, soup):
tds = soup.findAll('td')
self.price_excluding_tax = tds[2].text
def __fill_number_available(self, soup):
tds = soup.findAll('td')
self.number_available = tds[5].text
def __fill_description(self, soup):
div = soup.find('div', class_='sub-header')
p = div.find_next_sibling()
self.description = p.text
def __fill_review_rating(self, soup):
p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',
class_='star-rating')
rating = str(p['class'])
star = rating[15:-1]
star_rating = eval(star)
        self.review_rating = star_rating
def __fill_image_url(self, soup):
image = soup.find('div', {'class': 'item active'}).find('img')
image_url = image['src']
image_clean_url = image_url.replace('../../',
'http://books.toscrape.com/')
self.image_url = image_clean_url
def __fill_tax(self, soup):
tds = soup.findAll('td')
self.tax = tds[4].text
def __str__(self):
output = f"""url : {self.url}
title : {self.title}
category : {self.category}
upc : {self.upc}
price_including_tax : {self.price_including_tax}
price_excluding_tax : {self.price_excluding_tax}
number_available : {self.number_available}
description : {self.description}
review_rating : {self.review_rating}
image_url : {self.image_url}
tax : {self.tax} """
return output
<|reserved_special_token_1|>
import requests
from bs4 import BeautifulSoup
class Book:
def __init__(self, url):
self.url = url
self.title = ''
self.category = ''
self.upc = ''
self.price_including_tax = ''
self.price_excluding_tax = ''
self.number_available = ''
self.description = ''
self.review_rating = ''
self.image_url = ''
self.tax = ''
def scrap(self):
book = requests.get(self.url)
soup = BeautifulSoup(book.content, 'html.parser')
self.__fill_title(soup)
self.__fill_category(soup)
self.__fill_upc(soup)
self.__fill_price_including_tax(soup)
self.__fill_price_excluding_tax(soup)
self.__fill_number_available(soup)
self.__fill_description(soup)
self.__fill_review_rating(soup)
self.__fill_image_url(soup)
self.__fill_tax(soup)
def __fill_title(self, soup):
title = soup.find('div', {'class': 'col-sm-6 product_main'}).find('h1')
self.title = title.text
def __fill_category(self, soup):
category = soup.findAll('li')
category2 = category[2].text
self.category = category2.replace('\n', '')
def __fill_upc(self, soup):
tds = soup.findAll('td')
self.upc = tds[0].text
def __fill_price_including_tax(self, soup):
tds = soup.findAll('td')
self.price_including_tax = tds[3].text
def __fill_price_excluding_tax(self, soup):
tds = soup.findAll('td')
self.price_excluding_tax = tds[2].text
def __fill_number_available(self, soup):
tds = soup.findAll('td')
self.number_available = tds[5].text
def __fill_description(self, soup):
div = soup.find('div', class_='sub-header')
p = div.find_next_sibling()
self.description = p.text
def __fill_review_rating(self, soup):
p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',
class_='star-rating')
rating = str(p['class'])
star = rating[15:-1]
star_rating = eval(star)
        self.review_rating = star_rating
def __fill_image_url(self, soup):
image = soup.find('div', {'class': 'item active'}).find('img')
image_url = image['src']
image_clean_url = image_url.replace('../../',
'http://books.toscrape.com/')
self.image_url = image_clean_url
def __fill_tax(self, soup):
tds = soup.findAll('td')
self.tax = tds[4].text
def __str__(self):
output = f"""url : {self.url}
title : {self.title}
category : {self.category}
upc : {self.upc}
price_including_tax : {self.price_including_tax}
price_excluding_tax : {self.price_excluding_tax}
number_available : {self.number_available}
description : {self.description}
review_rating : {self.review_rating}
image_url : {self.image_url}
tax : {self.tax} """
return output
<|reserved_special_token_1|>
import requests
from bs4 import BeautifulSoup
class Book:
def __init__(self, url):
self.url = url
self.title = ""
self.category = ""
self.upc=""
self.price_including_tax=""
self.price_excluding_tax=""
self.number_available=""
self.description=""
self.review_rating=""
self.image_url=""
self.tax=""
def scrap(self):
book = requests.get(self.url)
soup = BeautifulSoup(book.content, "html.parser")
self.__fill_title(soup)
self.__fill_category(soup)
self.__fill_upc(soup)
self.__fill_price_including_tax(soup)
self.__fill_price_excluding_tax(soup)
self.__fill_number_available(soup)
self.__fill_description(soup)
self.__fill_review_rating(soup)
self.__fill_image_url(soup)
self.__fill_tax(soup)
def __fill_title(self,soup):
title = soup.find("div", {"class": "col-sm-6 product_main"}).find("h1")
self.title= title.text
# return self.title
def __fill_category(self,soup):
category = soup.findAll("li")
category2 = category[2].text
self.category = category2.replace("\n", "")
# return self.category
def __fill_upc(self,soup):
tds = soup.findAll("td")
self.upc = tds[0].text
def __fill_price_including_tax(self,soup):
tds = soup.findAll("td")
self.price_including_tax = tds[3].text
def __fill_price_excluding_tax(self,soup):
tds = soup.findAll("td")
self.price_excluding_tax = tds[2].text
def __fill_number_available(self,soup):
tds = soup.findAll("td")
self.number_available = tds[5].text
def __fill_description(self,soup):
div = soup.find("div", class_="sub-header")
p = div.find_next_sibling()
self.description = p.text
# return self.description
def __fill_review_rating(self,soup):
p = soup.find("div", {"class": "col-sm-6 product_main"}).find(
"p", class_="star-rating"
)
rating = str(p["class"])
star = rating[15:-1]
star_rating = eval(star)
        self.review_rating = star_rating
def __fill_image_url(self,soup):
image = soup.find("div", {"class": "item active"}).find("img")
image_url = image["src"]
image_clean_url = image_url.replace("../../", "http://books.toscrape.com/")
self.image_url = image_clean_url
def __fill_tax(self,soup):
tds = soup.findAll("td")
self.tax = tds[4].text
def __str__(self):
output = f"url : {self.url} \ntitle : {self.title} \ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : {self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} "
return output
# book = Book("http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html")
# book.scrap()
# print(book)
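
# For reference, a safer way to read the star rating without eval() (a sketch;
# `soup` is any parsed product page, as built inside scrap() above):
def star_rating_of(soup):
    # the tag looks like <p class="star-rating Three">, so the second
    # CSS class is the rating word
    return soup.find("p", class_="star-rating")["class"][1]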
|
flexible
|
{
"blob_id": "3dc83168264fbb4f9b0ab2980b845dffdc4417bb",
"index": 7588,
"step-1": "<mask token>\n\n\nclass Book:\n\n def __init__(self, url):\n self.url = url\n self.title = ''\n self.category = ''\n self.upc = ''\n self.price_including_tax = ''\n self.price_excluding_tax = ''\n self.number_available = ''\n self.description = ''\n self.review_rating = ''\n self.image_url = ''\n self.tax = ''\n\n def scrap(self):\n book = requests.get(self.url)\n soup = BeautifulSoup(book.content, 'html.parser')\n self.__fill_title(soup)\n self.__fill_category(soup)\n self.__fill_upc(soup)\n self.__fill_price_including_tax(soup)\n self.__fill_price_excluding_tax(soup)\n self.__fill_number_available(soup)\n self.__fill_description(soup)\n self.__fill_review_rating(soup)\n self.__fill_image_url(soup)\n self.__fill_tax(soup)\n <mask token>\n\n def __fill_category(self, soup):\n category = soup.findAll('li')\n category2 = category[2].text\n self.category = category2.replace('\\n', '')\n\n def __fill_upc(self, soup):\n tds = soup.findAll('td')\n self.upc = tds[0].text\n\n def __fill_price_including_tax(self, soup):\n tds = soup.findAll('td')\n self.price_including_tax = tds[3].text\n <mask token>\n <mask token>\n\n def __fill_description(self, soup):\n div = soup.find('div', class_='sub-header')\n p = div.find_next_sibling()\n self.description = p.text\n\n def __fill_review_rating(self, soup):\n p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',\n class_='star-rating')\n rating = str(p['class'])\n star = rating[15:-1]\n star_rating = eval(star)\n return star_rating\n\n def __fill_image_url(self, soup):\n image = soup.find('div', {'class': 'item active'}).find('img')\n image_url = image['src']\n image_clean_url = image_url.replace('../../',\n 'http://books.toscrape.com/')\n self.image_url = image_clean_url\n\n def __fill_tax(self, soup):\n tds = soup.findAll('td')\n self.tax = tds[4].text\n\n def __str__(self):\n output = f\"\"\"url : {self.url} \ntitle : {self.title} \ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : {self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} \"\"\"\n return output\n",
"step-2": "<mask token>\n\n\nclass Book:\n\n def __init__(self, url):\n self.url = url\n self.title = ''\n self.category = ''\n self.upc = ''\n self.price_including_tax = ''\n self.price_excluding_tax = ''\n self.number_available = ''\n self.description = ''\n self.review_rating = ''\n self.image_url = ''\n self.tax = ''\n\n def scrap(self):\n book = requests.get(self.url)\n soup = BeautifulSoup(book.content, 'html.parser')\n self.__fill_title(soup)\n self.__fill_category(soup)\n self.__fill_upc(soup)\n self.__fill_price_including_tax(soup)\n self.__fill_price_excluding_tax(soup)\n self.__fill_number_available(soup)\n self.__fill_description(soup)\n self.__fill_review_rating(soup)\n self.__fill_image_url(soup)\n self.__fill_tax(soup)\n <mask token>\n\n def __fill_category(self, soup):\n category = soup.findAll('li')\n category2 = category[2].text\n self.category = category2.replace('\\n', '')\n\n def __fill_upc(self, soup):\n tds = soup.findAll('td')\n self.upc = tds[0].text\n\n def __fill_price_including_tax(self, soup):\n tds = soup.findAll('td')\n self.price_including_tax = tds[3].text\n <mask token>\n\n def __fill_number_available(self, soup):\n tds = soup.findAll('td')\n self.number_available = tds[5].text\n\n def __fill_description(self, soup):\n div = soup.find('div', class_='sub-header')\n p = div.find_next_sibling()\n self.description = p.text\n\n def __fill_review_rating(self, soup):\n p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',\n class_='star-rating')\n rating = str(p['class'])\n star = rating[15:-1]\n star_rating = eval(star)\n return star_rating\n\n def __fill_image_url(self, soup):\n image = soup.find('div', {'class': 'item active'}).find('img')\n image_url = image['src']\n image_clean_url = image_url.replace('../../',\n 'http://books.toscrape.com/')\n self.image_url = image_clean_url\n\n def __fill_tax(self, soup):\n tds = soup.findAll('td')\n self.tax = tds[4].text\n\n def __str__(self):\n output = f\"\"\"url : {self.url} \ntitle : {self.title} \ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : {self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} \"\"\"\n return output\n",
"step-3": "<mask token>\n\n\nclass Book:\n\n def __init__(self, url):\n self.url = url\n self.title = ''\n self.category = ''\n self.upc = ''\n self.price_including_tax = ''\n self.price_excluding_tax = ''\n self.number_available = ''\n self.description = ''\n self.review_rating = ''\n self.image_url = ''\n self.tax = ''\n\n def scrap(self):\n book = requests.get(self.url)\n soup = BeautifulSoup(book.content, 'html.parser')\n self.__fill_title(soup)\n self.__fill_category(soup)\n self.__fill_upc(soup)\n self.__fill_price_including_tax(soup)\n self.__fill_price_excluding_tax(soup)\n self.__fill_number_available(soup)\n self.__fill_description(soup)\n self.__fill_review_rating(soup)\n self.__fill_image_url(soup)\n self.__fill_tax(soup)\n <mask token>\n\n def __fill_category(self, soup):\n category = soup.findAll('li')\n category2 = category[2].text\n self.category = category2.replace('\\n', '')\n\n def __fill_upc(self, soup):\n tds = soup.findAll('td')\n self.upc = tds[0].text\n\n def __fill_price_including_tax(self, soup):\n tds = soup.findAll('td')\n self.price_including_tax = tds[3].text\n\n def __fill_price_excluding_tax(self, soup):\n tds = soup.findAll('td')\n self.price_excluding_tax = tds[2].text\n\n def __fill_number_available(self, soup):\n tds = soup.findAll('td')\n self.number_available = tds[5].text\n\n def __fill_description(self, soup):\n div = soup.find('div', class_='sub-header')\n p = div.find_next_sibling()\n self.description = p.text\n\n def __fill_review_rating(self, soup):\n p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',\n class_='star-rating')\n rating = str(p['class'])\n star = rating[15:-1]\n star_rating = eval(star)\n return star_rating\n\n def __fill_image_url(self, soup):\n image = soup.find('div', {'class': 'item active'}).find('img')\n image_url = image['src']\n image_clean_url = image_url.replace('../../',\n 'http://books.toscrape.com/')\n self.image_url = image_clean_url\n\n def __fill_tax(self, soup):\n tds = soup.findAll('td')\n self.tax = tds[4].text\n\n def __str__(self):\n output = f\"\"\"url : {self.url} \ntitle : {self.title} \ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : {self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} \"\"\"\n return output\n",
"step-4": "import requests\nfrom bs4 import BeautifulSoup\n\n\nclass Book:\n\n def __init__(self, url):\n self.url = url\n self.title = ''\n self.category = ''\n self.upc = ''\n self.price_including_tax = ''\n self.price_excluding_tax = ''\n self.number_available = ''\n self.description = ''\n self.review_rating = ''\n self.image_url = ''\n self.tax = ''\n\n def scrap(self):\n book = requests.get(self.url)\n soup = BeautifulSoup(book.content, 'html.parser')\n self.__fill_title(soup)\n self.__fill_category(soup)\n self.__fill_upc(soup)\n self.__fill_price_including_tax(soup)\n self.__fill_price_excluding_tax(soup)\n self.__fill_number_available(soup)\n self.__fill_description(soup)\n self.__fill_review_rating(soup)\n self.__fill_image_url(soup)\n self.__fill_tax(soup)\n\n def __fill_title(self, soup):\n title = soup.find('div', {'class': 'col-sm-6 product_main'}).find('h1')\n self.title = title.text\n\n def __fill_category(self, soup):\n category = soup.findAll('li')\n category2 = category[2].text\n self.category = category2.replace('\\n', '')\n\n def __fill_upc(self, soup):\n tds = soup.findAll('td')\n self.upc = tds[0].text\n\n def __fill_price_including_tax(self, soup):\n tds = soup.findAll('td')\n self.price_including_tax = tds[3].text\n\n def __fill_price_excluding_tax(self, soup):\n tds = soup.findAll('td')\n self.price_excluding_tax = tds[2].text\n\n def __fill_number_available(self, soup):\n tds = soup.findAll('td')\n self.number_available = tds[5].text\n\n def __fill_description(self, soup):\n div = soup.find('div', class_='sub-header')\n p = div.find_next_sibling()\n self.description = p.text\n\n def __fill_review_rating(self, soup):\n p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',\n class_='star-rating')\n rating = str(p['class'])\n star = rating[15:-1]\n star_rating = eval(star)\n return star_rating\n\n def __fill_image_url(self, soup):\n image = soup.find('div', {'class': 'item active'}).find('img')\n image_url = image['src']\n image_clean_url = image_url.replace('../../',\n 'http://books.toscrape.com/')\n self.image_url = image_clean_url\n\n def __fill_tax(self, soup):\n tds = soup.findAll('td')\n self.tax = tds[4].text\n\n def __str__(self):\n output = f\"\"\"url : {self.url} \ntitle : {self.title} \ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : {self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} \"\"\"\n return output\n",
"step-5": "import requests\nfrom bs4 import BeautifulSoup\n\nclass Book:\n\n def __init__(self, url):\n self.url = url\n self.title = \"\"\n self.category = \"\"\n self.upc=\"\"\n self.price_including_tax=\"\"\n self.price_excluding_tax=\"\"\n self.number_available=\"\"\n self.description=\"\"\n self.review_rating=\"\"\n self.image_url=\"\"\n self.tax=\"\"\n \n def scrap(self): \n book = requests.get(self.url) \n soup = BeautifulSoup(book.content, \"html.parser\")\n self.__fill_title(soup) \n self.__fill_category(soup)\n self.__fill_upc(soup)\n self.__fill_price_including_tax(soup)\n self.__fill_price_excluding_tax(soup)\n self.__fill_number_available(soup)\n self.__fill_description(soup)\n self.__fill_review_rating(soup)\n self.__fill_image_url(soup)\n self.__fill_tax(soup)\n \n def __fill_title(self,soup): \n title = soup.find(\"div\", {\"class\": \"col-sm-6 product_main\"}).find(\"h1\")\n self.title= title.text\n # return self.title\n \n \n def __fill_category(self,soup):\n category = soup.findAll(\"li\")\n category2 = category[2].text\n self.category = category2.replace(\"\\n\", \"\")\n # return self.category\n\n def __fill_upc(self,soup):\n tds = soup.findAll(\"td\")\n self.upc = tds[0].text\n\n def __fill_price_including_tax(self,soup):\n tds = soup.findAll(\"td\")\n self.price_including_tax = tds[3].text\n\n def __fill_price_excluding_tax(self,soup):\n tds = soup.findAll(\"td\")\n self.price_excluding_tax = tds[2].text\n\n def __fill_number_available(self,soup):\n tds = soup.findAll(\"td\")\n self.number_available = tds[5].text\n\n def __fill_description(self,soup):\n div = soup.find(\"div\", class_=\"sub-header\")\n p = div.find_next_sibling()\n self.description = p.text\n # return self.description\n\n def __fill_review_rating(self,soup):\n p = soup.find(\"div\", {\"class\": \"col-sm-6 product_main\"}).find(\n \"p\", class_=\"star-rating\"\n )\n rating = str(p[\"class\"])\n star = rating[15:-1]\n star_rating = eval(star)\n return star_rating\n \n\n def __fill_image_url(self,soup):\n image = soup.find(\"div\", {\"class\": \"item active\"}).find(\"img\")\n image_url = image[\"src\"]\n image_clean_url = image_url.replace(\"../../\", \"http://books.toscrape.com/\")\n self.image_url = image_clean_url\n\n def __fill_tax(self,soup):\n tds = soup.findAll(\"td\")\n self.tax = tds[4].text\n\n def __str__(self):\n output = f\"url : {self.url} \\ntitle : {self.title} \\ncategory : {self.category} \\nupc : {self.upc} \\nprice_including_tax : {self.price_including_tax} \\nprice_excluding_tax : {self.price_excluding_tax} \\nnumber_available : {self.number_available} \\ndescription : {self.description} \\nreview_rating : {self.review_rating} \\nimage_url : {self.image_url} \\ntax : {self.tax} \"\n return output\n \n \n\n \n# book = Book(\"http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html\")\n# book.scrap(\"http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html\")\n# print(book)\n",
"step-ids": [
11,
12,
13,
15,
16
]
}
|
[
11,
12,
13,
15,
16
] |
import numpy as np
a = np.ones((3,4))
b = np.ones((4,1))
# a.shape = (3,4)
# b.shape = (4,1)
c = np.zeros_like(a)
for i in range(3):
for j in range(4):
c[i][j] = a[i][j] + b[j]
print(c)
d = a+b.T
print(d)
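
# Sanity check: b.T has shape (1,4), so a + b.T broadcasts across the 3 rows
# of a and must match the explicit double loop above.
assert np.allclose(c, d)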
|
normal
|
{
"blob_id": "d6213698423902771011caf6b5206dd4e3b27450",
"index": 5753,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(3):\n for j in range(4):\n c[i][j] = a[i][j] + b[j]\nprint(c)\n<mask token>\nprint(d)\n",
"step-3": "<mask token>\na = np.ones((3, 4))\nb = np.ones((4, 1))\nc = np.zeros_like(a)\nfor i in range(3):\n for j in range(4):\n c[i][j] = a[i][j] + b[j]\nprint(c)\nd = a + b.T\nprint(d)\n",
"step-4": "import numpy as np\na = np.ones((3, 4))\nb = np.ones((4, 1))\nc = np.zeros_like(a)\nfor i in range(3):\n for j in range(4):\n c[i][j] = a[i][j] + b[j]\nprint(c)\nd = a + b.T\nprint(d)\n",
"step-5": "import numpy as np\n\na = np.ones((3,4))\nb = np.ones((4,1))\n# a.shape = (3,4)\n# b.shape = (4,1)\n\nc = np.zeros_like(a)\n\nfor i in range(3):\n for j in range(4):\n c[i][j] = a[i][j] + b[j]\n\nprint(c)\n\nd = a+b.T\nprint(d)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class IndexSetter(BaseEstimator, TransformerMixin):
""" Set index """
def __init__(self, index_cols, drop_existing):
self.index_cols = index_cols
self.drop_existing = drop_existing
def fit(self, X, y=None):
return self
def transform(self, X):
X = X.copy()
X.reset_index(drop=self.drop_existing, inplace=True)
X.set_index(self.index_cols, inplace=True)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
class SubTimeSeriesSampler(BaseEstimator, TransformerMixin):
""" Samples a sub-series of length t <= the original series of length T. Assumes series is in columns
Original time-series time labels (column headers) are replaced with t_0, t_1, ... t_<series_len>.
"""
def __init__(self, series_len, num_reps):
self.series_len = series_len
self.num_reps = num_reps
def fit(self, X, y=None):
return self
def transform(self, X):
curr_len = X.shape[1]
if curr_len < self.series_len:
raise Exception(
                f'Error sampling series. Target length {self.series_len} exceeds current length {curr_len}'
)
sampled_data = []
data_arr = X.values
for _ in range(self.num_reps):
for i in range(data_arr.shape[0]):
                # high bound is exclusive, so +1 lets the final window be sampled
                rand_idx = np.random.randint(0, curr_len - self.series_len + 1)
sampled_data.append(data_arr[i, rand_idx:rand_idx + self.
series_len])
idx = list(X.index) * self.num_reps
col_names = [f't_{i}' for i in range(self.series_len)]
sampled_data = pd.DataFrame(sampled_data, columns=col_names, index=idx)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(sampled_data.head())
print(sampled_data.shape)
return sampled_data
class AddLeftRightFlipper(BaseEstimator, TransformerMixin):
"""
Adds left right flipped version of tensor
"""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
X_flipped = pd.DataFrame(np.fliplr(X), columns=X.columns, index=X.index
)
X = pd.concat([X, X_flipped], axis=0, ignore_index=True)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
class SeriesLengthTrimmer(BaseEstimator, TransformerMixin):
"""
Trims the length of a series to use latest data points
"""
def __init__(self, series_len):
self.series_len = series_len
def fit(self, X, y=None):
return self
def transform(self, X):
curr_len = X.shape[1]
if curr_len < self.series_len:
raise Exception(
                f'Error trimming series. Target length {self.series_len} exceeds current length {curr_len}'
)
X_vals = X.values[:, -self.series_len:]
col_names = [f't_{i}' for i in range(self.series_len)]
X_vals = pd.DataFrame(X_vals, columns=col_names, index=X.index)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X_vals.head())
print(X_vals.shape)
return X_vals
class DFShuffler(BaseEstimator, TransformerMixin):
def __init__(self, shuffle=True):
self.shuffle = shuffle
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if self.shuffle == False:
return X
X = X.sample(frac=1)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
class TSMinMaxScaler2(BaseEstimator, TransformerMixin):
"""Scales history and forecast parts of time-series based on history data"""
def __init__(self, scaling_len, upper_bound=5.0):
if scaling_len < 2:
raise Exception('Min Max scaling length must be >= 2')
self.scaling_len = scaling_len
self.max_scaler = MinMaxScaler()
self.row_sums = None
self.upper_bound = upper_bound
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
curr_len = X.shape[1]
if curr_len < self.scaling_len:
msg = f""" Error scaling series.
                Sum of scaling_len {self.scaling_len} should not exceed series length {curr_len}. """
raise Exception(msg)
df = X if curr_len == self.scaling_len else X[X.columns[:self.
scaling_len]]
self.row_sums = df.sum(axis=1)
df = df[self.row_sums != 0]
self.max_scaler.fit(df.T)
X_filtered = X[self.row_sums != 0].copy()
vals = self.max_scaler.transform(X_filtered.T).T
vals = np.where(vals > self.upper_bound, self.upper_bound, vals)
X = pd.DataFrame(vals, columns=X_filtered.columns, index=X_filtered
.index)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
def inverse_transform(self, X):
return self.max_scaler.inverse_transform(X.T).T
class TSMinMaxScaler(BaseEstimator, TransformerMixin):
"""Scales history and forecast parts of time-series based on history data"""
def __init__(self, scaling_len, upper_bound=5.0):
if scaling_len < 2:
raise Exception('Min Max scaling length must be >= 2')
self.scaling_len = scaling_len
self.min_vals = None
self.max_vals = None
self.ranges = None
self.upper_bound = upper_bound
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
        if self.scaling_len < 2:
msg = f""" Error scaling series.
scaling_len needs to be at least 2. Given length is {self.scaling_len}. """
raise Exception(msg)
X_vals = X.values
self.min_vals = np.expand_dims(X_vals[:, :self.scaling_len].min(
axis=1), axis=1)
self.max_vals = np.expand_dims(X_vals[:, :self.scaling_len].max(
axis=1), axis=1)
self.ranges = self.max_vals - self.min_vals
self.ranges = np.where(self.ranges == 0, 1e-05, self.ranges)
X_vals = X_vals - self.min_vals
X_vals = np.divide(X_vals, self.ranges)
X_vals = np.where(X_vals < self.upper_bound, X_vals, self.upper_bound)
X = pd.DataFrame(X_vals, columns=X.columns, index=X.index)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
def inverse_transform(self, X):
X = X * self.ranges
X = X + self.min_vals
return X
class TimeSeriesXYSplitter(BaseEstimator, TransformerMixin):
"""Splits the time series into X (history) and Y (forecast) series"""
def __init__(self, X_len, Y_len):
self.X_len = X_len
self.Y_len = Y_len
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
curr_len = X.shape[1]
encode_len = self.X_len
decode_len = 0 if self.Y_len == 'auto' else self.Y_len
if curr_len < encode_len + decode_len:
msg = f""" Error splitting series.
                Sum of X_len {self.X_len} and Y_len {self.Y_len} should not exceed series length {curr_len}. """
raise Exception(msg)
cols = X.columns
if self.Y_len == 'auto':
return {'X': X[cols[-self.X_len:]], 'Y': X[cols[-self.X_len:]]}
if self.Y_len == 0:
return {'X': X[cols[-self.X_len:]], 'Y': pd.DataFrame()}
return {'X': X[cols[-(self.X_len + self.Y_len):-self.Y_len]], 'Y':
X[cols[-self.Y_len:]]}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DataPivoter(BaseEstimator, TransformerMixin):
<|reserved_special_token_0|>
def __init__(self, non_pivoted_columns, pivoting_column,
pivoted_columns, fill_na_val):
super().__init__()
self.non_pivoted_columns = [non_pivoted_columns] if not isinstance(
non_pivoted_columns, list) else non_pivoted_columns
self.pivoted_columns = [pivoted_columns] if not isinstance(
pivoted_columns, list) else pivoted_columns
self.pivoting_column = pivoting_column
self.fill_na_val = fill_na_val
def fit(self, X, y=None):
return self
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class IndexSetter(BaseEstimator, TransformerMixin):
""" Set index """
def __init__(self, index_cols, drop_existing):
self.index_cols = index_cols
self.drop_existing = drop_existing
def fit(self, X, y=None):
return self
def transform(self, X):
X = X.copy()
X.reset_index(drop=self.drop_existing, inplace=True)
X.set_index(self.index_cols, inplace=True)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
class SubTimeSeriesSampler(BaseEstimator, TransformerMixin):
""" Samples a sub-series of length t <= the original series of length T. Assumes series is in columns
Original time-series time labels (column headers) are replaced with t_0, t_1, ... t_<series_len>.
"""
def __init__(self, series_len, num_reps):
self.series_len = series_len
self.num_reps = num_reps
def fit(self, X, y=None):
return self
def transform(self, X):
curr_len = X.shape[1]
if curr_len < self.series_len:
raise Exception(
                f'Error sampling series. Target length {self.series_len} exceeds current length {curr_len}'
)
sampled_data = []
data_arr = X.values
for _ in range(self.num_reps):
for i in range(data_arr.shape[0]):
                # high bound is exclusive, so +1 lets the final window be sampled
                rand_idx = np.random.randint(0, curr_len - self.series_len + 1)
sampled_data.append(data_arr[i, rand_idx:rand_idx + self.
series_len])
idx = list(X.index) * self.num_reps
col_names = [f't_{i}' for i in range(self.series_len)]
sampled_data = pd.DataFrame(sampled_data, columns=col_names, index=idx)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(sampled_data.head())
print(sampled_data.shape)
return sampled_data
class AddLeftRightFlipper(BaseEstimator, TransformerMixin):
"""
Adds left right flipped version of tensor
"""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
X_flipped = pd.DataFrame(np.fliplr(X), columns=X.columns, index=X.index
)
X = pd.concat([X, X_flipped], axis=0, ignore_index=True)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
class SeriesLengthTrimmer(BaseEstimator, TransformerMixin):
"""
Trims the length of a series to use latest data points
"""
def __init__(self, series_len):
self.series_len = series_len
def fit(self, X, y=None):
return self
def transform(self, X):
curr_len = X.shape[1]
if curr_len < self.series_len:
raise Exception(
                f'Error trimming series. Target length {self.series_len} exceeds current length {curr_len}'
)
X_vals = X.values[:, -self.series_len:]
col_names = [f't_{i}' for i in range(self.series_len)]
X_vals = pd.DataFrame(X_vals, columns=col_names, index=X.index)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X_vals.head())
print(X_vals.shape)
return X_vals
class DFShuffler(BaseEstimator, TransformerMixin):
def __init__(self, shuffle=True):
self.shuffle = shuffle
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if self.shuffle == False:
return X
X = X.sample(frac=1)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
class TSMinMaxScaler2(BaseEstimator, TransformerMixin):
"""Scales history and forecast parts of time-series based on history data"""
def __init__(self, scaling_len, upper_bound=5.0):
if scaling_len < 2:
raise Exception('Min Max scaling length must be >= 2')
self.scaling_len = scaling_len
self.max_scaler = MinMaxScaler()
self.row_sums = None
self.upper_bound = upper_bound
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
curr_len = X.shape[1]
if curr_len < self.scaling_len:
msg = f""" Error scaling series.
                Sum of scaling_len {self.scaling_len} should not exceed series length {curr_len}. """
raise Exception(msg)
df = X if curr_len == self.scaling_len else X[X.columns[:self.
scaling_len]]
self.row_sums = df.sum(axis=1)
df = df[self.row_sums != 0]
self.max_scaler.fit(df.T)
X_filtered = X[self.row_sums != 0].copy()
vals = self.max_scaler.transform(X_filtered.T).T
vals = np.where(vals > self.upper_bound, self.upper_bound, vals)
X = pd.DataFrame(vals, columns=X_filtered.columns, index=X_filtered
.index)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
def inverse_transform(self, X):
return self.max_scaler.inverse_transform(X.T).T
class TSMinMaxScaler(BaseEstimator, TransformerMixin):
"""Scales history and forecast parts of time-series based on history data"""
def __init__(self, scaling_len, upper_bound=5.0):
if scaling_len < 2:
raise Exception('Min Max scaling length must be >= 2')
self.scaling_len = scaling_len
self.min_vals = None
self.max_vals = None
self.ranges = None
self.upper_bound = upper_bound
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
        if self.scaling_len < 2:
msg = f""" Error scaling series.
scaling_len needs to be at least 2. Given length is {self.scaling_len}. """
raise Exception(msg)
X_vals = X.values
self.min_vals = np.expand_dims(X_vals[:, :self.scaling_len].min(
axis=1), axis=1)
self.max_vals = np.expand_dims(X_vals[:, :self.scaling_len].max(
axis=1), axis=1)
self.ranges = self.max_vals - self.min_vals
self.ranges = np.where(self.ranges == 0, 1e-05, self.ranges)
X_vals = X_vals - self.min_vals
X_vals = np.divide(X_vals, self.ranges)
X_vals = np.where(X_vals < self.upper_bound, X_vals, self.upper_bound)
X = pd.DataFrame(X_vals, columns=X.columns, index=X.index)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
def inverse_transform(self, X):
X = X * self.ranges
X = X + self.min_vals
return X
class TimeSeriesXYSplitter(BaseEstimator, TransformerMixin):
"""Splits the time series into X (history) and Y (forecast) series"""
def __init__(self, X_len, Y_len):
self.X_len = X_len
self.Y_len = Y_len
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
curr_len = X.shape[1]
encode_len = self.X_len
decode_len = 0 if self.Y_len == 'auto' else self.Y_len
if curr_len < encode_len + decode_len:
msg = f""" Error splitting series.
                Sum of X_len {self.X_len} and Y_len {self.Y_len} should not exceed series length {curr_len}. """
raise Exception(msg)
cols = X.columns
if self.Y_len == 'auto':
return {'X': X[cols[-self.X_len:]], 'Y': X[cols[-self.X_len:]]}
if self.Y_len == 0:
return {'X': X[cols[-self.X_len:]], 'Y': pd.DataFrame()}
return {'X': X[cols[-(self.X_len + self.Y_len):-self.Y_len]], 'Y':
X[cols[-self.Y_len:]]}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MissingTimeIntervalFiller(BaseEstimator, TransformerMixin):
<|reserved_special_token_0|>
DAYS = 'days'
MINUTES = 'minutes'
HOURS = 'hours'
def __init__(self, id_columns, time_column, value_columns, time_unit,
step_size):
super().__init__()
if not isinstance(id_columns, list):
self.id_columns = [id_columns]
else:
self.id_columns = id_columns
self.time_column = time_column
if not isinstance(value_columns, list):
self.value_columns = [value_columns]
else:
self.value_columns = value_columns
self.time_unit = time_unit
self.step_size = int(step_size)
def fit(self, X, y=None):
return self
def transform(self, X):
min_time = X[self.time_column].min()
max_time = X[self.time_column].max()
if self.time_unit == MissingTimeIntervalFiller.DAYS:
num_steps = (max_time - min_time).days // self.step_size + 1
all_time_ints = [(min_time + timedelta(days=x * self.step_size)
) for x in range(num_steps)]
elif self.time_unit == MissingTimeIntervalFiller.HOURS:
time_diff_sec = (max_time - min_time).total_seconds()
num_steps = int(time_diff_sec // (3600 * self.step_size)) + 1
all_time_ints = [(min_time + timedelta(hours=x * self.step_size
)) for x in range(num_steps)]
elif self.time_unit == MissingTimeIntervalFiller.MINUTES:
time_diff_sec = (max_time - min_time).total_seconds()
num_steps = int(time_diff_sec // (60 * self.step_size)) + 1
all_time_ints = [(min_time + timedelta(minutes=x * self.
step_size)) for x in range(num_steps)]
else:
raise Exception(
f"Unrecognized time unit: {self.time_unit}. Must be one of ['days', 'hours', 'minutes']."
)
full_intervals_df = pd.DataFrame(data=all_time_ints, columns=[self.
time_column])
id_cols_df = X[self.id_columns].drop_duplicates()
full_df = id_cols_df.assign(foo=1).merge(full_intervals_df.assign(
            foo=1)).drop('foo', axis=1)
full_df = full_df.merge(X[self.id_columns + [self.time_column] +
self.value_columns], on=self.id_columns + [self.time_column],
how='left')
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(full_df.head())
print(full_df.shape)
return full_df
class DataPivoter(BaseEstimator, TransformerMixin):
""" Pivots a dataframe with a given column """
def __init__(self, non_pivoted_columns, pivoting_column,
pivoted_columns, fill_na_val):
super().__init__()
self.non_pivoted_columns = [non_pivoted_columns] if not isinstance(
non_pivoted_columns, list) else non_pivoted_columns
self.pivoted_columns = [pivoted_columns] if not isinstance(
pivoted_columns, list) else pivoted_columns
self.pivoting_column = pivoting_column
self.fill_na_val = fill_na_val
def fit(self, X, y=None):
return self
def transform(self, X):
processed_X = X.pivot_table(index=self.non_pivoted_columns, aggfunc
=sum, columns=self.pivoting_column, values=self.pivoted_columns,
fill_value=self.fill_na_val).reset_index()
processed_X.columns = [(col[0] if col[1] == '' else col[1]) for col in
processed_X.columns]
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(processed_X.head())
print(processed_X.shape)
return processed_X
def inverse_transform(self, preds_df):
preds_df2 = pd.melt(preds_df.reset_index(), id_vars=self.
non_pivoted_columns, value_vars=preds_df.columns, var_name=self
.pivoting_column, value_name=self.pivoted_columns[0])
return preds_df2
class IndexSetter(BaseEstimator, TransformerMixin):
""" Set index """
def __init__(self, index_cols, drop_existing):
self.index_cols = index_cols
self.drop_existing = drop_existing
def fit(self, X, y=None):
return self
def transform(self, X):
X = X.copy()
X.reset_index(drop=self.drop_existing, inplace=True)
X.set_index(self.index_cols, inplace=True)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
class SubTimeSeriesSampler(BaseEstimator, TransformerMixin):
""" Samples a sub-series of length t <= the original series of length T. Assumes series is in columns
Original time-series time labels (column headers) are replaced with t_0, t_1, ... t_<series_len>.
"""
def __init__(self, series_len, num_reps):
self.series_len = series_len
self.num_reps = num_reps
def fit(self, X, y=None):
return self
def transform(self, X):
curr_len = X.shape[1]
if curr_len < self.series_len:
raise Exception(
                f'Error sampling series. Target length {self.series_len} exceeds current length {curr_len}'
)
sampled_data = []
data_arr = X.values
for _ in range(self.num_reps):
for i in range(data_arr.shape[0]):
                # high bound is exclusive, so +1 lets the final window be sampled
                rand_idx = np.random.randint(0, curr_len - self.series_len + 1)
sampled_data.append(data_arr[i, rand_idx:rand_idx + self.
series_len])
idx = list(X.index) * self.num_reps
col_names = [f't_{i}' for i in range(self.series_len)]
sampled_data = pd.DataFrame(sampled_data, columns=col_names, index=idx)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(sampled_data.head())
print(sampled_data.shape)
return sampled_data
class AddLeftRightFlipper(BaseEstimator, TransformerMixin):
"""
Adds left right flipped version of tensor
"""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
X_flipped = pd.DataFrame(np.fliplr(X), columns=X.columns, index=X.index
)
X = pd.concat([X, X_flipped], axis=0, ignore_index=True)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
class SeriesLengthTrimmer(BaseEstimator, TransformerMixin):
"""
Trims the length of a series to use latest data points
"""
def __init__(self, series_len):
self.series_len = series_len
def fit(self, X, y=None):
return self
def transform(self, X):
curr_len = X.shape[1]
if curr_len < self.series_len:
raise Exception(
                f'Error trimming series. Target length {self.series_len} exceeds current length {curr_len}'
)
X_vals = X.values[:, -self.series_len:]
col_names = [f't_{i}' for i in range(self.series_len)]
X_vals = pd.DataFrame(X_vals, columns=col_names, index=X.index)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X_vals.head())
print(X_vals.shape)
return X_vals
class DFShuffler(BaseEstimator, TransformerMixin):
def __init__(self, shuffle=True):
self.shuffle = shuffle
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if self.shuffle == False:
return X
X = X.sample(frac=1)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
class TSMinMaxScaler2(BaseEstimator, TransformerMixin):
"""Scales history and forecast parts of time-series based on history data"""
def __init__(self, scaling_len, upper_bound=5.0):
if scaling_len < 2:
raise Exception('Min Max scaling length must be >= 2')
self.scaling_len = scaling_len
self.max_scaler = MinMaxScaler()
self.row_sums = None
self.upper_bound = upper_bound
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
curr_len = X.shape[1]
if curr_len < self.scaling_len:
msg = f""" Error scaling series.
                Sum of scaling_len {self.scaling_len} should not exceed series length {curr_len}. """
raise Exception(msg)
df = X if curr_len == self.scaling_len else X[X.columns[:self.
scaling_len]]
self.row_sums = df.sum(axis=1)
df = df[self.row_sums != 0]
self.max_scaler.fit(df.T)
X_filtered = X[self.row_sums != 0].copy()
vals = self.max_scaler.transform(X_filtered.T).T
vals = np.where(vals > self.upper_bound, self.upper_bound, vals)
X = pd.DataFrame(vals, columns=X_filtered.columns, index=X_filtered
.index)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
def inverse_transform(self, X):
return self.max_scaler.inverse_transform(X.T).T
class TSMinMaxScaler(BaseEstimator, TransformerMixin):
"""Scales history and forecast parts of time-series based on history data"""
def __init__(self, scaling_len, upper_bound=5.0):
if scaling_len < 2:
raise Exception('Min Max scaling length must be >= 2')
self.scaling_len = scaling_len
self.min_vals = None
self.max_vals = None
self.ranges = None
self.upper_bound = upper_bound
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
        if self.scaling_len < 2:
msg = f""" Error scaling series.
scaling_len needs to be at least 2. Given length is {self.scaling_len}. """
raise Exception(msg)
X_vals = X.values
self.min_vals = np.expand_dims(X_vals[:, :self.scaling_len].min(
axis=1), axis=1)
self.max_vals = np.expand_dims(X_vals[:, :self.scaling_len].max(
axis=1), axis=1)
self.ranges = self.max_vals - self.min_vals
self.ranges = np.where(self.ranges == 0, 1e-05, self.ranges)
X_vals = X_vals - self.min_vals
X_vals = np.divide(X_vals, self.ranges)
X_vals = np.where(X_vals < self.upper_bound, X_vals, self.upper_bound)
X = pd.DataFrame(X_vals, columns=X.columns, index=X.index)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
def inverse_transform(self, X):
X = X * self.ranges
X = X + self.min_vals
return X
class TimeSeriesXYSplitter(BaseEstimator, TransformerMixin):
"""Splits the time series into X (history) and Y (forecast) series"""
def __init__(self, X_len, Y_len):
self.X_len = X_len
self.Y_len = Y_len
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
curr_len = X.shape[1]
encode_len = self.X_len
decode_len = 0 if self.Y_len == 'auto' else self.Y_len
if curr_len < encode_len + decode_len:
msg = f""" Error splitting series.
                Sum of X_len {self.X_len} and Y_len {self.Y_len} should not exceed series length {curr_len}. """
raise Exception(msg)
cols = X.columns
if self.Y_len == 'auto':
return {'X': X[cols[-self.X_len:]], 'Y': X[cols[-self.X_len:]]}
if self.Y_len == 0:
return {'X': X[cols[-self.X_len:]], 'Y': pd.DataFrame()}
return {'X': X[cols[-(self.X_len + self.Y_len):-self.Y_len]], 'Y':
X[cols[-self.Y_len:]]}
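
# A minimal end-to-end sketch chaining the transformers above (assumes pd, np
# and DEBUG are defined as in the masked header of this snippet):
from sklearn.pipeline import Pipeline

demo_df = pd.DataFrame(np.random.rand(4, 30),
                       columns=[f't_{i}' for i in range(30)])
prep = Pipeline([
    ('trim', SeriesLengthTrimmer(series_len=24)),
    ('scale', TSMinMaxScaler(scaling_len=18)),
])
scaled = prep.fit_transform(demo_df)
xy = TimeSeriesXYSplitter(X_len=18, Y_len=6).transform(scaled)
print(xy['X'].shape, xy['Y'].shape)  # (4, 18) (4, 6)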
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DailyAggregator(BaseEstimator, TransformerMixin):
<|reserved_special_token_0|>
def __init__(self, id_columns, time_column, value_columns):
super().__init__()
if not isinstance(id_columns, list):
self.id_columns = [id_columns]
else:
self.id_columns = id_columns
self.time_column = time_column
if not isinstance(value_columns, list):
self.value_columns = [value_columns]
else:
self.value_columns = value_columns
<|reserved_special_token_0|>
def transform(self, X):
X = X.copy()
X[self.time_column] = X[self.time_column].dt.normalize()
X = X.groupby(by=self.id_columns + [self.time_column], as_index=False)[
self.value_columns].sum()
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
class MissingTimeIntervalFiller(BaseEstimator, TransformerMixin):
""" Adds missing time intervals in a time-series dataframe. """
DAYS = 'days'
MINUTES = 'minutes'
HOURS = 'hours'
def __init__(self, id_columns, time_column, value_columns, time_unit,
step_size):
super().__init__()
if not isinstance(id_columns, list):
self.id_columns = [id_columns]
else:
self.id_columns = id_columns
self.time_column = time_column
if not isinstance(value_columns, list):
self.value_columns = [value_columns]
else:
self.value_columns = value_columns
self.time_unit = time_unit
self.step_size = int(step_size)
def fit(self, X, y=None):
return self
def transform(self, X):
min_time = X[self.time_column].min()
max_time = X[self.time_column].max()
if self.time_unit == MissingTimeIntervalFiller.DAYS:
num_steps = (max_time - min_time).days // self.step_size + 1
all_time_ints = [(min_time + timedelta(days=x * self.step_size)
) for x in range(num_steps)]
elif self.time_unit == MissingTimeIntervalFiller.HOURS:
time_diff_sec = (max_time - min_time).total_seconds()
num_steps = int(time_diff_sec // (3600 * self.step_size)) + 1
num_steps = (max_time - min_time).days + 1
all_time_ints = [(min_time + timedelta(hours=x * self.step_size
)) for x in range(num_steps)]
elif self.time_unit == MissingTimeIntervalFiller.MINUTES:
time_diff_sec = (max_time - min_time).total_seconds()
num_steps = int(time_diff_sec // (60 * self.step_size)) + 1
all_time_ints = [(min_time + timedelta(minutes=x * self.
step_size)) for x in range(num_steps)]
else:
raise Exception(
f"Unrecognized time unit: {self.time_unit}. Must be one of ['days', 'hours', 'minutes']."
)
full_intervals_df = pd.DataFrame(data=all_time_ints, columns=[self.
time_column])
id_cols_df = X[self.id_columns].drop_duplicates()
full_df = id_cols_df.assign(foo=1).merge(full_intervals_df.assign(
foo=1)).drop('foo', 1)
full_df = full_df.merge(X[self.id_columns + [self.time_column] +
self.value_columns], on=self.id_columns + [self.time_column],
how='left')
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(full_df.head())
print(full_df.shape)
return full_df
class DataPivoter(BaseEstimator, TransformerMixin):
""" Pivots a dataframe with a given column """
def __init__(self, non_pivoted_columns, pivoting_column,
pivoted_columns, fill_na_val):
super().__init__()
self.non_pivoted_columns = [non_pivoted_columns] if not isinstance(
non_pivoted_columns, list) else non_pivoted_columns
self.pivoted_columns = [pivoted_columns] if not isinstance(
pivoted_columns, list) else pivoted_columns
self.pivoting_column = pivoting_column
self.fill_na_val = fill_na_val
def fit(self, X, y=None):
return self
def transform(self, X):
processed_X = X.pivot_table(index=self.non_pivoted_columns, aggfunc
=sum, columns=self.pivoting_column, values=self.pivoted_columns,
fill_value=self.fill_na_val).reset_index()
processed_X.columns = [(col[0] if col[1] == '' else col[1]) for col in
processed_X.columns]
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(processed_X.head())
print(processed_X.shape)
return processed_X
def inverse_transform(self, preds_df):
preds_df2 = pd.melt(preds_df.reset_index(), id_vars=self.
non_pivoted_columns, value_vars=preds_df.columns, var_name=self
.pivoting_column, value_name=self.pivoted_columns[0])
return preds_df2
class IndexSetter(BaseEstimator, TransformerMixin):
""" Set index """
def __init__(self, index_cols, drop_existing):
self.index_cols = index_cols
self.drop_existing = drop_existing
def fit(self, X, y=None):
return self
def transform(self, X):
X = X.copy()
X.reset_index(drop=self.drop_existing, inplace=True)
X.set_index(self.index_cols, inplace=True)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
class SubTimeSeriesSampler(BaseEstimator, TransformerMixin):
""" Samples a sub-series of length t <= the original series of length T. Assumes series is in columns
Original time-series time labels (column headers) are replaced with t_0, t_1, ... t_<series_len>.
"""
def __init__(self, series_len, num_reps):
self.series_len = series_len
self.num_reps = num_reps
def fit(self, X, y=None):
return self
def transform(self, X):
curr_len = X.shape[1]
if curr_len < self.series_len:
raise Exception(
f'Error sampling series. Target length {self.series_len} exceeds current length {curr_len}'
)
sampled_data = []
data_arr = X.values
for _ in range(self.num_reps):
for i in range(data_arr.shape[0]):
rand_idx = np.random.randint(0, curr_len - self.series_len)
sampled_data.append(data_arr[i, rand_idx:rand_idx + self.
series_len])
idx = list(X.index) * self.num_reps
col_names = [f't_{i}' for i in range(self.series_len)]
sampled_data = pd.DataFrame(sampled_data, columns=col_names, index=idx)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(sampled_data.head())
print(sampled_data.shape)
return sampled_data
class AddLeftRightFlipper(BaseEstimator, TransformerMixin):
"""
Adds left right flipped version of tensor
"""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
X_flipped = pd.DataFrame(np.fliplr(X), columns=X.columns, index=X.index
)
X = pd.concat([X, X_flipped], axis=0, ignore_index=True)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
class SeriesLengthTrimmer(BaseEstimator, TransformerMixin):
"""
Trims the length of a series to use latest data points
"""
def __init__(self, series_len):
self.series_len = series_len
def fit(self, X, y=None):
return self
def transform(self, X):
curr_len = X.shape[1]
if curr_len < self.series_len:
raise Exception(
f'Error trimming series. Target length {self.series_len} exceeds current length {curr_len}'
)
X_vals = X.values[:, -self.series_len:]
col_names = [f't_{i}' for i in range(self.series_len)]
X_vals = pd.DataFrame(X_vals, columns=col_names, index=X.index)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X_vals.head())
print(X_vals.shape)
return X_vals
class DFShuffler(BaseEstimator, TransformerMixin):
def __init__(self, shuffle=True):
self.shuffle = shuffle
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if self.shuffle == False:
return X
X = X.sample(frac=1)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
class TSMinMaxScaler2(BaseEstimator, TransformerMixin):
"""Scales history and forecast parts of time-series based on history data"""
def __init__(self, scaling_len, upper_bound=5.0):
if scaling_len < 2:
raise Exception('Min Max scaling length must be >= 2')
self.scaling_len = scaling_len
self.max_scaler = MinMaxScaler()
self.row_sums = None
self.upper_bound = upper_bound
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
curr_len = X.shape[1]
if curr_len < self.scaling_len:
msg = f""" Error scaling series.
Sum of scaling_len {self.scaling_len} should not exceed series length {curr_len}. """
raise Exception(msg)
df = X if curr_len == self.scaling_len else X[X.columns[:self.
scaling_len]]
self.row_sums = df.sum(axis=1)
df = df[self.row_sums != 0]
self.max_scaler.fit(df.T)
X_filtered = X[self.row_sums != 0].copy()
vals = self.max_scaler.transform(X_filtered.T).T
vals = np.where(vals > self.upper_bound, self.upper_bound, vals)
X = pd.DataFrame(vals, columns=X_filtered.columns, index=X_filtered
.index)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
def inverse_transform(self, X):
return self.max_scaler.inverse_transform(X.T).T
class TSMinMaxScaler(BaseEstimator, TransformerMixin):
"""Scales history and forecast parts of time-series based on history data"""
def __init__(self, scaling_len, upper_bound=5.0):
if scaling_len < 2:
raise Exception('Min Max scaling length must be >= 2')
self.scaling_len = scaling_len
self.min_vals = None
self.max_vals = None
self.ranges = None
self.upper_bound = upper_bound
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if self.scaling_len < 1:
msg = f""" Error scaling series.
scaling_len needs to be at least 2. Given length is {self.scaling_len}. """
raise Exception(msg)
X_vals = X.values
self.min_vals = np.expand_dims(X_vals[:, :self.scaling_len].min(
axis=1), axis=1)
self.max_vals = np.expand_dims(X_vals[:, :self.scaling_len].max(
axis=1), axis=1)
self.ranges = self.max_vals - self.min_vals
self.ranges = np.where(self.ranges == 0, 1e-05, self.ranges)
X_vals = X_vals - self.min_vals
X_vals = np.divide(X_vals, self.ranges)
X_vals = np.where(X_vals < self.upper_bound, X_vals, self.upper_bound)
X = pd.DataFrame(X_vals, columns=X.columns, index=X.index)
if DEBUG:
print(f'-------after {__class__.__name__}------------')
print(X.head())
print(X.shape)
return X
def inverse_transform(self, X):
X = X * self.ranges
X = X + self.min_vals
return X
class TimeSeriesXYSplitter(BaseEstimator, TransformerMixin):
"""Splits the time series into X (history) and Y (forecast) series"""
def __init__(self, X_len, Y_len):
self.X_len = X_len
self.Y_len = Y_len
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
curr_len = X.shape[1]
encode_len = self.X_len
decode_len = 0 if self.Y_len == 'auto' else self.Y_len
if curr_len < encode_len + decode_len:
msg = f""" Error splitting series.
Sum of X_len {self.X_len} and Y_len {self.Y_len} should not exceed series length {curr_len}. """
raise Exception(msg)
cols = X.columns
if self.Y_len == 'auto':
return {'X': X[cols[-self.X_len:]], 'Y': X[cols[-self.X_len:]]}
if self.Y_len == 0:
return {'X': X[cols[-self.X_len:]], 'Y': pd.DataFrame()}
return {'X': X[cols[-(self.X_len + self.Y_len):-self.Y_len]], 'Y':
X[cols[-self.Y_len:]]}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import numpy as np, pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.base import BaseEstimator, TransformerMixin
from datetime import timedelta
import sys
DEBUG = False
class DailyAggregator(BaseEstimator, TransformerMixin):
''' Aggregates time-series values to daily level. '''
def __init__(self, id_columns, time_column, value_columns ):
super().__init__()
if not isinstance(id_columns, list):
self.id_columns = [id_columns]
else:
self.id_columns = id_columns
self.time_column = time_column
if not isinstance(value_columns, list):
self.value_columns = [value_columns]
else:
self.value_columns = value_columns
def fit(self, X, y=None): return self
def transform(self, X):
X = X.copy()
X[self.time_column] = X[self.time_column].dt.normalize()
X = X.groupby(by=self.id_columns + [self.time_column], as_index=False)[self.value_columns].sum()
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(X.head())
print(X.shape)
return X
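
# A minimal usage sketch for DailyAggregator (hypothetical toy data, not part
# of the pipeline below): two intra-day readings for one series collapse into
# a single daily row with the values summed.
def _example_daily_aggregator():
    df = pd.DataFrame({
        'seriesid': ['a', 'a'],
        'ts': pd.to_datetime(['2021-01-01 09:00', '2021-01-01 17:00']),
        'v': [3, 4],
    })
    agg = DailyAggregator(id_columns='seriesid', time_column='ts', value_columns='v')
    daily = agg.fit_transform(df)
    # daily holds one row: seriesid='a', ts=2021-01-01 00:00, v=7
    return daily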
class MissingTimeIntervalFiller(BaseEstimator, TransformerMixin):
''' Adds missing time intervals in a time-series dataframe. '''
DAYS = 'days'
MINUTES = 'minutes'
HOURS = 'hours'
def __init__(self, id_columns, time_column, value_columns, time_unit, step_size ):
super().__init__()
if not isinstance(id_columns, list):
self.id_columns = [id_columns]
else:
self.id_columns = id_columns
self.time_column = time_column
if not isinstance(value_columns, list):
self.value_columns = [value_columns]
else:
self.value_columns = value_columns
self.time_unit = time_unit
self.step_size = int(step_size)
def fit(self, X, y=None): return self # do nothing in fit
def transform(self, X):
min_time = X[self.time_column].min()
max_time = X[self.time_column].max()
# print(min_time, max_time)
if self.time_unit == MissingTimeIntervalFiller.DAYS:
num_steps = ( (max_time - min_time).days // self.step_size ) + 1
all_time_ints = [min_time + timedelta(days=x*self.step_size) for x in range(num_steps)]
        elif self.time_unit == MissingTimeIntervalFiller.HOURS:
            time_diff_sec = (max_time - min_time).total_seconds()
            num_steps = int(time_diff_sec // (3600 * self.step_size)) + 1
            all_time_ints = [min_time + timedelta(hours=x*self.step_size) for x in range(num_steps)]
elif self.time_unit == MissingTimeIntervalFiller.MINUTES:
time_diff_sec = (max_time - min_time).total_seconds()
num_steps = int(time_diff_sec // (60 * self.step_size)) + 1
# print('num_steps', num_steps)
all_time_ints = [min_time + timedelta(minutes=x*self.step_size) for x in range(num_steps)]
else:
raise Exception(f"Unrecognized time unit: {self.time_unit}. Must be one of ['days', 'hours', 'minutes'].")
# create df of all time intervals
full_intervals_df = pd.DataFrame(data = all_time_ints, columns = [self.time_column])
# get unique id-var values from original input data
id_cols_df = X[self.id_columns].drop_duplicates()
# get cross join of all time intervals and ids columns
        full_df = id_cols_df.assign(foo=1).merge(full_intervals_df.assign(foo=1)).drop(columns='foo')
# merge original data on to this full table
full_df = full_df.merge(X[self.id_columns + [self.time_column] + self.value_columns],
on=self.id_columns + [self.time_column], how='left')
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(full_df.head())
print(full_df.shape)
return full_df
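
# A minimal usage sketch for MissingTimeIntervalFiller (hypothetical toy data):
# a daily series with a one-day gap gets the missing date back as a row with
# NaN in the value column, ready for downstream imputation or zero-filling.
def _example_interval_filler():
    df = pd.DataFrame({
        'seriesid': ['a', 'a'],
        'ts': pd.to_datetime(['2021-01-01', '2021-01-03']),
        'v': [1.0, 3.0],
    })
    filler = MissingTimeIntervalFiller('seriesid', 'ts', 'v', 'days', 1)
    full = filler.fit_transform(df)
    # full holds three rows: 2021-01-01 (1.0), 2021-01-02 (NaN), 2021-01-03 (3.0)
    return full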
class DataPivoter(BaseEstimator, TransformerMixin):
''' Pivots a dataframe with a given column '''
def __init__(self, non_pivoted_columns, pivoting_column, pivoted_columns, fill_na_val):
super().__init__()
self.non_pivoted_columns = \
[non_pivoted_columns] if not isinstance(non_pivoted_columns, list) else non_pivoted_columns
self.pivoted_columns = [pivoted_columns] if not isinstance(pivoted_columns, list) else pivoted_columns
self.pivoting_column = pivoting_column
self.fill_na_val = fill_na_val
def fit(self, X, y=None): return self # do nothing in fit
def transform(self, X):
processed_X = X.pivot_table(index = self.non_pivoted_columns,
                                    aggfunc='sum',
columns=self.pivoting_column,
values=self.pivoted_columns,
fill_value = self.fill_na_val
).reset_index()
# pivot table will result in multi column index. To get a regular column names
processed_X.columns = [ col[0] if col[1] == '' else col[1] for col in processed_X.columns ]
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(processed_X.head())
print(processed_X.shape)
return processed_X
def inverse_transform(self, preds_df):
# unpivot given dataframe
preds_df2 = pd.melt(preds_df.reset_index(),
id_vars=self.non_pivoted_columns,
value_vars=preds_df.columns,
var_name = self.pivoting_column,
value_name = self.pivoted_columns[0]
)
return preds_df2
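
# A minimal usage sketch for DataPivoter (hypothetical toy data): long-format
# rows become one row per series with the timestamps as columns;
# inverse_transform melts predictions back to long format.
def _example_data_pivoter():
    df = pd.DataFrame({
        'seriesid': ['a', 'a', 'b', 'b'],
        'ts': pd.to_datetime(['2021-01-01', '2021-01-02'] * 2),
        'v': [1, 2, 3, 4],
    })
    pivoter = DataPivoter('seriesid', 'ts', 'v', fill_na_val=0)
    wide = pivoter.fit_transform(df)
    # wide columns: ['seriesid', Timestamp('2021-01-01'), Timestamp('2021-01-02')]
    return wide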
class IndexSetter(BaseEstimator, TransformerMixin):
''' Set index '''
def __init__(self, index_cols, drop_existing):
self.index_cols = index_cols
self.drop_existing = drop_existing
def fit(self, X, y=None): return self # do nothing in fit
def transform(self, X):
X = X.copy()
X.reset_index(drop=self.drop_existing, inplace=True)
X.set_index(self.index_cols, inplace=True)
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(X.head())
print(X.shape)
return X
class SubTimeSeriesSampler(BaseEstimator, TransformerMixin):
''' Samples a sub-series of length t <= the original series of length T. Assumes series is in columns
Original time-series time labels (column headers) are replaced with t_0, t_1, ... t_<series_len>.
'''
def __init__(self, series_len, num_reps):
self.series_len = series_len
self.num_reps = num_reps
def fit(self, X, y=None): return self
def transform(self, X):
curr_len = X.shape[1]
        if curr_len < self.series_len:
            raise Exception(f"Error sampling series. Target length {self.series_len} exceeds current length {curr_len}.")
sampled_data = []
data_arr = X.values
for _ in range(self.num_reps):
for i in range(data_arr.shape[0]):
                # +1 so a window can start at every valid offset, including 0 when curr_len == series_len
                rand_idx = np.random.randint(0, curr_len - self.series_len + 1)
sampled_data.append( data_arr[i, rand_idx: rand_idx + self.series_len] )
idx = list(X.index) * self.num_reps
col_names = [ f't_{i}' for i in range(self.series_len)]
sampled_data = pd.DataFrame(sampled_data, columns=col_names, index= idx)
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(sampled_data.head())
print(sampled_data.shape)
return sampled_data
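
# A minimal usage sketch for SubTimeSeriesSampler (hypothetical toy data):
# from a 5-step series, draw num_reps random windows of length 3; the window
# columns are relabeled t_0 .. t_2 regardless of where they were sampled from.
def _example_sub_series_sampler():
    X = pd.DataFrame([[0, 1, 2, 3, 4]],
                     columns=[f'd{i}' for i in range(5)],
                     index=['a'])
    sampler = SubTimeSeriesSampler(series_len=3, num_reps=2)
    samples = sampler.fit_transform(X)
    # samples has shape (2, 3) with columns ['t_0', 't_1', 't_2']
    return samples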
class AddLeftRightFlipper(BaseEstimator, TransformerMixin):
'''
Adds left right flipped version of tensor
'''
def __init__(self): pass
def fit(self, X, y=None): return self
def transform(self, X):
X_flipped = pd.DataFrame( np.fliplr(X), columns=X.columns, index=X.index )
X = pd.concat([X, X_flipped], axis=0, ignore_index=True)
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(X.head())
print(X.shape)
return X
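
# A minimal usage sketch for AddLeftRightFlipper (hypothetical toy data): the
# output stacks the original rows and their time-reversed copies, so the row
# count doubles and the index is reset (ignore_index=True).
def _example_flipper():
    X = pd.DataFrame([[1, 2, 3]], columns=['t_0', 't_1', 't_2'])
    flipped = AddLeftRightFlipper().fit_transform(X)
    # flipped holds two rows: [1, 2, 3] and [3, 2, 1]
    return flipped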
class SeriesLengthTrimmer(BaseEstimator, TransformerMixin):
'''
Trims the length of a series to use latest data points
'''
def __init__(self, series_len):
self.series_len = series_len
def fit(self, X, y=None): return self
def transform(self, X):
curr_len = X.shape[1]
if curr_len < self.series_len:
raise Exception(f"Error trimming series. Target length {self.series_len} exceeds current length {curr_len}")
X_vals = X.values[:, -self.series_len:]
col_names = [ f't_{i}' for i in range(self.series_len)]
X_vals = pd.DataFrame(X_vals, columns=col_names, index=X.index)
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(X_vals.head())
print(X_vals.shape)
return X_vals
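
# A minimal usage sketch for SeriesLengthTrimmer (hypothetical toy data):
# keeps only the last series_len columns and relabels them t_0 .. t_<n-1>.
def _example_trimmer():
    X = pd.DataFrame([[1, 2, 3, 4]], columns=['a', 'b', 'c', 'd'])
    trimmed = SeriesLengthTrimmer(series_len=2).fit_transform(X)
    # trimmed has columns ['t_0', 't_1'] with values [3, 4]
    return trimmed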
class DFShuffler(BaseEstimator, TransformerMixin):
def __init__(self, shuffle = True):
self.shuffle = shuffle
def fit(self, X, y=None): return self
def transform(self, X, y=None):
        if not self.shuffle: return X
X = X.sample(frac=1)
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(X.head())
print(X.shape)
return X
class TSMinMaxScaler2(BaseEstimator, TransformerMixin):
'''Scales history and forecast parts of time-series based on history data'''
def __init__(self, scaling_len, upper_bound = 5.):
if scaling_len < 2: raise Exception("Min Max scaling length must be >= 2")
self.scaling_len = scaling_len
self.max_scaler = MinMaxScaler()
self.row_sums = None
self.upper_bound = upper_bound
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
curr_len = X.shape[1]
if curr_len < self.scaling_len:
            msg = f''' Error scaling series.
            scaling_len {self.scaling_len} should not exceed series length {curr_len}. '''
raise Exception(msg)
df = X if curr_len == self.scaling_len else X[ X.columns[ : self.scaling_len ] ]
self.row_sums = df.sum(axis=1)
df = df[self.row_sums != 0]
self.max_scaler.fit(df.T)
# print(X.shape, self.row_sums.shape)
# sys.exit()
X_filtered = X[self.row_sums != 0].copy()
vals = self.max_scaler.transform(X_filtered.T).T
vals = np.where(vals > self.upper_bound, self.upper_bound, vals)
X = pd.DataFrame(vals, columns=X_filtered.columns, index=X_filtered.index)
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(X.head())
print(X.shape)
return X
def inverse_transform(self, X):
return self.max_scaler.inverse_transform(X.T).T
class TSMinMaxScaler(BaseEstimator, TransformerMixin):
'''Scales history and forecast parts of time-series based on history data'''
def __init__(self, scaling_len, upper_bound = 5.):
if scaling_len < 2: raise Exception("Min Max scaling length must be >= 2")
self.scaling_len = scaling_len
self.min_vals = None
self.max_vals = None
self.ranges = None
self.upper_bound = upper_bound
def fit(self, X, y=None): return self
def transform(self, X, y=None):
        if self.scaling_len < 2:
msg = f''' Error scaling series.
scaling_len needs to be at least 2. Given length is {self.scaling_len}. '''
raise Exception(msg)
X_vals = X.values
self.min_vals = np.expand_dims( X_vals[ :, : self.scaling_len ].min(axis=1), axis = 1)
self.max_vals = np.expand_dims( X_vals[ :, : self.scaling_len ].max(axis=1), axis = 1)
self.ranges = self.max_vals - self.min_vals
self.ranges = np.where(self.ranges == 0, 1e-5, self.ranges)
# print(self.min_vals.shape, self.ranges.shape)
# sys.exit()
X_vals = X_vals - self.min_vals
X_vals = np.divide(X_vals, self.ranges)
X_vals = np.where( X_vals < self.upper_bound, X_vals, self.upper_bound)
X = pd.DataFrame(X_vals, columns=X.columns, index=X.index)
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(X.head())
print(X.shape)
return X
def inverse_transform(self, X):
X = X * self.ranges
X = X + self.min_vals
return X
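
# A minimal round-trip sketch for TSMinMaxScaler (hypothetical toy data): min
# and max are taken from the first scaling_len points only, so the forecast
# part may legitimately exceed 1.0 (up to upper_bound) after scaling.
def _example_min_max_scaler():
    X = pd.DataFrame([[0.0, 10.0, 20.0, 40.0]],
                     columns=['t_0', 't_1', 't_2', 't_3'],
                     index=['a'])
    scaler = TSMinMaxScaler(scaling_len=3)
    scaled = scaler.fit_transform(X)
    # the history scales to [0.0, 0.5, 1.0]; the forecast point becomes 2.0
    restored = scaler.inverse_transform(scaled)
    # restored equals X, since no value hit the upper_bound clip here
    return scaled, restored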
class TimeSeriesXYSplitter(BaseEstimator, TransformerMixin):
'''Splits the time series into X (history) and Y (forecast) series'''
def __init__(self, X_len, Y_len):
self.X_len = X_len
self.Y_len = Y_len
def fit(self, X, y=None): return self
def transform(self, X, y=None):
curr_len = X.shape[1]
encode_len = self.X_len
decode_len = (0 if self.Y_len == 'auto' else self.Y_len)
if curr_len < encode_len + decode_len:
msg = f''' Error splitting series.
            Sum of X_len {self.X_len} and Y_len {self.Y_len} should not exceed series length {curr_len}. '''
raise Exception(msg)
# bit of a hack but sklearn pipeline only allows one thing to be returned in transform()
cols = X.columns
if self.Y_len == 'auto': return { 'X': X[cols[-self.X_len :]], 'Y': X[cols[-self.X_len :]] }
if self.Y_len == 0: return { 'X': X[cols[-self.X_len :]], 'Y': pd.DataFrame() }
return {
'X': X[cols[-( self.X_len + self.Y_len) : -self.Y_len] ],
'Y':X[cols[ -self.Y_len : ] ]
}
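
# A minimal usage sketch for TimeSeriesXYSplitter (hypothetical toy data): the
# last Y_len columns become the forecast target Y, and the X_len columns just
# before them become the history X.
def _example_xy_splitter():
    X = pd.DataFrame([[1, 2, 3, 4, 5]],
                     columns=[f't_{i}' for i in range(5)])
    splitter = TimeSeriesXYSplitter(X_len=3, Y_len=2)
    xy = splitter.fit_transform(X)
    # xy['X'] holds columns t_0..t_2, xy['Y'] holds t_3..t_4
    return xy
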
if __name__ == "__main__":
# data = pd.read_parquet("wfm_single_q_Internal_daily_history.parquet")
# data = pd.read_parquet("WFM_200q_Internal_daily_history.parquet")
# data.rename(columns={ 'queueid': 'seriesid', 'date': 'ts', 'callvolume': 'v',}, inplace=True)
data = pd.read_parquet("History_series_0028C91B.002795_filled.parquet")
data.rename(columns={ 'queueid': 'seriesid', 'time': 'ts', 'callvolume': 'v',}, inplace=True)
data['ts'] = pd.to_datetime(data['ts'])
data = data[['seriesid', 'ts', 'v']]
hist_len = 365
fcst_len = 90
print("-----------orig data -------------------")
# print(data.head()); print(data.shape)
print("-----------after daily agg -------------------")
agg = DailyAggregator('seriesid', 'ts', 'v')
data = agg.fit_transform(data)
# print(data.head()); print(data.shape)
print("-----------after adding missing intervals -------------------")
filler = MissingTimeIntervalFiller('seriesid', 'ts', 'v', 'days', 1)
data = filler.fit_transform(data)
# print(data.head()); print(data.shape)
print("-----------after pivoting -------------------")
    pivoter = DataPivoter('seriesid', 'ts', 'v', 0)
data = pivoter.fit_transform(data)
# print(data.head()); print(data.shape)
print("-----------after indexing -------------------")
indexer = IndexSetter('seriesid', drop_existing=True)
data = indexer.fit_transform(data)
# print(data.head()); print(data.shape)
print("-----------after sampling -------------------")
sampler = SubTimeSeriesSampler(series_len=hist_len+fcst_len, num_reps=5)
data = sampler.fit_transform(data)
# print(data.head()); print(data.shape)
print("-----------after shuffling -------------------")
shuffler = DFShuffler()
data = shuffler.fit_transform(data)
print(data.head()); print(data.shape)
print("-----------after max scaling -------------------")
scaler = TSMinMaxScaler(scaling_len=hist_len)
data = scaler.fit_transform(data)
print(data.head()); print(data.shape)
print("-----------after X Y split -------------------")
splitter = TimeSeriesXYSplitter(hist_len, fcst_len)
data = splitter.fit_transform(data)
print(data.keys())
print(data['X'])
print(data['Y'])
Target length {self.series_len} exceeds current length {curr_len}\")\n\n sampled_data = []\n data_arr = X.values\n for _ in range(self.num_reps):\n for i in range(data_arr.shape[0]):\n rand_idx = np.random.randint(0, curr_len - self.series_len)\n sampled_data.append( data_arr[i, rand_idx: rand_idx + self.series_len] )\n \n idx = list(X.index) * self.num_reps\n col_names = [ f't_{i}' for i in range(self.series_len)]\n sampled_data = pd.DataFrame(sampled_data, columns=col_names, index= idx)\n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(sampled_data.head())\n print(sampled_data.shape) \n return sampled_data\n\n\n\nclass AddLeftRightFlipper(BaseEstimator, TransformerMixin):\n '''\n Adds left right flipped version of tensor\n '''\n def __init__(self): pass\n def fit(self, X, y=None): return self\n\n def transform(self, X):\n X_flipped = pd.DataFrame( np.fliplr(X), columns=X.columns, index=X.index )\n X = pd.concat([X, X_flipped], axis=0, ignore_index=True)\n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(X.head())\n print(X.shape) \n return X\n\n\n\nclass SeriesLengthTrimmer(BaseEstimator, TransformerMixin):\n '''\n Trims the length of a series to use latest data points \n '''\n def __init__(self, series_len): \n self.series_len = series_len\n\n def fit(self, X, y=None): return self\n\n def transform(self, X):\n curr_len = X.shape[1]\n\n if curr_len < self.series_len: \n raise Exception(f\"Error trimming series. Target length {self.series_len} exceeds current length {curr_len}\")\n \n X_vals = X.values[:, -self.series_len:]\n col_names = [ f't_{i}' for i in range(self.series_len)]\n X_vals = pd.DataFrame(X_vals, columns=col_names, index=X.index) \n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(X_vals.head())\n print(X_vals.shape) \n return X_vals\n\n\n\nclass DFShuffler(BaseEstimator, TransformerMixin):\n def __init__(self, shuffle = True): \n self.shuffle = shuffle\n\n def fit(self, X, y=None): return self\n\n def transform(self, X, y=None): \n if self.shuffle == False: return X \n X = X.sample(frac=1) \n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(X.head())\n print(X.shape) \n return X\n\n\n\nclass TSMinMaxScaler2(BaseEstimator, TransformerMixin):\n '''Scales history and forecast parts of time-series based on history data'''\n def __init__(self, scaling_len, upper_bound = 5.): \n if scaling_len < 2: raise Exception(\"Min Max scaling length must be >= 2\")\n self.scaling_len = scaling_len\n self.max_scaler = MinMaxScaler()\n self.row_sums = None\n self.upper_bound = upper_bound\n \n\n def fit(self, X, y=None): \n return self\n \n def transform(self, X, y=None): \n curr_len = X.shape[1]\n if curr_len < self.scaling_len: \n msg = f''' Error scaling series. \n Sum of scaling_len {self.scaling_len} should not exceed series length {curr_len}. 
'''\n raise Exception(msg)\n \n df = X if curr_len == self.scaling_len else X[ X.columns[ : self.scaling_len ] ] \n self.row_sums = df.sum(axis=1)\n df = df[self.row_sums != 0]\n self.max_scaler.fit(df.T)\n \n # print(X.shape, self.row_sums.shape)\n # sys.exit()\n X_filtered = X[self.row_sums != 0].copy()\n vals = self.max_scaler.transform(X_filtered.T).T\n vals = np.where(vals > self.upper_bound, self.upper_bound, vals)\n\n X = pd.DataFrame(vals, columns=X_filtered.columns, index=X_filtered.index)\n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(X.head())\n print(X.shape) \n return X\n\n def inverse_transform(self, X):\n return self.max_scaler.inverse_transform(X.T).T\n\n\n\nclass TSMinMaxScaler(BaseEstimator, TransformerMixin):\n '''Scales history and forecast parts of time-series based on history data'''\n def __init__(self, scaling_len, upper_bound = 5.): \n if scaling_len < 2: raise Exception(\"Min Max scaling length must be >= 2\")\n self.scaling_len = scaling_len\n self.min_vals = None \n self.max_vals = None \n self.ranges = None \n self.upper_bound = upper_bound\n \n\n def fit(self, X, y=None): return self\n\n \n def transform(self, X, y=None): \n\n if self.scaling_len < 1: \n msg = f''' Error scaling series. \n scaling_len needs to be at least 2. Given length is {self.scaling_len}. '''\n raise Exception(msg)\n \n\n X_vals = X.values\n self.min_vals = np.expand_dims( X_vals[ :, : self.scaling_len ].min(axis=1), axis = 1)\n self.max_vals = np.expand_dims( X_vals[ :, : self.scaling_len ].max(axis=1), axis = 1)\n\n self.ranges = self.max_vals - self.min_vals\n self.ranges = np.where(self.ranges == 0, 1e-5, self.ranges)\n # print(self.min_vals.shape, self.ranges.shape)\n\n # sys.exit()\n X_vals = X_vals - self.min_vals\n X_vals = np.divide(X_vals, self.ranges) \n X_vals = np.where( X_vals < self.upper_bound, X_vals, self.upper_bound)\n\n X = pd.DataFrame(X_vals, columns=X.columns, index=X.index)\n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(X.head())\n print(X.shape) \n return X\n \n\n def inverse_transform(self, X):\n X = X * self.ranges\n X = X + self.min_vals\n return X\n\n\n\nclass TimeSeriesXYSplitter(BaseEstimator, TransformerMixin):\n '''Splits the time series into X (history) and Y (forecast) series'''\n def __init__(self, X_len, Y_len): \n self.X_len = X_len\n self.Y_len = Y_len\n \n\n def fit(self, X, y=None): return self\n\n def transform(self, X, y=None): \n curr_len = X.shape[1]\n encode_len = self.X_len\n decode_len = (0 if self.Y_len == 'auto' else self.Y_len)\n\n if curr_len < encode_len + decode_len: \n msg = f''' Error splitting series. \n Sum of X_len {self.X_len} and Y_len {self.Y_len} should not exceed series length {curr_len}. 
'''\n raise Exception(msg)\n\n # bit of a hack but sklearn pipeline only allows one thing to be returned in transform()\n cols = X.columns \n if self.Y_len == 'auto': return { 'X': X[cols[-self.X_len :]], 'Y': X[cols[-self.X_len :]] }\n if self.Y_len == 0: return { 'X': X[cols[-self.X_len :]], 'Y': pd.DataFrame() }\n return {\n 'X': X[cols[-( self.X_len + self.Y_len) : -self.Y_len] ], \n 'Y':X[cols[ -self.Y_len : ] ] \n }\n\n\n\nif __name__ == \"__main__\": \n\n # data = pd.read_parquet(\"wfm_single_q_Internal_daily_history.parquet\")\n # data = pd.read_parquet(\"WFM_200q_Internal_daily_history.parquet\")\n # data.rename(columns={ 'queueid': 'seriesid', 'date': 'ts', 'callvolume': 'v',}, inplace=True)\n \n data = pd.read_parquet(\"History_series_0028C91B.002795_filled.parquet\")\n data.rename(columns={ 'queueid': 'seriesid', 'time': 'ts', 'callvolume': 'v',}, inplace=True)\n\n \n data['ts'] = pd.to_datetime(data['ts'])\n data = data[['seriesid', 'ts', 'v']]\n\n hist_len = 365\n fcst_len = 90\n\n print(\"-----------orig data -------------------\")\n # print(data.head()); print(data.shape) \n \n print(\"-----------after daily agg -------------------\")\n agg = DailyAggregator('seriesid', 'ts', 'v')\n data = agg.fit_transform(data)\n # print(data.head()); print(data.shape) \n\n print(\"-----------after adding missing intervals -------------------\")\n filler = MissingTimeIntervalFiller('seriesid', 'ts', 'v', 'days', 1)\n data = filler.fit_transform(data)\n # print(data.head()); print(data.shape) \n\n print(\"-----------after pivoting -------------------\")\n pivoter = DataPivoter('seriesid', 'v', 'ts', 0)\n data = pivoter.fit_transform(data)\n # print(data.head()); print(data.shape) \n\n print(\"-----------after indexing -------------------\")\n indexer = IndexSetter('seriesid', drop_existing=True)\n data = indexer.fit_transform(data)\n # print(data.head()); print(data.shape) \n\n print(\"-----------after sampling -------------------\")\n sampler = SubTimeSeriesSampler(series_len=hist_len+fcst_len, num_reps=5)\n data = sampler.fit_transform(data)\n # print(data.head()); print(data.shape) \n\n print(\"-----------after shuffling -------------------\")\n shuffler = DFShuffler()\n data = shuffler.fit_transform(data)\n print(data.head()); print(data.shape) \n\n print(\"-----------after max scaling -------------------\")\n scaler = TSMinMaxScaler(scaling_len=hist_len)\n data = scaler.fit_transform(data)\n print(data.head()); print(data.shape) \n\n print(\"-----------after X Y split -------------------\")\n splitter = TimeSeriesXYSplitter(hist_len, fcst_len)\n data = splitter.fit_transform(data)\n print(data.keys())\n print(data['X'])\n print(data['Y'])\n\n\n\n",
"step-ids": [
41,
44,
52,
56,
62
]
}
|
[
41,
44,
52,
56,
62
] |
from django.apps import AppConfig
class AttendaceConfig(AppConfig):
name = 'attendace'
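
# Hedged wiring sketch (settings.py is a separate file; the dotted path shown
# is the conventional one implied by name = 'attendace' above, not confirmed
# by this repo):
# INSTALLED_APPS = ['attendace.apps.AttendaceConfig', ...]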
|
normal
|
{
"blob_id": "d5d61b23dc14ffdfe7fe6f983164916863928eaf",
"index": 3685,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass AttendaceConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AttendaceConfig(AppConfig):\n name = 'attendace'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass AttendaceConfig(AppConfig):\n name = 'attendace'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from celery_app import celery_app
@celery_app.task
def demo_celery_run():
return 'result is ok'
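

if __name__ == '__main__':
    # Hedged usage sketch (not part of the original file): enqueue the task
    # and wait for a worker to run it. Assumes a broker and result backend
    # are configured in celery_app.
    async_result = demo_celery_run.delay()
    print(async_result.get(timeout=10))  # -> 'result is ok'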
|
normal
|
{
"blob_id": "4bb973b598a9c35394a0cd78ed9ba807f3a595d7",
"index": 2323,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@celery_app.task\ndef demo_celery_run():\n return 'result is ok'\n",
"step-3": "from celery_app import celery_app\n\n\n@celery_app.task\ndef demo_celery_run():\n return 'result is ok'\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import glob
import os
import xarray as xr
from model_diagnostics import *
data_root = '../data/synthetic/standard/'
var_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']
eke = 0.01
##########################
output = []
diagnostic_functions = [basic_stats]
for var in var_list:
grid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))
for f in grid_files:
output.append(analize_member(f, var,diagnostic_functions))
print("processing %s" %os.path.basename(f))
var = 'hs'
diagnostic_functions = [hs_spectral_slope]
grid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))
for f in grid_files:
output.append(analize_member(f, var,diagnostic_functions))
print("processing %s" %os.path.basename(f))
var = 'cur'
diagnostic_functions = [flow_stats]
grid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print("processing %s" %os.path.basename(f))
ds = xr.merge(output)
df = ds.to_dataframe()
df = df.reset_index()
data = df.to_xarray()
data.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc'%eke, mode='w')
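
# Hedged read-back check (not in the original script): re-open the stats file
# just written, using the same path pattern, and list its diagnostic variables.
check = xr.open_dataset('../data/model_stats/S%s_gridded_stats.nc' % eke)
print(check.data_vars)
check.close()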
|
normal
|
{
"blob_id": "6b727cdfc684db4ba919cd5390fe45de43a806fe",
"index": 309,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor var in var_list:\n grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\n for f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\n<mask token>\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\n<mask token>\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\n<mask token>\ndata.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc' % eke, mode='w')\n",
"step-3": "<mask token>\ndata_root = '../data/synthetic/standard/'\nvar_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']\neke = 0.01\noutput = []\ndiagnostic_functions = [basic_stats]\nfor var in var_list:\n grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\n for f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nvar = 'hs'\ndiagnostic_functions = [hs_spectral_slope]\ngrid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nvar = 'cur'\ndiagnostic_functions = [flow_stats]\ngrid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nds = xr.merge(output)\ndf = ds.to_dataframe()\ndf = df.reset_index()\ndata = df.to_xarray()\ndata.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc' % eke, mode='w')\n",
"step-4": "import glob\nimport xarray as xr\nfrom model_diagnostics import *\ndata_root = '../data/synthetic/standard/'\nvar_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']\neke = 0.01\noutput = []\ndiagnostic_functions = [basic_stats]\nfor var in var_list:\n grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\n for f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nvar = 'hs'\ndiagnostic_functions = [hs_spectral_slope]\ngrid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nvar = 'cur'\ndiagnostic_functions = [flow_stats]\ngrid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nds = xr.merge(output)\ndf = ds.to_dataframe()\ndf = df.reset_index()\ndata = df.to_xarray()\ndata.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc' % eke, mode='w')\n",
"step-5": "import glob\nimport xarray as xr\n\nfrom model_diagnostics import *\n\ndata_root = '../data/synthetic/standard/'\nvar_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']\neke = 0.01\n##########################\noutput = []\ndiagnostic_functions = [basic_stats]\nfor var in var_list:\n grid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))\n for f in grid_files:\n output.append(analize_member(f, var,diagnostic_functions))\n print(\"processing %s\" %os.path.basename(f))\nvar = 'hs'\ndiagnostic_functions = [hs_spectral_slope]\ngrid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))\nfor f in grid_files:\n output.append(analize_member(f, var,diagnostic_functions))\n print(\"processing %s\" %os.path.basename(f))\nvar = 'cur'\ndiagnostic_functions = [flow_stats]\ngrid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print(\"processing %s\" %os.path.basename(f))\n\nds = xr.merge(output)\ndf = ds.to_dataframe()\ndf = df.reset_index()\ndata = df.to_xarray()\ndata.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc'%eke, mode='w')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
""" Interfaces to Juju API ModelManager """
from conjureup import juju
@juju.requires_login
def list_models(user='user-admin'):
""" Lists Juju Models
Arguments:
user: Name of user to list models for.
Returns:
Dictionary of known Juju Models (default: user-admin)
"""
models = juju.CLIENT.ModelManager(request="ListModels",
params={'Tag': user})
return models['UserModels']
@juju.requires_login
def model_info(model):
""" Returns information on select model
Arguments:
model: name of model to inspect
Returns:
Dictionary of model attributes
"""
return juju.CLIENT.Client(request="ModelInfo",
params={"Name": model})
@juju.requires_login
def model_status():
""" Returns the FullStatus output of a model
Returns:
Dictionary of model status
"""
return juju.CLIENT.Client(request="FullStatus")
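

if __name__ == '__main__':
    # Hedged usage sketch (not from the original module): assumes juju.CLIENT
    # is already connected and logged in, which the @juju.requires_login
    # decorators above guard against.
    for m in list_models():
        print(m)
    print(model_status())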
|
normal
|
{
"blob_id": "11045cffc6d47902be7236e1d684422317f2c5f9",
"index": 1444,
"step-1": "<mask token>\n\n\n@juju.requires_login\ndef list_models(user='user-admin'):\n \"\"\" Lists Juju Models\n\n Arguments:\n user: Name of user to list models for.\n\n Returns:\n Dictionary of known Juju Models (default: user-admin)\n \"\"\"\n models = juju.CLIENT.ModelManager(request='ListModels', params={'Tag':\n user})\n return models['UserModels']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@juju.requires_login\ndef list_models(user='user-admin'):\n \"\"\" Lists Juju Models\n\n Arguments:\n user: Name of user to list models for.\n\n Returns:\n Dictionary of known Juju Models (default: user-admin)\n \"\"\"\n models = juju.CLIENT.ModelManager(request='ListModels', params={'Tag':\n user})\n return models['UserModels']\n\n\n@juju.requires_login\ndef model_info(model):\n \"\"\" Returns information on select model\n\n Arguments:\n model: name of model to inspect\n\n Returns:\n Dictionary of model attributes\n \"\"\"\n return juju.CLIENT.Client(request='ModelInfo', params={'Name': model})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@juju.requires_login\ndef list_models(user='user-admin'):\n \"\"\" Lists Juju Models\n\n Arguments:\n user: Name of user to list models for.\n\n Returns:\n Dictionary of known Juju Models (default: user-admin)\n \"\"\"\n models = juju.CLIENT.ModelManager(request='ListModels', params={'Tag':\n user})\n return models['UserModels']\n\n\n@juju.requires_login\ndef model_info(model):\n \"\"\" Returns information on select model\n\n Arguments:\n model: name of model to inspect\n\n Returns:\n Dictionary of model attributes\n \"\"\"\n return juju.CLIENT.Client(request='ModelInfo', params={'Name': model})\n\n\n@juju.requires_login\ndef model_status():\n \"\"\" Returns the FullStatus output of a model\n\n Returns:\n Dictionary of model status\n \"\"\"\n return juju.CLIENT.Client(request='FullStatus')\n",
"step-4": "<mask token>\nfrom conjureup import juju\n\n\n@juju.requires_login\ndef list_models(user='user-admin'):\n \"\"\" Lists Juju Models\n\n Arguments:\n user: Name of user to list models for.\n\n Returns:\n Dictionary of known Juju Models (default: user-admin)\n \"\"\"\n models = juju.CLIENT.ModelManager(request='ListModels', params={'Tag':\n user})\n return models['UserModels']\n\n\n@juju.requires_login\ndef model_info(model):\n \"\"\" Returns information on select model\n\n Arguments:\n model: name of model to inspect\n\n Returns:\n Dictionary of model attributes\n \"\"\"\n return juju.CLIENT.Client(request='ModelInfo', params={'Name': model})\n\n\n@juju.requires_login\ndef model_status():\n \"\"\" Returns the FullStatus output of a model\n\n Returns:\n Dictionary of model status\n \"\"\"\n return juju.CLIENT.Client(request='FullStatus')\n",
"step-5": "\"\"\" Interfaces to Juju API ModelManager \"\"\"\n\nfrom conjureup import juju\n\n\n@juju.requires_login\ndef list_models(user='user-admin'):\n \"\"\" Lists Juju Models\n\n Arguments:\n user: Name of user to list models for.\n\n Returns:\n Dictionary of known Juju Models (default: user-admin)\n \"\"\"\n models = juju.CLIENT.ModelManager(request=\"ListModels\",\n params={'Tag': user})\n return models['UserModels']\n\n\n@juju.requires_login\ndef model_info(model):\n \"\"\" Returns information on select model\n\n Arguments:\n model: name of model to inspect\n\n Returns:\n Dictionary of model attributes\n \"\"\"\n return juju.CLIENT.Client(request=\"ModelInfo\",\n params={\"Name\": model})\n\n\n@juju.requires_login\ndef model_status():\n \"\"\" Returns the FullStatus output of a model\n\n Returns:\n Dictionary of model status\n \"\"\"\n return juju.CLIENT.Client(request=\"FullStatus\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Imports
from __future__ import print_function
import numpy
from numpy.random import randint
from enum import Enum, IntFlag
__all__ = ["common", "plot"]
class result(IntFlag):  # IntFlag (not plain Enum) so faces combine and test with | and &
CRIT = 16
HIT = 8
EVADE = 4
FOCUS = 2
BLANK = 1
def result_str(res):
str = ""
if res & result.BLANK:
str += "BLANK"
if res & result.FOCUS:
if len(str):
str += "|"
str += "FOCUS"
if res & result.HIT:
if len(str):
str += "|"
str += "HIT"
if res & result.CRIT:
if len(str):
str += "|"
str += "CRIT"
if res & result.EVADE:
if len(str):
str += "|"
str += "EVADE"
return str
# DICE CLASSES DEFINITIONS
__attack_die_faces__ = [result.CRIT, result.HIT, result.HIT, result.HIT, result.FOCUS, result.FOCUS, result.BLANK, result.BLANK]
__evade_die_faces__ = [result.EVADE, result.EVADE, result.EVADE, result.FOCUS, result.FOCUS, result.BLANK, result.BLANK, result.BLANK]
class die:
def __init__ (self):
self.rerolled = False
def __str__(self):
return result_str(self.result)
@staticmethod
def __roll_die__(face_list):
return face_list[randint(0, 8)]
def equals(self, result):
return self.result & result
def change(self, to):
self.result = to
class attack_die(die):
def __init__(self):
die.__init__(self)
self.__roll__()
def __roll__(self):
self.result = self.__roll_die__(__attack_die_faces__)
def reroll(self):
if not self.rerolled:
self.__roll__()
self.rerolled = True
return True
return False
class evade_die(die):
def __init__(self):
die.__init__(self)
self.__roll__()
def __roll__(self):
self.result = die.__roll_die__(__evade_die_faces__)
def reroll(self):
if not self.rerolled:
self.__roll__()
self.rerolled = True
return True
return False
# DICE LIST METHOD DEFINITIONS
def count_relevant_results(dice_list, relevant_results):
count = 0
for i in range(len(dice_list)):
if dice_list[i].result & relevant_results:
count += 1
return count
def roll_attack_dice(number):
dice_results = []
for i in range(number):
dice_results.append(attack_die())
return dice_results
def roll_evade_dice(number):
dice_results = []
for i in range(number):
dice_results.append(evade_die())
return dice_results
# DICE LIST MODIFICATION DEFINITIONS
class perform(Enum):
FOR_ALL = 7
ONCE = 1
class change:
def __init__(self, rule, from_result, to_result):
self.rule = rule
self.from_result = from_result
self.to_result = to_result
def modify_dice_list(self, dice_list):
for i in range(len(dice_list)):
if dice_list[i].equals(self.from_result):
dice_list[i].change(self.to_result)
if self.rule == perform.ONCE:
return dice_list
return dice_list
class reroll:
def __init__(self, rule, from_result):
self.rule = rule
self.from_result = from_result
def modify_dice_list(self, dice_list):
for i in range(len(dice_list)):
if dice_list[i].equals(self.from_result):
if dice_list[i].reroll() and self.rule == perform.ONCE:
return dice_list
return dice_list
# Debug
def __print_dice_list(dice_list):
for i in range(len(dice_list)):
print(dice_list[i], end=" ")
print("")
def get_dice_chances(number_of_dice, dice_roll_function, relevant_results, enemy_modifications, friendly_modifications):
relevant_counts = numpy.zeros((8))
num_iterations = 200000
for i in range(num_iterations):
dice_list = dice_roll_function(number_of_dice)
# Perform modifications
for j in range(len(enemy_modifications)):
dice_list = enemy_modifications[j].modify_dice_list(dice_list)
for j in range(len(friendly_modifications)):
dice_list = friendly_modifications[j].modify_dice_list(dice_list)
relevant_count_for_this_roll = count_relevant_results(dice_list, relevant_results)
relevant_counts[relevant_count_for_this_roll] += 1
chances = numpy.zeros((8))
for i in range(len(chances)):
chances[i] = float(relevant_counts[i]) / float(num_iterations)
return chances
def get_hit_chances(number_of_dice, enemy_modifications=[], friendly_modifications=[]):
return get_dice_chances(number_of_dice, roll_attack_dice, result.HIT | result.CRIT, enemy_modifications, friendly_modifications)
def get_evade_chances(number_of_dice, enemy_modifications=[], friendly_modifications=[]):
return get_dice_chances(number_of_dice, roll_evade_dice, result.EVADE, enemy_modifications, friendly_modifications)
def get_crit_chances(number_of_dice, enemy_modifications=[], friendly_modifications=[]):
    return get_dice_chances(number_of_dice, roll_attack_dice, result.CRIT, enemy_modifications, friendly_modifications)
def hits_vs_evade(hit_chances, evade_chances):
chances = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
for i in range(1, len(hit_chances)):
for j in range(i):
chances[i - j] = chances[i - j] + (hit_chances[i] * evade_chances[j])
total = 0.0
for i in range(1, len(chances)):
total = total + chances[i]
chances[0] = 1.0 - total
return chances
def average_chance(chance_list):
avg = 0.0
for i in range(1, len(chance_list)):
avg = avg + (i * chance_list[i])
return avg
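

if __name__ == '__main__':
    # Hedged usage sketch (dice counts and modifications are illustrative):
    # 3 attack dice with all focus results rerolled once vs. 2 evade dice,
    # reduced to the expected number of unblocked hits. Each chance table is
    # Monte Carlo over 200,000 rolls, so this takes a few seconds.
    hit = get_hit_chances(3, friendly_modifications=[reroll(perform.FOR_ALL, result.FOCUS)])
    evd = get_evade_chances(2)
    print(average_chance(hits_vs_evade(hit, evd)))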
|
normal
|
{
"blob_id": "5261346f96e7520b6ef75a292b3d44a6f00d868c",
"index": 5566,
"step-1": "<mask token>\n\n\nclass result(Enum):\n CRIT = 16\n HIT = 8\n EVADE = 4\n FOCUS = 2\n BLANK = 1\n\n\n<mask token>\n\n\nclass die:\n\n def __init__(self):\n self.rerolled = False\n\n def __str__(self):\n return result_str(self.result)\n\n @staticmethod\n def __roll_die__(face_list):\n return face_list[randint(0, 8)]\n\n def equals(self, result):\n return self.result & result\n\n def change(self, to):\n self.result = to\n\n\nclass attack_die(die):\n\n def __init__(self):\n die.__init__(self)\n self.__roll__()\n\n def __roll__(self):\n self.result = self.__roll_die__(__attack_die_faces__)\n\n def reroll(self):\n if not self.rerolled:\n self.__roll__()\n self.rerolled = True\n return True\n return False\n\n\nclass evade_die(die):\n\n def __init__(self):\n die.__init__(self)\n self.__roll__()\n\n def __roll__(self):\n self.result = die.__roll_die__(__evade_die_faces__)\n\n def reroll(self):\n if not self.rerolled:\n self.__roll__()\n self.rerolled = True\n return True\n return False\n\n\n<mask token>\n\n\nclass perform(Enum):\n FOR_ALL = 7\n ONCE = 1\n\n\nclass change:\n\n def __init__(self, rule, from_result, to_result):\n self.rule = rule\n self.from_result = from_result\n self.to_result = to_result\n\n def modify_dice_list(self, dice_list):\n for i in range(len(dice_list)):\n if dice_list[i].equals(self.from_result):\n dice_list[i].change(self.to_result)\n if self.rule == perform.ONCE:\n return dice_list\n return dice_list\n\n\nclass reroll:\n\n def __init__(self, rule, from_result):\n self.rule = rule\n self.from_result = from_result\n\n def modify_dice_list(self, dice_list):\n for i in range(len(dice_list)):\n if dice_list[i].equals(self.from_result):\n if dice_list[i].reroll() and self.rule == perform.ONCE:\n return dice_list\n return dice_list\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass result(Enum):\n CRIT = 16\n HIT = 8\n EVADE = 4\n FOCUS = 2\n BLANK = 1\n\n\n<mask token>\n\n\nclass die:\n\n def __init__(self):\n self.rerolled = False\n\n def __str__(self):\n return result_str(self.result)\n\n @staticmethod\n def __roll_die__(face_list):\n return face_list[randint(0, 8)]\n\n def equals(self, result):\n return self.result & result\n\n def change(self, to):\n self.result = to\n\n\nclass attack_die(die):\n\n def __init__(self):\n die.__init__(self)\n self.__roll__()\n\n def __roll__(self):\n self.result = self.__roll_die__(__attack_die_faces__)\n\n def reroll(self):\n if not self.rerolled:\n self.__roll__()\n self.rerolled = True\n return True\n return False\n\n\nclass evade_die(die):\n\n def __init__(self):\n die.__init__(self)\n self.__roll__()\n\n def __roll__(self):\n self.result = die.__roll_die__(__evade_die_faces__)\n\n def reroll(self):\n if not self.rerolled:\n self.__roll__()\n self.rerolled = True\n return True\n return False\n\n\n<mask token>\n\n\ndef roll_evade_dice(number):\n dice_results = []\n for i in range(number):\n dice_results.append(evade_die())\n return dice_results\n\n\nclass perform(Enum):\n FOR_ALL = 7\n ONCE = 1\n\n\nclass change:\n\n def __init__(self, rule, from_result, to_result):\n self.rule = rule\n self.from_result = from_result\n self.to_result = to_result\n\n def modify_dice_list(self, dice_list):\n for i in range(len(dice_list)):\n if dice_list[i].equals(self.from_result):\n dice_list[i].change(self.to_result)\n if self.rule == perform.ONCE:\n return dice_list\n return dice_list\n\n\nclass reroll:\n\n def __init__(self, rule, from_result):\n self.rule = rule\n self.from_result = from_result\n\n def modify_dice_list(self, dice_list):\n for i in range(len(dice_list)):\n if dice_list[i].equals(self.from_result):\n if dice_list[i].reroll() and self.rule == perform.ONCE:\n return dice_list\n return dice_list\n\n\n<mask token>\n\n\ndef average_chance(chance_list):\n avg = 0.0\n for i in range(1, len(chance_list)):\n avg = avg + i * chance_list[i]\n return avg\n",
"step-3": "<mask token>\n\n\nclass result(Enum):\n CRIT = 16\n HIT = 8\n EVADE = 4\n FOCUS = 2\n BLANK = 1\n\n\ndef result_str(res):\n str = ''\n if res & result.BLANK:\n str += 'BLANK'\n if res & result.FOCUS:\n if len(str):\n str += '|'\n str += 'FOCUS'\n if res & result.HIT:\n if len(str):\n str += '|'\n str += 'HIT'\n if res & result.CRIT:\n if len(str):\n str += '|'\n str += 'CRIT'\n if res & result.EVADE:\n if len(str):\n str += '|'\n str += 'EVADE'\n return str\n\n\n<mask token>\n\n\nclass die:\n\n def __init__(self):\n self.rerolled = False\n\n def __str__(self):\n return result_str(self.result)\n\n @staticmethod\n def __roll_die__(face_list):\n return face_list[randint(0, 8)]\n\n def equals(self, result):\n return self.result & result\n\n def change(self, to):\n self.result = to\n\n\nclass attack_die(die):\n\n def __init__(self):\n die.__init__(self)\n self.__roll__()\n\n def __roll__(self):\n self.result = self.__roll_die__(__attack_die_faces__)\n\n def reroll(self):\n if not self.rerolled:\n self.__roll__()\n self.rerolled = True\n return True\n return False\n\n\nclass evade_die(die):\n\n def __init__(self):\n die.__init__(self)\n self.__roll__()\n\n def __roll__(self):\n self.result = die.__roll_die__(__evade_die_faces__)\n\n def reroll(self):\n if not self.rerolled:\n self.__roll__()\n self.rerolled = True\n return True\n return False\n\n\n<mask token>\n\n\ndef roll_evade_dice(number):\n dice_results = []\n for i in range(number):\n dice_results.append(evade_die())\n return dice_results\n\n\nclass perform(Enum):\n FOR_ALL = 7\n ONCE = 1\n\n\nclass change:\n\n def __init__(self, rule, from_result, to_result):\n self.rule = rule\n self.from_result = from_result\n self.to_result = to_result\n\n def modify_dice_list(self, dice_list):\n for i in range(len(dice_list)):\n if dice_list[i].equals(self.from_result):\n dice_list[i].change(self.to_result)\n if self.rule == perform.ONCE:\n return dice_list\n return dice_list\n\n\nclass reroll:\n\n def __init__(self, rule, from_result):\n self.rule = rule\n self.from_result = from_result\n\n def modify_dice_list(self, dice_list):\n for i in range(len(dice_list)):\n if dice_list[i].equals(self.from_result):\n if dice_list[i].reroll() and self.rule == perform.ONCE:\n return dice_list\n return dice_list\n\n\ndef __print_dice_list(dice_list):\n for i in range(len(dice_list)):\n print(dice_list[i], end=' ')\n print('')\n\n\n<mask token>\n\n\ndef hits_vs_evade(hit_chances, evade_chances):\n chances = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n for i in range(1, len(hit_chances)):\n for j in range(i):\n chances[i - j] = chances[i - j] + hit_chances[i] * evade_chances[j]\n total = 0.0\n for i in range(1, len(chances)):\n total = total + chances[i]\n chances[0] = 1.0 - total\n return chances\n\n\ndef average_chance(chance_list):\n avg = 0.0\n for i in range(1, len(chance_list)):\n avg = avg + i * chance_list[i]\n return avg\n",
"step-4": "<mask token>\n\n\nclass result(Enum):\n CRIT = 16\n HIT = 8\n EVADE = 4\n FOCUS = 2\n BLANK = 1\n\n\ndef result_str(res):\n str = ''\n if res & result.BLANK:\n str += 'BLANK'\n if res & result.FOCUS:\n if len(str):\n str += '|'\n str += 'FOCUS'\n if res & result.HIT:\n if len(str):\n str += '|'\n str += 'HIT'\n if res & result.CRIT:\n if len(str):\n str += '|'\n str += 'CRIT'\n if res & result.EVADE:\n if len(str):\n str += '|'\n str += 'EVADE'\n return str\n\n\n<mask token>\n\n\nclass die:\n\n def __init__(self):\n self.rerolled = False\n\n def __str__(self):\n return result_str(self.result)\n\n @staticmethod\n def __roll_die__(face_list):\n return face_list[randint(0, 8)]\n\n def equals(self, result):\n return self.result & result\n\n def change(self, to):\n self.result = to\n\n\nclass attack_die(die):\n\n def __init__(self):\n die.__init__(self)\n self.__roll__()\n\n def __roll__(self):\n self.result = self.__roll_die__(__attack_die_faces__)\n\n def reroll(self):\n if not self.rerolled:\n self.__roll__()\n self.rerolled = True\n return True\n return False\n\n\nclass evade_die(die):\n\n def __init__(self):\n die.__init__(self)\n self.__roll__()\n\n def __roll__(self):\n self.result = die.__roll_die__(__evade_die_faces__)\n\n def reroll(self):\n if not self.rerolled:\n self.__roll__()\n self.rerolled = True\n return True\n return False\n\n\n<mask token>\n\n\ndef roll_attack_dice(number):\n dice_results = []\n for i in range(number):\n dice_results.append(attack_die())\n return dice_results\n\n\ndef roll_evade_dice(number):\n dice_results = []\n for i in range(number):\n dice_results.append(evade_die())\n return dice_results\n\n\nclass perform(Enum):\n FOR_ALL = 7\n ONCE = 1\n\n\nclass change:\n\n def __init__(self, rule, from_result, to_result):\n self.rule = rule\n self.from_result = from_result\n self.to_result = to_result\n\n def modify_dice_list(self, dice_list):\n for i in range(len(dice_list)):\n if dice_list[i].equals(self.from_result):\n dice_list[i].change(self.to_result)\n if self.rule == perform.ONCE:\n return dice_list\n return dice_list\n\n\nclass reroll:\n\n def __init__(self, rule, from_result):\n self.rule = rule\n self.from_result = from_result\n\n def modify_dice_list(self, dice_list):\n for i in range(len(dice_list)):\n if dice_list[i].equals(self.from_result):\n if dice_list[i].reroll() and self.rule == perform.ONCE:\n return dice_list\n return dice_list\n\n\ndef __print_dice_list(dice_list):\n for i in range(len(dice_list)):\n print(dice_list[i], end=' ')\n print('')\n\n\n<mask token>\n\n\ndef get_hit_chances(number_of_dice, enemy_modifications=[],\n friendly_modifications=[]):\n return get_dice_chances(number_of_dice, roll_attack_dice, result.HIT |\n result.CRIT, enemy_modifications, friendly_modifications)\n\n\ndef get_evade_chances(number_of_dice, enemy_modifications=[],\n friendly_modifications=[]):\n return get_dice_chances(number_of_dice, roll_evade_dice, result.EVADE,\n enemy_modifications, friendly_modifications)\n\n\ndef get_crit_chances(number_of_dice, enemy_modifications=[],\n friendly_modifications=[]):\n return get_dice_chances(number_of_dice, roll_attack_dice, result.CRIT,\n eenemy_modifications, friendly_modifications)\n\n\ndef hits_vs_evade(hit_chances, evade_chances):\n chances = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n for i in range(1, len(hit_chances)):\n for j in range(i):\n chances[i - j] = chances[i - j] + hit_chances[i] * evade_chances[j]\n total = 0.0\n for i in range(1, len(chances)):\n total = total + chances[i]\n chances[0] = 
1.0 - total\n return chances\n\n\ndef average_chance(chance_list):\n avg = 0.0\n for i in range(1, len(chance_list)):\n avg = avg + i * chance_list[i]\n return avg\n",
"step-5": "# Imports\r\nfrom __future__ import print_function\r\n\r\nimport numpy\r\nfrom numpy.random import randint\r\nfrom enum import Enum\r\n\r\n__all__ = [\"common\", \"plot\"]\r\n\r\nclass result(Enum):\r\n\tCRIT = 16\r\n\tHIT = 8\r\n\tEVADE = 4\r\n\tFOCUS = 2\r\n\tBLANK = 1\r\n\t\r\ndef result_str(res):\r\n\tstr = \"\"\r\n\tif res & result.BLANK:\r\n\t\tstr += \"BLANK\"\r\n\tif res & result.FOCUS:\r\n\t\tif len(str):\r\n\t\t\tstr += \"|\"\r\n\t\tstr += \"FOCUS\"\r\n\tif res & result.HIT:\r\n\t\tif len(str):\r\n\t\t\tstr += \"|\"\r\n\t\tstr += \"HIT\"\r\n\tif res & result.CRIT:\r\n\t\tif len(str):\r\n\t\t\tstr += \"|\"\r\n\t\tstr += \"CRIT\"\r\n\tif res & result.EVADE:\r\n\t\tif len(str):\r\n\t\t\tstr += \"|\"\r\n\t\tstr += \"EVADE\"\r\n\treturn str\r\n\r\n# DICE CLASSES DEFINITIONS\r\n\r\n__attack_die_faces__ = [result.CRIT, result.HIT, result.HIT, result.HIT, result.FOCUS, result.FOCUS, result.BLANK, result.BLANK]\r\n__evade_die_faces__ = [result.EVADE, result.EVADE, result.EVADE, result.FOCUS, result.FOCUS, result.BLANK, result.BLANK, result.BLANK]\r\n\t\r\n\r\nclass die:\r\n\tdef __init__ (self):\r\n\t\tself.rerolled = False\r\n\tdef __str__(self):\r\n\t\treturn result_str(self.result)\r\n\t@staticmethod\r\n\tdef __roll_die__(face_list):\r\n\t\treturn face_list[randint(0, 8)]\r\n\tdef equals(self, result):\r\n\t\treturn self.result & result\r\n\tdef change(self, to):\r\n\t\tself.result = to\r\n\r\nclass attack_die(die):\r\n\tdef __init__(self):\r\n\t\tdie.__init__(self)\r\n\t\tself.__roll__()\r\n\tdef __roll__(self):\r\n\t\tself.result = self.__roll_die__(__attack_die_faces__)\r\n\tdef reroll(self):\r\n\t\tif not self.rerolled:\r\n\t\t\tself.__roll__()\r\n\t\t\tself.rerolled = True\r\n\t\t\treturn True\r\n\t\treturn False\r\n\r\nclass evade_die(die):\r\n\tdef __init__(self):\r\n\t\tdie.__init__(self)\r\n\t\tself.__roll__()\r\n\tdef __roll__(self):\r\n\t\tself.result = die.__roll_die__(__evade_die_faces__)\r\n\tdef reroll(self):\r\n\t\tif not self.rerolled:\r\n\t\t\tself.__roll__()\r\n\t\t\tself.rerolled = True\r\n\t\t\treturn True\r\n\t\treturn False\r\n\r\n# DICE LIST METHOD DEFINITIONS\r\n\r\ndef count_relevant_results(dice_list, relevant_results):\r\n\tcount = 0\r\n\tfor i in range(len(dice_list)):\r\n\t\tif dice_list[i].result & relevant_results:\r\n\t\t\tcount += 1\r\n\treturn count\r\n\r\ndef roll_attack_dice(number):\r\n\tdice_results = []\r\n\tfor i in range(number):\r\n\t\tdice_results.append(attack_die())\r\n\treturn dice_results\r\n\r\ndef roll_evade_dice(number):\r\n\tdice_results = []\r\n\tfor i in range(number):\r\n\t\tdice_results.append(evade_die())\r\n\treturn dice_results\r\n\r\n# DICE LIST MODIFICATION DEFINITITONS\r\n\r\nclass perform(Enum):\r\n\tFOR_ALL = 7\r\n\tONCE = 1\r\n\r\nclass change:\r\n\tdef __init__(self, rule, from_result, to_result):\r\n\t\tself.rule = rule\r\n\t\tself.from_result = from_result\r\n\t\tself.to_result = to_result\r\n\tdef modify_dice_list(self, dice_list):\r\n\t\tfor i in range(len(dice_list)):\r\n\t\t\tif dice_list[i].equals(self.from_result):\r\n\t\t\t\tdice_list[i].change(self.to_result)\r\n\t\t\t\tif self.rule == perform.ONCE:\r\n\t\t\t\t\treturn dice_list\r\n\t\treturn dice_list\r\n\r\nclass reroll:\r\n\tdef __init__(self, rule, from_result):\r\n\t\tself.rule = rule\r\n\t\tself.from_result = from_result\r\n\tdef modify_dice_list(self, dice_list):\r\n\t\tfor i in range(len(dice_list)):\r\n\t\t\tif dice_list[i].equals(self.from_result):\r\n\t\t\t\tif dice_list[i].reroll() and self.rule == perform.ONCE:\r\n\t\t\t\t\treturn 
dice_list\r\n\t\treturn dice_list\r\n\t\r\n# Debug\r\ndef __print_dice_list(dice_list):\r\n\tfor i in range(len(dice_list)):\r\n\t\tprint(dice_list[i], end=\" \")\r\n\tprint(\"\")\r\n\t\r\ndef get_dice_chances(number_of_dice, dice_roll_function, relevant_results, enemy_modifications, friendly_modifications):\r\n\trelevant_counts = numpy.zeros((8)) \r\n\tnum_iterations = 200000\r\n\tfor i in range(num_iterations):\r\n\t\tdice_list = dice_roll_function(number_of_dice)\r\n\t\t# Perform modifications\r\n\t\tfor j in range(len(enemy_modifications)):\r\n\t\t\tdice_list = enemy_modifications[j].modify_dice_list(dice_list)\r\n\t\tfor j in range(len(friendly_modifications)):\r\n\t\t\tdice_list = friendly_modifications[j].modify_dice_list(dice_list)\r\n\t\trelevant_count_for_this_roll = count_relevant_results(dice_list, relevant_results)\r\n\t\trelevant_counts[relevant_count_for_this_roll] += 1\t\r\n\tchances = numpy.zeros((8))\r\n\tfor i in range(len(chances)):\r\n\t\tchances[i] = float(relevant_counts[i]) / float(num_iterations)\r\n\treturn chances\r\n\r\ndef get_hit_chances(number_of_dice, enemy_modifications=[], friendly_modifications=[]):\r\n\treturn get_dice_chances(number_of_dice, roll_attack_dice, result.HIT | result.CRIT, enemy_modifications, friendly_modifications)\r\n\r\ndef get_evade_chances(number_of_dice, enemy_modifications=[], friendly_modifications=[]):\r\n\treturn get_dice_chances(number_of_dice, roll_evade_dice, result.EVADE, enemy_modifications, friendly_modifications)\r\n\r\ndef get_crit_chances(number_of_dice, enemy_modifications=[], friendly_modifications=[]):\r\n\treturn get_dice_chances(number_of_dice, roll_attack_dice, result.CRIT, eenemy_modifications, friendly_modifications)\r\n\r\ndef hits_vs_evade(hit_chances, evade_chances):\r\n\tchances = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\r\n\tfor i in range(1, len(hit_chances)):\r\n\t\tfor j in range(i):\r\n\t\t\tchances[i - j] = chances[i - j] + (hit_chances[i] * evade_chances[j])\r\n\ttotal = 0.0\r\n\tfor i in range(1, len(chances)):\r\n\t\ttotal = total + chances[i]\r\n\tchances[0] = 1.0 - total\r\n\treturn chances\r\n\t\r\ndef average_chance(chance_list):\r\n\tavg = 0.0\r\n\tfor i in range(1, len(chance_list)):\r\n\t\tavg = avg + (i * chance_list[i])\r\n\treturn avg",
"step-ids": [
24,
26,
29,
33,
38
]
}
|
[
24,
26,
29,
33,
38
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('\n'.join(re.findall(
'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
, sys.stdin.read())))
<|reserved_special_token_1|>
import sys, re
print('\n'.join(re.findall(
'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
, sys.stdin.read())))
<|reserved_special_token_1|>
#!/usr/bin/env python
import sys,re
print('\n'.join(re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',sys.stdin.read())))
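
# Hedged self-check (sample text is illustrative, not part of the original
# stdin filter): the pattern stops at whitespace, so the query string survives.
_sample = 'see https://example.com/a?b=1 in this text'
assert re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', _sample) == ['https://example.com/a?b=1']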
|
flexible
|
{
"blob_id": "4cefaa964251e77a05066af1f61f9fd2a4350d38",
"index": 7622,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('\\n'.join(re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , sys.stdin.read())))\n",
"step-3": "import sys, re\nprint('\\n'.join(re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , sys.stdin.read())))\n",
"step-4": "#!/usr/bin/env python\nimport sys,re\nprint('\\n'.join(re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',sys.stdin.read())))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ConcatZeroPadding(OptimizeRule):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ConcatZeroPadding(OptimizeRule):
def optimize(self, graph: Graph) ->Tuple[Graph, bool]:
"""
Merges padding of ZeroPadding2D and Convolution2D | MaxPooling2D | AveragePooling2D layer
Args:
graph:
Returns:
"""
flag_changed = False
for tail_layer in [Convolution2D, MaxPooling2D, AveragePooling2D]:
matches = search_sub_structure(graph, [ZeroPadding2D, Variable,
tail_layer])
while len(matches) > 0:
match = matches[0]
a1: ZeroPadding2D = match[0]
a2: Union[Convolution2D, MaxPooling2D, AveragePooling2D
] = match[2]
zero_pad = a1.parameters['padding']
conv_pad = a2.parameters['padding']
a2.parameters['padding'] = zero_pad[0] + conv_pad[0], zero_pad[
1] + conv_pad[1]
x1 = a1.inputs['x']
x2 = a2.inputs['x']
a1.remove_all()
a2.remove_input(x2)
a2.append_input('x', x1)
flag_changed = True
matches = search_sub_structure(graph, [ZeroPadding2D,
Variable, tail_layer])
return graph, flag_changed
<|reserved_special_token_1|>
from typing import Tuple, Union
from webdnn.graph.graph import Graph
from webdnn.graph.operators.zero_padding_2d import ZeroPadding2D
from webdnn.graph.operators.convolution2d import Convolution2D
from webdnn.graph.operators.max_pooling_2d import MaxPooling2D
from webdnn.graph.operators.average_pooling_2d import AveragePooling2D
from webdnn.graph.optimize_rule import OptimizeRule
from webdnn.graph.traverse import search_sub_structure
from webdnn.graph.variable import Variable
from webdnn.util import flags
class ConcatZeroPadding(OptimizeRule):
def optimize(self, graph: Graph) ->Tuple[Graph, bool]:
"""
Merges padding of ZeroPadding2D and Convolution2D | MaxPooling2D | AveragePooling2D layer
Args:
graph:
Returns:
"""
flag_changed = False
for tail_layer in [Convolution2D, MaxPooling2D, AveragePooling2D]:
matches = search_sub_structure(graph, [ZeroPadding2D, Variable,
tail_layer])
while len(matches) > 0:
match = matches[0]
a1: ZeroPadding2D = match[0]
a2: Union[Convolution2D, MaxPooling2D, AveragePooling2D
] = match[2]
zero_pad = a1.parameters['padding']
conv_pad = a2.parameters['padding']
a2.parameters['padding'] = zero_pad[0] + conv_pad[0], zero_pad[
1] + conv_pad[1]
x1 = a1.inputs['x']
x2 = a2.inputs['x']
a1.remove_all()
a2.remove_input(x2)
a2.append_input('x', x1)
flag_changed = True
matches = search_sub_structure(graph, [ZeroPadding2D,
Variable, tail_layer])
return graph, flag_changed
<|reserved_special_token_1|>
from typing import Tuple, Union
from webdnn.graph.graph import Graph
from webdnn.graph.operators.zero_padding_2d import ZeroPadding2D
from webdnn.graph.operators.convolution2d import Convolution2D
from webdnn.graph.operators.max_pooling_2d import MaxPooling2D
from webdnn.graph.operators.average_pooling_2d import AveragePooling2D
from webdnn.graph.optimize_rule import OptimizeRule
from webdnn.graph.traverse import search_sub_structure
from webdnn.graph.variable import Variable
from webdnn.util import flags
class ConcatZeroPadding(OptimizeRule):
def optimize(self, graph: Graph) -> Tuple[Graph, bool]:
"""
Merges padding of ZeroPadding2D and Convolution2D | MaxPooling2D | AveragePooling2D layer
Args:
graph:
Returns:
"""
# this optimization is always applied (since backends do not implement padding)
flag_changed = False
for tail_layer in [Convolution2D, MaxPooling2D, AveragePooling2D]:
matches = search_sub_structure(graph, [ZeroPadding2D, Variable, tail_layer])
while len(matches) > 0:
match = matches[0]
a1: ZeroPadding2D = match[0]
a2: Union[Convolution2D, MaxPooling2D, AveragePooling2D] = match[2]
zero_pad = a1.parameters["padding"]
conv_pad = a2.parameters["padding"]
a2.parameters["padding"] = (zero_pad[0] + conv_pad[0], zero_pad[1] + conv_pad[1])
x1 = a1.inputs["x"]
x2 = a2.inputs["x"]
a1.remove_all()
# replace_input checks if the shape of x1 and x2 are same, but this restriction does not hold.
a2.remove_input(x2)
a2.append_input("x", x1)
flag_changed = True
matches = search_sub_structure(graph, [ZeroPadding2D, Variable, tail_layer])
return graph, flag_changed
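

def apply_concat_zero_padding(graph: Graph) -> Graph:
    # Hedged driver sketch (not from the original file): re-apply the rule
    # until it reports no change, the usual fixpoint pattern for OptimizeRule
    # subclasses. Assumes `graph` is an already-constructed webdnn Graph.
    rule = ConcatZeroPadding()
    changed = True
    while changed:
        graph, changed = rule.optimize(graph)
    return graph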
|
flexible
|
{
"blob_id": "687f7f4908e8a5448335f636edf74a627f03c306",
"index": 9110,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ConcatZeroPadding(OptimizeRule):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ConcatZeroPadding(OptimizeRule):\n\n def optimize(self, graph: Graph) ->Tuple[Graph, bool]:\n \"\"\"\n Merges padding of ZeroPadding2D and Convolution2D | MaxPooling2D | AveragePooling2D layer\n Args:\n graph:\n\n Returns:\n\n \"\"\"\n flag_changed = False\n for tail_layer in [Convolution2D, MaxPooling2D, AveragePooling2D]:\n matches = search_sub_structure(graph, [ZeroPadding2D, Variable,\n tail_layer])\n while len(matches) > 0:\n match = matches[0]\n a1: ZeroPadding2D = match[0]\n a2: Union[Convolution2D, MaxPooling2D, AveragePooling2D\n ] = match[2]\n zero_pad = a1.parameters['padding']\n conv_pad = a2.parameters['padding']\n a2.parameters['padding'] = zero_pad[0] + conv_pad[0], zero_pad[\n 1] + conv_pad[1]\n x1 = a1.inputs['x']\n x2 = a2.inputs['x']\n a1.remove_all()\n a2.remove_input(x2)\n a2.append_input('x', x1)\n flag_changed = True\n matches = search_sub_structure(graph, [ZeroPadding2D,\n Variable, tail_layer])\n return graph, flag_changed\n",
"step-4": "from typing import Tuple, Union\nfrom webdnn.graph.graph import Graph\nfrom webdnn.graph.operators.zero_padding_2d import ZeroPadding2D\nfrom webdnn.graph.operators.convolution2d import Convolution2D\nfrom webdnn.graph.operators.max_pooling_2d import MaxPooling2D\nfrom webdnn.graph.operators.average_pooling_2d import AveragePooling2D\nfrom webdnn.graph.optimize_rule import OptimizeRule\nfrom webdnn.graph.traverse import search_sub_structure\nfrom webdnn.graph.variable import Variable\nfrom webdnn.util import flags\n\n\nclass ConcatZeroPadding(OptimizeRule):\n\n def optimize(self, graph: Graph) ->Tuple[Graph, bool]:\n \"\"\"\n Merges padding of ZeroPadding2D and Convolution2D | MaxPooling2D | AveragePooling2D layer\n Args:\n graph:\n\n Returns:\n\n \"\"\"\n flag_changed = False\n for tail_layer in [Convolution2D, MaxPooling2D, AveragePooling2D]:\n matches = search_sub_structure(graph, [ZeroPadding2D, Variable,\n tail_layer])\n while len(matches) > 0:\n match = matches[0]\n a1: ZeroPadding2D = match[0]\n a2: Union[Convolution2D, MaxPooling2D, AveragePooling2D\n ] = match[2]\n zero_pad = a1.parameters['padding']\n conv_pad = a2.parameters['padding']\n a2.parameters['padding'] = zero_pad[0] + conv_pad[0], zero_pad[\n 1] + conv_pad[1]\n x1 = a1.inputs['x']\n x2 = a2.inputs['x']\n a1.remove_all()\n a2.remove_input(x2)\n a2.append_input('x', x1)\n flag_changed = True\n matches = search_sub_structure(graph, [ZeroPadding2D,\n Variable, tail_layer])\n return graph, flag_changed\n",
"step-5": "from typing import Tuple, Union\n\nfrom webdnn.graph.graph import Graph\nfrom webdnn.graph.operators.zero_padding_2d import ZeroPadding2D\nfrom webdnn.graph.operators.convolution2d import Convolution2D\nfrom webdnn.graph.operators.max_pooling_2d import MaxPooling2D\nfrom webdnn.graph.operators.average_pooling_2d import AveragePooling2D\nfrom webdnn.graph.optimize_rule import OptimizeRule\nfrom webdnn.graph.traverse import search_sub_structure\nfrom webdnn.graph.variable import Variable\nfrom webdnn.util import flags\n\n\nclass ConcatZeroPadding(OptimizeRule):\n def optimize(self, graph: Graph) -> Tuple[Graph, bool]:\n \"\"\"\n Merges padding of ZeroPadding2D and Convolution2D | MaxPooling2D | AveragePooling2D layer\n Args:\n graph:\n\n Returns:\n\n \"\"\"\n # this optimization is always applied (since backends do not implement padding)\n flag_changed = False\n\n for tail_layer in [Convolution2D, MaxPooling2D, AveragePooling2D]:\n matches = search_sub_structure(graph, [ZeroPadding2D, Variable, tail_layer])\n while len(matches) > 0:\n match = matches[0]\n a1: ZeroPadding2D = match[0]\n a2: Union[Convolution2D, MaxPooling2D, AveragePooling2D] = match[2]\n\n zero_pad = a1.parameters[\"padding\"]\n conv_pad = a2.parameters[\"padding\"]\n a2.parameters[\"padding\"] = (zero_pad[0] + conv_pad[0], zero_pad[1] + conv_pad[1])\n\n x1 = a1.inputs[\"x\"]\n x2 = a2.inputs[\"x\"]\n\n a1.remove_all()\n # replace_input checks if the shape of x1 and x2 are same, but this restriction does not hold.\n a2.remove_input(x2)\n a2.append_input(\"x\", x1)\n\n flag_changed = True\n matches = search_sub_structure(graph, [ZeroPadding2D, Variable, tail_layer])\n\n return graph, flag_changed\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from . import *
from module import *
from transfer import *
from dataset import *
|
normal
|
{
"blob_id": "94d992ef4b9015aa8f42071bb1409703d509c313",
"index": 9810,
"step-1": "<mask token>\n",
"step-2": "from . import *\nfrom module import *\nfrom transfer import *\nfrom dataset import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import pydgm
import numpy as np
import sys
class XS():
# Hold the cross section values with routines for outputting to txt file
def __init__(self, sig_t, sig_f, chi, sig_s, mu=None):
self.sig_t = sig_t
self.sig_f = sig_f
self.chi = chi
self.sig_s = sig_s
        # fall back to unit weights when no mu is supplied
        self.mu = mu if mu is not None else np.ones(self.sig_t.shape)
def write_homogenized_XS(self, fname, mu=None):
if mu is not None:
assert mu.shape == self.sig_t.shape
self.mu = mu
G, npin = self.sig_t.shape
sig_t = self.sig_t * self.mu
vsig_f = self.sig_f * self.mu
sig_s = self.sig_s * self.mu
# Write the cross sections to file
s = '{} {} 0\n'.format(npin, G)
s += '{}\n'.format(' '.join([str(g) for g in range(G + 1)]))
s += '{}\n'.format(' '.join([str(g) for g in range(G)]))
for mat in range(npin):
s += 'pin {}\n'.format(mat + 1)
s += '1 1 1.0 0.0 0.602214179\n'
for g in range(G):
s += '{:<12.9f} {:<12.9f} {:<12.9f} {:<12.9f}\n'.format(sig_t[g, mat], vsig_f[g, mat], vsig_f[g, mat], self.chi[g, mat])
for g in range(G):
                s += '{}\n'.format(' '.join(['{:<12.9f}'.format(v) for v in sig_s[:, g, mat]]))
with open(fname, 'w') as f:
f.write(s[:-1])
def __add__(self, newXS):
sig_t = np.concatenate([self.sig_t, newXS.sig_t], axis=-1)
sig_f = np.concatenate([self.sig_f, newXS.sig_f], axis=-1)
sig_s = np.concatenate([self.sig_s, newXS.sig_s], axis=-1)
chi = np.concatenate([self.chi, newXS.chi], axis=-1)
mu = np.concatenate([self.mu, newXS.mu], axis=-1)
return XS(sig_t, sig_f, chi, sig_s, mu)
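# Illustrative sketch: build a small 2-group, 1-pin XS object and write it to
# a text file. The array shapes follow the conventions assumed by
# write_homogenized_XS above: sig_t, sig_f and chi are (G, npin) while sig_s
# is (G, G, npin). The output file name is hypothetical.
def _example_write_xs():
    G, npin = 2, 1
    sig_t = np.array([[0.5], [1.2]])        # total cross section
    vsig_f = np.array([[0.01], [0.3]])      # nu * fission cross section
    chi = np.array([[1.0], [0.0]])          # fission spectrum
    sig_s = np.array([[[0.4], [0.0]],
                      [[0.05], [0.9]]])     # group-to-group scattering
    xs = XS(sig_t, vsig_f, chi, sig_s, mu=np.ones((G, npin)))
    xs.write_homogenized_XS('example_xs.txt')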
class DGMSOLVER():
# Solve the problem using unotran
def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None, vacuum=False, k=None, phi=None, psi=None):
'''
Inputs:
G - Number of energy groups
fname - Name of the cross section file
fm - Fine mesh
cm - Coarse mesh
mm - Material map
nPin - Number of pincells
norm - norm of the flux to keep constant (match phi shape)
mapping - structure class that holds fine -> coarse mapping
'''
self.G = G
self.fname = fname
self.fm = fm
self.cm = cm
self.mm = mm
self.npin = nPin
self.norm = norm
self.computenorm = self.norm is None
self.vacuum = vacuum
self.mapping = mapping
# Pass on the options to unotran
self.setOptions()
# Solve using unotran
self.solve(k, phi, psi)
# Homogenize the cross sections over each spatial region
self.homogenize_space()
# Homogenize the cross sections over each energy range
if self.mapping is not None:
self.homogenize_energy()
def setOptions(self):
'''
Set the options for the Unotran solve
'''
pydgm.control.spatial_dimension = 1
pydgm.control.fine_mesh_x = self.fm
pydgm.control.coarse_mesh_x = self.cm
pydgm.control.material_map = self.mm
pydgm.control.xs_name = self.fname.ljust(256)
pydgm.control.angle_order = 8
pydgm.control.angle_option = pydgm.angle.gl
pydgm.control.boundary_west = 0.0 if self.vacuum else 1.0
pydgm.control.boundary_east = 0.0 if self.vacuum else 1.0
pydgm.control.allow_fission = True
pydgm.control.eigen_print = 0
pydgm.control.outer_print = 0
pydgm.control.eigen_tolerance = 1e-14
pydgm.control.outer_tolerance = 1e-12
pydgm.control.max_eigen_iters = 10000
pydgm.control.max_outer_iters = 1
pydgm.control.store_psi = True
pydgm.control.solver_type = 'eigen'.ljust(256)
pydgm.control.source_value = 0.0
pydgm.control.equation_type = 'DD'
pydgm.control.scatter_leg_order = 0
pydgm.control.ignore_warnings = True
def solve(self, k, phi, psi):
'''
Solve the problem using Unotran
'''
# Initialize the problem
pydgm.solver.initialize_solver()
if k is not None:
pydgm.state.keff = k
if phi is not None:
pydgm.state.phi = phi
if psi is not None:
pydgm.state.psi = psi
# Call the solver
pydgm.solver.solve()
# Copy any information from Unotran
self.extractInfo()
self.iter_k = np.copy(pydgm.state.keff)
self.iter_phi = np.copy(pydgm.state.phi)
self.iter_psi = np.copy(pydgm.state.psi)
# Clean up the solver
pydgm.solver.finalize_solver()
pydgm.control.finalize_control()
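    # Note: k, phi and psi act as optional warm starts; when supplied they
    # seed pydgm.state with a previous eigenvalue and flux before the solve.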
def extractInfo(self):
'''
Copy information from Unotran before the solver is deallocated
'''
self.phi = np.copy(pydgm.state.mg_phi[0])
self.dx = np.copy(pydgm.mesh.dx)
self.mat_map = np.copy(pydgm.state.mg_mmap)
self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T
self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T
self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T
self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T
def homogenize_space(self):
'''
Homogenize the cross sections over the spatial region
'''
def homo_space(array):
'''Convenience function to do the integration'''
# sum over region
return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2) / V
# Check that everything is the right shape of arrays
shape = self.phi.shape
#assert shape[0] == self.G
#assert (shape[1] / self.npin) == (shape[1] // self.npin)
# Compute the number of pins
nCellPerPin = shape[1] // self.npin
# Compute the \sum_{g\in G} \sum_{c\in r} V_c dE_g
V = np.sum(self.dx.reshape(self.npin, -1), axis=1)
# \forall g\in G, \forall c\in r compute \phi_{g,c} V_c dE_g
# Homogenize the flux
phi_dx = self.phi[:, :] * self.dx[:]
self.phi_homo = homo_space(phi_dx)
# Either find the norm of the flux or normalize the flux to self.norm
if self.computenorm:
self.norm = np.sum(self.phi_homo, axis=-1)
else:
            print('normalizing flux to supplied norm')
norm = self.norm / np.sum(self.phi_homo, axis=-1)
self.phi_homo *= norm[:, np.newaxis]
phi_dx *= norm[:, np.newaxis]
# Homogenize the cross sections
self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo
self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo
self.chi_homo = homo_space(self.chi * self.dx)
self.sig_s_homo = np.zeros((self.G, self.G, self.npin))
for gp in range(self.G):
self.sig_s_homo[gp] = homo_space(self.sig_s[gp] * phi_dx) / self.phi_homo
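    # Worked example: for one pin with two cells, dx = [1.0, 1.0],
    # phi = [2.0, 4.0] and sig_t = [0.5, 0.7], the flux-volume weighting
    # above gives V = 2.0, phi_homo = (2.0 + 4.0) / 2.0 = 3.0 and
    # sig_t_homo = ((0.5*2.0 + 0.7*4.0) / 2.0) / 3.0 ≈ 0.633.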
def homogenize_energy(self):
'''
Homogenize the cross sections over the energy range
'''
def homo_energy(array1, array2=None):
            r'''
            convenience function to do the integration
            return \frac{\sum_i array1[i] * array2[i]}{\sum_i array2[i]} for each coarse group
'''
if array2 is not None:
y = np.zeros((nCG, len(array1[0])))
z = np.zeros((nCG, len(array1[0])))
for g, cg in enumerate(grouping):
z[cg - 1] += array1[g] * array2[g]
y[cg - 1] += array2[g]
return z / y
else:
z = np.zeros((nCG, len(array1[0])))
for g, cg in enumerate(grouping):
z[cg - 1] += array1[g]
return z
nCG = self.mapping.nCG
nFG = self.mapping.nFG
grouping = np.array(self.mapping.grouping)
dE_coarse = np.array(self.mapping.dE_coarse)
dE_fine = np.array(self.mapping.dE_fine)
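        # Note: dividing each width array by itself leaves all-ones entries,
        # so the collapse below uses uniform rather than width-weighted
        # energy averaging.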
dE_coarse /= dE_coarse
dE_fine /= dE_fine
phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])
if self.computenorm:
norm = np.zeros(nCG)
for g, cg in enumerate(grouping):
norm[cg - 1] += self.norm[g]
self.norm = norm
'''
print(self.mapping.fine_bounds)
import matplotlib.pyplot as plt
def barchart(x, y):
X = np.zeros(2 * len(y))
Y = np.zeros(2 * len(y))
for i in range(0, len(y)):
X[2 * i] = x[i]
X[2 * i + 1] = x[i + 1]
Y[2 * i] = y[i]
Y[2 * i + 1] = y[i]
return X, Y
plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')
'''
self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)
self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)
self.chi_homo = homo_energy(self.chi_homo)
sig_s_homo = np.zeros((nCG, nCG, self.npin))
for gp, g in enumerate(grouping):
sig_s_homo[g - 1] += homo_energy(self.sig_s_homo[gp], self.phi_homo)
self.sig_s_homo = sig_s_homo
self.phi_homo = phi_homo
'''
plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse group')
plt.legend(loc=0)
plt.xlabel('Energy [MeV]')
plt.ylabel('$\Sigma_t$ [cm$^{-1}$]')
plt.savefig('test.pdf', transparent=True)
'''
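# Example: with grouping = [1, 1, 2], three fine groups collapse into two
# coarse groups; when a weight array2 is supplied, homo_energy returns
# sum_g array1[g] * array2[g] / sum_g array2[g] per coarse group, i.e. a
# weighted average of the fine-group values.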
|
normal
|
{
"blob_id": "1358adc3b2b3ffe72c0ed87fb0024f1079ca7d04",
"index": 1710,
"step-1": "<mask token>\n\n\nclass DGMSOLVER:\n\n def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None,\n vacuum=False, k=None, phi=None, psi=None):\n \"\"\"\n Inputs:\n G - Number of energy groups\n fname - Name of the cross section file\n fm - Fine mesh\n cm - Coarse mesh\n mm - Material map\n nPin - Number of pincells\n norm - norm of the flux to keep constant (match phi shape)\n mapping - structure class that holds fine -> coarse mapping\n \"\"\"\n self.G = G\n self.fname = fname\n self.fm = fm\n self.cm = cm\n self.mm = mm\n self.npin = nPin\n self.norm = norm\n self.computenorm = self.norm is None\n self.vacuum = vacuum\n self.mapping = mapping\n self.setOptions()\n self.solve(k, phi, psi)\n self.homogenize_space()\n if self.mapping is not None:\n self.homogenize_energy()\n <mask token>\n\n def solve(self, k, phi, psi):\n \"\"\"\n Solve the problem using Unotran\n \"\"\"\n pydgm.solver.initialize_solver()\n if k is not None:\n pydgm.state.keff = k\n if phi is not None:\n pydgm.state.phi = phi\n if psi is not None:\n pydgm.state.psi = psi\n pydgm.solver.solve()\n self.extractInfo()\n self.iter_k = np.copy(pydgm.state.keff)\n self.iter_phi = np.copy(pydgm.state.phi)\n self.iter_psi = np.copy(pydgm.state.psi)\n pydgm.solver.finalize_solver()\n pydgm.control.finalize_control()\n\n def extractInfo(self):\n \"\"\"\n Copy information from Unotran before the solver is deallocated\n \"\"\"\n self.phi = np.copy(pydgm.state.mg_phi[0])\n self.dx = np.copy(pydgm.mesh.dx)\n self.mat_map = np.copy(pydgm.state.mg_mmap)\n self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for\n c in range(len(self.mat_map))]).T\n self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c\n ] - 1] for c in range(len(self.mat_map))]).T\n self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] -\n 1] for c in range(len(self.mat_map))]).T\n self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for\n c in range(len(self.mat_map))]).T\n\n def homogenize_space(self):\n \"\"\"\n Homogenize the cross sections over the spatial region\n \"\"\"\n\n def homo_space(array):\n \"\"\"Convenience function to do the integration\"\"\"\n return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2\n ) / V\n shape = self.phi.shape\n nCellPerPin = shape[1] // self.npin\n V = np.sum(self.dx.reshape(self.npin, -1), axis=1)\n phi_dx = self.phi[:, :] * self.dx[:]\n self.phi_homo = homo_space(phi_dx)\n if self.computenorm:\n self.norm = np.sum(self.phi_homo, axis=-1)\n else:\n print('compute norm')\n norm = self.norm / np.sum(self.phi_homo, axis=-1)\n self.phi_homo *= norm[:, np.newaxis]\n phi_dx *= norm[:, np.newaxis]\n self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo\n self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo\n self.chi_homo = homo_space(self.chi * self.dx)\n self.sig_s_homo = np.zeros((self.G, self.G, self.npin))\n for gp in range(self.G):\n self.sig_s_homo[gp] = homo_space(self.sig_s[gp] * phi_dx\n ) / self.phi_homo\n\n def homogenize_energy(self):\n \"\"\"\n Homogenize the cross sections over the energy range\n \"\"\"\n\n def homo_energy(array1, array2=None):\n \"\"\"\n convinence function to do the integration\n\n return \frac{\\\\sum_i array1[i] * array2[i]}{\\\\sum_i array2[i]} for each coarse group\n \"\"\"\n if array2 is not None:\n y = np.zeros((nCG, len(array1[0])))\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g] * array2[g]\n y[cg - 1] += array2[g]\n return z / y\n else:\n 
z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g]\n return z\n nCG = self.mapping.nCG\n nFG = self.mapping.nFG\n grouping = np.array(self.mapping.grouping)\n dE_coarse = np.array(self.mapping.dE_coarse)\n dE_fine = np.array(self.mapping.dE_fine)\n dE_coarse /= dE_coarse\n dE_fine /= dE_fine\n phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])\n if self.computenorm:\n norm = np.zeros(nCG)\n for g, cg in enumerate(grouping):\n norm[cg - 1] += self.norm[g]\n self.norm = norm\n \"\"\"\n print(self.mapping.fine_bounds)\n import matplotlib.pyplot as plt\n\n def barchart(x, y):\n X = np.zeros(2 * len(y))\n Y = np.zeros(2 * len(y))\n for i in range(0, len(y)):\n X[2 * i] = x[i]\n X[2 * i + 1] = x[i + 1]\n Y[2 * i] = y[i]\n Y[2 * i + 1] = y[i]\n return X, Y\n\n plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')\n \"\"\"\n self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)\n self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)\n self.chi_homo = homo_energy(self.chi_homo)\n sig_s_homo = np.zeros((nCG, nCG, self.npin))\n for gp, g in enumerate(grouping):\n sig_s_homo[g - 1] += homo_energy(self.sig_s_homo[gp], self.phi_homo\n )\n self.sig_s_homo = sig_s_homo\n self.phi_homo = phi_homo\n \"\"\"\n plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse group')\n plt.legend(loc=0)\n plt.xlabel('Energy [MeV]')\n plt.ylabel('$\\\\Sigma_t$ [cm$^{-1}$]')\n plt.savefig('test.pdf', transparent=True)\n \"\"\"\n",
"step-2": "<mask token>\n\n\nclass XS:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DGMSOLVER:\n\n def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None,\n vacuum=False, k=None, phi=None, psi=None):\n \"\"\"\n Inputs:\n G - Number of energy groups\n fname - Name of the cross section file\n fm - Fine mesh\n cm - Coarse mesh\n mm - Material map\n nPin - Number of pincells\n norm - norm of the flux to keep constant (match phi shape)\n mapping - structure class that holds fine -> coarse mapping\n \"\"\"\n self.G = G\n self.fname = fname\n self.fm = fm\n self.cm = cm\n self.mm = mm\n self.npin = nPin\n self.norm = norm\n self.computenorm = self.norm is None\n self.vacuum = vacuum\n self.mapping = mapping\n self.setOptions()\n self.solve(k, phi, psi)\n self.homogenize_space()\n if self.mapping is not None:\n self.homogenize_energy()\n\n def setOptions(self):\n \"\"\"\n Set the options for the Unotran solve\n \"\"\"\n pydgm.control.spatial_dimension = 1\n pydgm.control.fine_mesh_x = self.fm\n pydgm.control.coarse_mesh_x = self.cm\n pydgm.control.material_map = self.mm\n pydgm.control.xs_name = self.fname.ljust(256)\n pydgm.control.angle_order = 8\n pydgm.control.angle_option = pydgm.angle.gl\n pydgm.control.boundary_west = 0.0 if self.vacuum else 1.0\n pydgm.control.boundary_east = 0.0 if self.vacuum else 1.0\n pydgm.control.allow_fission = True\n pydgm.control.eigen_print = 0\n pydgm.control.outer_print = 0\n pydgm.control.eigen_tolerance = 1e-14\n pydgm.control.outer_tolerance = 1e-12\n pydgm.control.max_eigen_iters = 10000\n pydgm.control.max_outer_iters = 1\n pydgm.control.store_psi = True\n pydgm.control.solver_type = 'eigen'.ljust(256)\n pydgm.control.source_value = 0.0\n pydgm.control.equation_type = 'DD'\n pydgm.control.scatter_leg_order = 0\n pydgm.control.ignore_warnings = True\n\n def solve(self, k, phi, psi):\n \"\"\"\n Solve the problem using Unotran\n \"\"\"\n pydgm.solver.initialize_solver()\n if k is not None:\n pydgm.state.keff = k\n if phi is not None:\n pydgm.state.phi = phi\n if psi is not None:\n pydgm.state.psi = psi\n pydgm.solver.solve()\n self.extractInfo()\n self.iter_k = np.copy(pydgm.state.keff)\n self.iter_phi = np.copy(pydgm.state.phi)\n self.iter_psi = np.copy(pydgm.state.psi)\n pydgm.solver.finalize_solver()\n pydgm.control.finalize_control()\n\n def extractInfo(self):\n \"\"\"\n Copy information from Unotran before the solver is deallocated\n \"\"\"\n self.phi = np.copy(pydgm.state.mg_phi[0])\n self.dx = np.copy(pydgm.mesh.dx)\n self.mat_map = np.copy(pydgm.state.mg_mmap)\n self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for\n c in range(len(self.mat_map))]).T\n self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c\n ] - 1] for c in range(len(self.mat_map))]).T\n self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] -\n 1] for c in range(len(self.mat_map))]).T\n self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for\n c in range(len(self.mat_map))]).T\n\n def homogenize_space(self):\n \"\"\"\n Homogenize the cross sections over the spatial region\n \"\"\"\n\n def homo_space(array):\n \"\"\"Convenience function to do the integration\"\"\"\n return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2\n ) / V\n shape = self.phi.shape\n nCellPerPin = shape[1] // self.npin\n V = np.sum(self.dx.reshape(self.npin, -1), axis=1)\n phi_dx = self.phi[:, :] * self.dx[:]\n self.phi_homo = homo_space(phi_dx)\n if self.computenorm:\n self.norm = np.sum(self.phi_homo, axis=-1)\n else:\n 
print('compute norm')\n norm = self.norm / np.sum(self.phi_homo, axis=-1)\n self.phi_homo *= norm[:, np.newaxis]\n phi_dx *= norm[:, np.newaxis]\n self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo\n self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo\n self.chi_homo = homo_space(self.chi * self.dx)\n self.sig_s_homo = np.zeros((self.G, self.G, self.npin))\n for gp in range(self.G):\n self.sig_s_homo[gp] = homo_space(self.sig_s[gp] * phi_dx\n ) / self.phi_homo\n\n def homogenize_energy(self):\n \"\"\"\n Homogenize the cross sections over the energy range\n \"\"\"\n\n def homo_energy(array1, array2=None):\n \"\"\"\n convinence function to do the integration\n\n return \frac{\\\\sum_i array1[i] * array2[i]}{\\\\sum_i array2[i]} for each coarse group\n \"\"\"\n if array2 is not None:\n y = np.zeros((nCG, len(array1[0])))\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g] * array2[g]\n y[cg - 1] += array2[g]\n return z / y\n else:\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g]\n return z\n nCG = self.mapping.nCG\n nFG = self.mapping.nFG\n grouping = np.array(self.mapping.grouping)\n dE_coarse = np.array(self.mapping.dE_coarse)\n dE_fine = np.array(self.mapping.dE_fine)\n dE_coarse /= dE_coarse\n dE_fine /= dE_fine\n phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])\n if self.computenorm:\n norm = np.zeros(nCG)\n for g, cg in enumerate(grouping):\n norm[cg - 1] += self.norm[g]\n self.norm = norm\n \"\"\"\n print(self.mapping.fine_bounds)\n import matplotlib.pyplot as plt\n\n def barchart(x, y):\n X = np.zeros(2 * len(y))\n Y = np.zeros(2 * len(y))\n for i in range(0, len(y)):\n X[2 * i] = x[i]\n X[2 * i + 1] = x[i + 1]\n Y[2 * i] = y[i]\n Y[2 * i + 1] = y[i]\n return X, Y\n\n plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')\n \"\"\"\n self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)\n self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)\n self.chi_homo = homo_energy(self.chi_homo)\n sig_s_homo = np.zeros((nCG, nCG, self.npin))\n for gp, g in enumerate(grouping):\n sig_s_homo[g - 1] += homo_energy(self.sig_s_homo[gp], self.phi_homo\n )\n self.sig_s_homo = sig_s_homo\n self.phi_homo = phi_homo\n \"\"\"\n plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse group')\n plt.legend(loc=0)\n plt.xlabel('Energy [MeV]')\n plt.ylabel('$\\\\Sigma_t$ [cm$^{-1}$]')\n plt.savefig('test.pdf', transparent=True)\n \"\"\"\n",
"step-3": "<mask token>\n\n\nclass XS:\n <mask token>\n\n def write_homogenized_XS(self, fname, mu=None):\n if mu is not None:\n assert mu.shape == self.sig_t.shape\n self.mu = mu\n G, npin = self.sig_t.shape\n sig_t = self.sig_t * self.mu\n vsig_f = self.sig_f * self.mu\n sig_s = self.sig_s * self.mu\n s = '{} {} 0\\n'.format(npin, G)\n s += '{}\\n'.format(' '.join([str(g) for g in range(G + 1)]))\n s += '{}\\n'.format(' '.join([str(g) for g in range(G)]))\n for mat in range(npin):\n s += 'pin {}\\n'.format(mat + 1)\n s += '1 1 1.0 0.0 0.602214179\\n'\n for g in range(G):\n s += '{:<12.9f} {:<12.9f} {:<12.9f} {:<12.9f}\\n'.format(sig_t\n [g, mat], vsig_f[g, mat], vsig_f[g, mat], self.chi[g, mat])\n for g in range(G):\n s += '{}\\n'.format(' '.join(['{:<12.9f}'.format(s) for s in\n sig_s[:, g, mat]]))\n with open(fname, 'w') as f:\n f.write(s[:-1])\n <mask token>\n\n\nclass DGMSOLVER:\n\n def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None,\n vacuum=False, k=None, phi=None, psi=None):\n \"\"\"\n Inputs:\n G - Number of energy groups\n fname - Name of the cross section file\n fm - Fine mesh\n cm - Coarse mesh\n mm - Material map\n nPin - Number of pincells\n norm - norm of the flux to keep constant (match phi shape)\n mapping - structure class that holds fine -> coarse mapping\n \"\"\"\n self.G = G\n self.fname = fname\n self.fm = fm\n self.cm = cm\n self.mm = mm\n self.npin = nPin\n self.norm = norm\n self.computenorm = self.norm is None\n self.vacuum = vacuum\n self.mapping = mapping\n self.setOptions()\n self.solve(k, phi, psi)\n self.homogenize_space()\n if self.mapping is not None:\n self.homogenize_energy()\n\n def setOptions(self):\n \"\"\"\n Set the options for the Unotran solve\n \"\"\"\n pydgm.control.spatial_dimension = 1\n pydgm.control.fine_mesh_x = self.fm\n pydgm.control.coarse_mesh_x = self.cm\n pydgm.control.material_map = self.mm\n pydgm.control.xs_name = self.fname.ljust(256)\n pydgm.control.angle_order = 8\n pydgm.control.angle_option = pydgm.angle.gl\n pydgm.control.boundary_west = 0.0 if self.vacuum else 1.0\n pydgm.control.boundary_east = 0.0 if self.vacuum else 1.0\n pydgm.control.allow_fission = True\n pydgm.control.eigen_print = 0\n pydgm.control.outer_print = 0\n pydgm.control.eigen_tolerance = 1e-14\n pydgm.control.outer_tolerance = 1e-12\n pydgm.control.max_eigen_iters = 10000\n pydgm.control.max_outer_iters = 1\n pydgm.control.store_psi = True\n pydgm.control.solver_type = 'eigen'.ljust(256)\n pydgm.control.source_value = 0.0\n pydgm.control.equation_type = 'DD'\n pydgm.control.scatter_leg_order = 0\n pydgm.control.ignore_warnings = True\n\n def solve(self, k, phi, psi):\n \"\"\"\n Solve the problem using Unotran\n \"\"\"\n pydgm.solver.initialize_solver()\n if k is not None:\n pydgm.state.keff = k\n if phi is not None:\n pydgm.state.phi = phi\n if psi is not None:\n pydgm.state.psi = psi\n pydgm.solver.solve()\n self.extractInfo()\n self.iter_k = np.copy(pydgm.state.keff)\n self.iter_phi = np.copy(pydgm.state.phi)\n self.iter_psi = np.copy(pydgm.state.psi)\n pydgm.solver.finalize_solver()\n pydgm.control.finalize_control()\n\n def extractInfo(self):\n \"\"\"\n Copy information from Unotran before the solver is deallocated\n \"\"\"\n self.phi = np.copy(pydgm.state.mg_phi[0])\n self.dx = np.copy(pydgm.mesh.dx)\n self.mat_map = np.copy(pydgm.state.mg_mmap)\n self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for\n c in range(len(self.mat_map))]).T\n self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c\n ] - 1] 
for c in range(len(self.mat_map))]).T\n self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] -\n 1] for c in range(len(self.mat_map))]).T\n self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for\n c in range(len(self.mat_map))]).T\n\n def homogenize_space(self):\n \"\"\"\n Homogenize the cross sections over the spatial region\n \"\"\"\n\n def homo_space(array):\n \"\"\"Convenience function to do the integration\"\"\"\n return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2\n ) / V\n shape = self.phi.shape\n nCellPerPin = shape[1] // self.npin\n V = np.sum(self.dx.reshape(self.npin, -1), axis=1)\n phi_dx = self.phi[:, :] * self.dx[:]\n self.phi_homo = homo_space(phi_dx)\n if self.computenorm:\n self.norm = np.sum(self.phi_homo, axis=-1)\n else:\n print('compute norm')\n norm = self.norm / np.sum(self.phi_homo, axis=-1)\n self.phi_homo *= norm[:, np.newaxis]\n phi_dx *= norm[:, np.newaxis]\n self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo\n self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo\n self.chi_homo = homo_space(self.chi * self.dx)\n self.sig_s_homo = np.zeros((self.G, self.G, self.npin))\n for gp in range(self.G):\n self.sig_s_homo[gp] = homo_space(self.sig_s[gp] * phi_dx\n ) / self.phi_homo\n\n def homogenize_energy(self):\n \"\"\"\n Homogenize the cross sections over the energy range\n \"\"\"\n\n def homo_energy(array1, array2=None):\n \"\"\"\n convinence function to do the integration\n\n return \frac{\\\\sum_i array1[i] * array2[i]}{\\\\sum_i array2[i]} for each coarse group\n \"\"\"\n if array2 is not None:\n y = np.zeros((nCG, len(array1[0])))\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g] * array2[g]\n y[cg - 1] += array2[g]\n return z / y\n else:\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g]\n return z\n nCG = self.mapping.nCG\n nFG = self.mapping.nFG\n grouping = np.array(self.mapping.grouping)\n dE_coarse = np.array(self.mapping.dE_coarse)\n dE_fine = np.array(self.mapping.dE_fine)\n dE_coarse /= dE_coarse\n dE_fine /= dE_fine\n phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])\n if self.computenorm:\n norm = np.zeros(nCG)\n for g, cg in enumerate(grouping):\n norm[cg - 1] += self.norm[g]\n self.norm = norm\n \"\"\"\n print(self.mapping.fine_bounds)\n import matplotlib.pyplot as plt\n\n def barchart(x, y):\n X = np.zeros(2 * len(y))\n Y = np.zeros(2 * len(y))\n for i in range(0, len(y)):\n X[2 * i] = x[i]\n X[2 * i + 1] = x[i + 1]\n Y[2 * i] = y[i]\n Y[2 * i + 1] = y[i]\n return X, Y\n\n plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')\n \"\"\"\n self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)\n self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)\n self.chi_homo = homo_energy(self.chi_homo)\n sig_s_homo = np.zeros((nCG, nCG, self.npin))\n for gp, g in enumerate(grouping):\n sig_s_homo[g - 1] += homo_energy(self.sig_s_homo[gp], self.phi_homo\n )\n self.sig_s_homo = sig_s_homo\n self.phi_homo = phi_homo\n \"\"\"\n plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse group')\n plt.legend(loc=0)\n plt.xlabel('Energy [MeV]')\n plt.ylabel('$\\\\Sigma_t$ [cm$^{-1}$]')\n plt.savefig('test.pdf', transparent=True)\n \"\"\"\n",
"step-4": "<mask token>\n\n\nclass XS:\n\n def __init__(self, sig_t, sig_f, chi, sig_s, mu=None):\n self.sig_t = sig_t\n self.sig_f = sig_f\n self.chi = chi\n self.sig_s = sig_s\n self.mu = mu if mu is None else np.ones(self.sig_t.shape)\n\n def write_homogenized_XS(self, fname, mu=None):\n if mu is not None:\n assert mu.shape == self.sig_t.shape\n self.mu = mu\n G, npin = self.sig_t.shape\n sig_t = self.sig_t * self.mu\n vsig_f = self.sig_f * self.mu\n sig_s = self.sig_s * self.mu\n s = '{} {} 0\\n'.format(npin, G)\n s += '{}\\n'.format(' '.join([str(g) for g in range(G + 1)]))\n s += '{}\\n'.format(' '.join([str(g) for g in range(G)]))\n for mat in range(npin):\n s += 'pin {}\\n'.format(mat + 1)\n s += '1 1 1.0 0.0 0.602214179\\n'\n for g in range(G):\n s += '{:<12.9f} {:<12.9f} {:<12.9f} {:<12.9f}\\n'.format(sig_t\n [g, mat], vsig_f[g, mat], vsig_f[g, mat], self.chi[g, mat])\n for g in range(G):\n s += '{}\\n'.format(' '.join(['{:<12.9f}'.format(s) for s in\n sig_s[:, g, mat]]))\n with open(fname, 'w') as f:\n f.write(s[:-1])\n\n def __add__(self, newXS):\n sig_t = np.concatenate([self.sig_t, newXS.sig_t], axis=-1)\n sig_f = np.concatenate([self.sig_f, newXS.sig_f], axis=-1)\n sig_s = np.concatenate([self.sig_s, newXS.sig_s], axis=-1)\n chi = np.concatenate([self.chi, newXS.chi], axis=-1)\n mu = np.concatenate([self.mu, newXS.mu], axis=-1)\n return XS(sig_t, sig_f, chi, sig_s, mu)\n\n\nclass DGMSOLVER:\n\n def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None,\n vacuum=False, k=None, phi=None, psi=None):\n \"\"\"\n Inputs:\n G - Number of energy groups\n fname - Name of the cross section file\n fm - Fine mesh\n cm - Coarse mesh\n mm - Material map\n nPin - Number of pincells\n norm - norm of the flux to keep constant (match phi shape)\n mapping - structure class that holds fine -> coarse mapping\n \"\"\"\n self.G = G\n self.fname = fname\n self.fm = fm\n self.cm = cm\n self.mm = mm\n self.npin = nPin\n self.norm = norm\n self.computenorm = self.norm is None\n self.vacuum = vacuum\n self.mapping = mapping\n self.setOptions()\n self.solve(k, phi, psi)\n self.homogenize_space()\n if self.mapping is not None:\n self.homogenize_energy()\n\n def setOptions(self):\n \"\"\"\n Set the options for the Unotran solve\n \"\"\"\n pydgm.control.spatial_dimension = 1\n pydgm.control.fine_mesh_x = self.fm\n pydgm.control.coarse_mesh_x = self.cm\n pydgm.control.material_map = self.mm\n pydgm.control.xs_name = self.fname.ljust(256)\n pydgm.control.angle_order = 8\n pydgm.control.angle_option = pydgm.angle.gl\n pydgm.control.boundary_west = 0.0 if self.vacuum else 1.0\n pydgm.control.boundary_east = 0.0 if self.vacuum else 1.0\n pydgm.control.allow_fission = True\n pydgm.control.eigen_print = 0\n pydgm.control.outer_print = 0\n pydgm.control.eigen_tolerance = 1e-14\n pydgm.control.outer_tolerance = 1e-12\n pydgm.control.max_eigen_iters = 10000\n pydgm.control.max_outer_iters = 1\n pydgm.control.store_psi = True\n pydgm.control.solver_type = 'eigen'.ljust(256)\n pydgm.control.source_value = 0.0\n pydgm.control.equation_type = 'DD'\n pydgm.control.scatter_leg_order = 0\n pydgm.control.ignore_warnings = True\n\n def solve(self, k, phi, psi):\n \"\"\"\n Solve the problem using Unotran\n \"\"\"\n pydgm.solver.initialize_solver()\n if k is not None:\n pydgm.state.keff = k\n if phi is not None:\n pydgm.state.phi = phi\n if psi is not None:\n pydgm.state.psi = psi\n pydgm.solver.solve()\n self.extractInfo()\n self.iter_k = np.copy(pydgm.state.keff)\n self.iter_phi = np.copy(pydgm.state.phi)\n 
self.iter_psi = np.copy(pydgm.state.psi)\n pydgm.solver.finalize_solver()\n pydgm.control.finalize_control()\n\n def extractInfo(self):\n \"\"\"\n Copy information from Unotran before the solver is deallocated\n \"\"\"\n self.phi = np.copy(pydgm.state.mg_phi[0])\n self.dx = np.copy(pydgm.mesh.dx)\n self.mat_map = np.copy(pydgm.state.mg_mmap)\n self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for\n c in range(len(self.mat_map))]).T\n self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c\n ] - 1] for c in range(len(self.mat_map))]).T\n self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] -\n 1] for c in range(len(self.mat_map))]).T\n self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for\n c in range(len(self.mat_map))]).T\n\n def homogenize_space(self):\n \"\"\"\n Homogenize the cross sections over the spatial region\n \"\"\"\n\n def homo_space(array):\n \"\"\"Convenience function to do the integration\"\"\"\n return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2\n ) / V\n shape = self.phi.shape\n nCellPerPin = shape[1] // self.npin\n V = np.sum(self.dx.reshape(self.npin, -1), axis=1)\n phi_dx = self.phi[:, :] * self.dx[:]\n self.phi_homo = homo_space(phi_dx)\n if self.computenorm:\n self.norm = np.sum(self.phi_homo, axis=-1)\n else:\n print('compute norm')\n norm = self.norm / np.sum(self.phi_homo, axis=-1)\n self.phi_homo *= norm[:, np.newaxis]\n phi_dx *= norm[:, np.newaxis]\n self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo\n self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo\n self.chi_homo = homo_space(self.chi * self.dx)\n self.sig_s_homo = np.zeros((self.G, self.G, self.npin))\n for gp in range(self.G):\n self.sig_s_homo[gp] = homo_space(self.sig_s[gp] * phi_dx\n ) / self.phi_homo\n\n def homogenize_energy(self):\n \"\"\"\n Homogenize the cross sections over the energy range\n \"\"\"\n\n def homo_energy(array1, array2=None):\n \"\"\"\n convinence function to do the integration\n\n return \frac{\\\\sum_i array1[i] * array2[i]}{\\\\sum_i array2[i]} for each coarse group\n \"\"\"\n if array2 is not None:\n y = np.zeros((nCG, len(array1[0])))\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g] * array2[g]\n y[cg - 1] += array2[g]\n return z / y\n else:\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g]\n return z\n nCG = self.mapping.nCG\n nFG = self.mapping.nFG\n grouping = np.array(self.mapping.grouping)\n dE_coarse = np.array(self.mapping.dE_coarse)\n dE_fine = np.array(self.mapping.dE_fine)\n dE_coarse /= dE_coarse\n dE_fine /= dE_fine\n phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])\n if self.computenorm:\n norm = np.zeros(nCG)\n for g, cg in enumerate(grouping):\n norm[cg - 1] += self.norm[g]\n self.norm = norm\n \"\"\"\n print(self.mapping.fine_bounds)\n import matplotlib.pyplot as plt\n\n def barchart(x, y):\n X = np.zeros(2 * len(y))\n Y = np.zeros(2 * len(y))\n for i in range(0, len(y)):\n X[2 * i] = x[i]\n X[2 * i + 1] = x[i + 1]\n Y[2 * i] = y[i]\n Y[2 * i + 1] = y[i]\n return X, Y\n\n plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')\n \"\"\"\n self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)\n self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)\n self.chi_homo = homo_energy(self.chi_homo)\n sig_s_homo = np.zeros((nCG, nCG, self.npin))\n for gp, g in enumerate(grouping):\n sig_s_homo[g - 1] += 
homo_energy(self.sig_s_homo[gp], self.phi_homo\n )\n self.sig_s_homo = sig_s_homo\n self.phi_homo = phi_homo\n \"\"\"\n plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse group')\n plt.legend(loc=0)\n plt.xlabel('Energy [MeV]')\n plt.ylabel('$\\\\Sigma_t$ [cm$^{-1}$]')\n plt.savefig('test.pdf', transparent=True)\n \"\"\"\n",
"step-5": "import pydgm\nimport numpy as np\nimport sys\n\n\nclass XS():\n\n # Hold the cross section values with routines for outputting to txt file\n def __init__(self, sig_t, sig_f, chi, sig_s, mu=None):\n self.sig_t = sig_t\n self.sig_f = sig_f\n self.chi = chi\n self.sig_s = sig_s\n self.mu = mu if mu is None else np.ones(self.sig_t.shape)\n\n def write_homogenized_XS(self, fname, mu=None):\n if mu is not None:\n assert mu.shape == self.sig_t.shape\n self.mu = mu\n\n G, npin = self.sig_t.shape\n\n sig_t = self.sig_t * self.mu\n vsig_f = self.sig_f * self.mu\n sig_s = self.sig_s * self.mu\n\n # Write the cross sections to file\n s = '{} {} 0\\n'.format(npin, G)\n s += '{}\\n'.format(' '.join([str(g) for g in range(G + 1)]))\n s += '{}\\n'.format(' '.join([str(g) for g in range(G)]))\n for mat in range(npin):\n s += 'pin {}\\n'.format(mat + 1)\n s += '1 1 1.0 0.0 0.602214179\\n'\n\n for g in range(G):\n s += '{:<12.9f} {:<12.9f} {:<12.9f} {:<12.9f}\\n'.format(sig_t[g, mat], vsig_f[g, mat], vsig_f[g, mat], self.chi[g, mat])\n for g in range(G):\n s += '{}\\n'.format(' '.join(['{:<12.9f}'.format(s) for s in sig_s[:, g, mat]]))\n\n with open(fname, 'w') as f:\n f.write(s[:-1])\n\n def __add__(self, newXS):\n sig_t = np.concatenate([self.sig_t, newXS.sig_t], axis=-1)\n sig_f = np.concatenate([self.sig_f, newXS.sig_f], axis=-1)\n sig_s = np.concatenate([self.sig_s, newXS.sig_s], axis=-1)\n chi = np.concatenate([self.chi, newXS.chi], axis=-1)\n mu = np.concatenate([self.mu, newXS.mu], axis=-1)\n\n return XS(sig_t, sig_f, chi, sig_s, mu)\n\n\nclass DGMSOLVER():\n\n # Solve the problem using unotran\n def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None, vacuum=False, k=None, phi=None, psi=None):\n '''\n Inputs:\n G - Number of energy groups\n fname - Name of the cross section file\n fm - Fine mesh\n cm - Coarse mesh\n mm - Material map\n nPin - Number of pincells\n norm - norm of the flux to keep constant (match phi shape)\n mapping - structure class that holds fine -> coarse mapping\n '''\n\n self.G = G\n self.fname = fname\n self.fm = fm\n self.cm = cm\n self.mm = mm\n self.npin = nPin\n self.norm = norm\n self.computenorm = self.norm is None\n self.vacuum = vacuum\n\n self.mapping = mapping\n # Pass on the options to unotran\n self.setOptions()\n # Solve using unotran\n self.solve(k, phi, psi)\n # Homogenize the cross sections over each spatial region\n self.homogenize_space()\n # Homogenize the cross sections over each energy range\n if self.mapping is not None:\n self.homogenize_energy()\n\n def setOptions(self):\n '''\n Set the options for the Unotran solve\n '''\n pydgm.control.spatial_dimension = 1\n pydgm.control.fine_mesh_x = self.fm\n pydgm.control.coarse_mesh_x = self.cm\n pydgm.control.material_map = self.mm\n pydgm.control.xs_name = self.fname.ljust(256)\n pydgm.control.angle_order = 8\n pydgm.control.angle_option = pydgm.angle.gl\n pydgm.control.boundary_west = 0.0 if self.vacuum else 1.0\n pydgm.control.boundary_east = 0.0 if self.vacuum else 1.0\n pydgm.control.allow_fission = True\n pydgm.control.eigen_print = 0\n pydgm.control.outer_print = 0\n pydgm.control.eigen_tolerance = 1e-14\n pydgm.control.outer_tolerance = 1e-12\n pydgm.control.max_eigen_iters = 10000\n pydgm.control.max_outer_iters = 1\n pydgm.control.store_psi = True\n pydgm.control.solver_type = 'eigen'.ljust(256)\n pydgm.control.source_value = 0.0\n pydgm.control.equation_type = 'DD'\n pydgm.control.scatter_leg_order = 0\n pydgm.control.ignore_warnings = True\n\n def solve(self, k, phi, psi):\n 
'''\n Solve the problem using Unotran\n '''\n\n # Initialize the problem\n pydgm.solver.initialize_solver()\n\n if k is not None:\n pydgm.state.keff = k\n if phi is not None:\n pydgm.state.phi = phi\n if psi is not None:\n pydgm.state.psi = psi\n\n # Call the solver\n pydgm.solver.solve()\n\n # Copy any information from Unotran\n self.extractInfo()\n\n self.iter_k = np.copy(pydgm.state.keff)\n self.iter_phi = np.copy(pydgm.state.phi)\n self.iter_psi = np.copy(pydgm.state.psi)\n\n # Clean up the solver\n pydgm.solver.finalize_solver()\n pydgm.control.finalize_control()\n\n def extractInfo(self):\n '''\n Copy information from Unotran before the solver is deallocated\n '''\n self.phi = np.copy(pydgm.state.mg_phi[0])\n self.dx = np.copy(pydgm.mesh.dx)\n self.mat_map = np.copy(pydgm.state.mg_mmap)\n self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T\n self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T\n self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T\n self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T\n\n def homogenize_space(self):\n '''\n Homogenize the cross sections over the spatial region\n '''\n\n def homo_space(array):\n '''Convenience function to do the integration'''\n # sum over region\n return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2) / V\n\n # Check that everything is the right shape of arrays\n shape = self.phi.shape\n #assert shape[0] == self.G\n #assert (shape[1] / self.npin) == (shape[1] // self.npin)\n\n # Compute the number of pins\n nCellPerPin = shape[1] // self.npin\n\n # Compute the \\sum_{g\\in G} \\sum_{c\\in r} V_c dE_g\n V = np.sum(self.dx.reshape(self.npin, -1), axis=1)\n\n # \\forall g\\in G, \\forall c\\in r compute \\phi_{g,c} V_c dE_g\n # Homogenize the flux\n phi_dx = self.phi[:, :] * self.dx[:]\n self.phi_homo = homo_space(phi_dx)\n\n # Either find the norm of the flux or normalize the flux to self.norm\n if self.computenorm:\n self.norm = np.sum(self.phi_homo, axis=-1)\n else:\n print('compute norm')\n norm = self.norm / np.sum(self.phi_homo, axis=-1)\n self.phi_homo *= norm[:, np.newaxis]\n phi_dx *= norm[:, np.newaxis]\n\n # Homogenize the cross sections\n self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo\n self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo\n self.chi_homo = homo_space(self.chi * self.dx)\n self.sig_s_homo = np.zeros((self.G, self.G, self.npin))\n for gp in range(self.G):\n self.sig_s_homo[gp] = homo_space(self.sig_s[gp] * phi_dx) / self.phi_homo\n\n def homogenize_energy(self):\n '''\n Homogenize the cross sections over the energy range\n '''\n\n def homo_energy(array1, array2=None):\n '''\n convinence function to do the integration\n\n return \\frac{\\sum_i array1[i] * array2[i]}{\\sum_i array2[i]} for each coarse group\n '''\n if array2 is not None:\n y = np.zeros((nCG, len(array1[0])))\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g] * array2[g]\n y[cg - 1] += array2[g]\n\n return z / y\n else:\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g]\n return z\n\n nCG = self.mapping.nCG\n nFG = self.mapping.nFG\n grouping = np.array(self.mapping.grouping)\n\n dE_coarse = np.array(self.mapping.dE_coarse)\n dE_fine = np.array(self.mapping.dE_fine)\n dE_coarse /= dE_coarse\n 
dE_fine /= dE_fine\n\n phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])\n\n if self.computenorm:\n norm = np.zeros(nCG)\n for g, cg in enumerate(grouping):\n norm[cg - 1] += self.norm[g]\n self.norm = norm\n\n '''\n print(self.mapping.fine_bounds)\n import matplotlib.pyplot as plt\n\n def barchart(x, y):\n X = np.zeros(2 * len(y))\n Y = np.zeros(2 * len(y))\n for i in range(0, len(y)):\n X[2 * i] = x[i]\n X[2 * i + 1] = x[i + 1]\n Y[2 * i] = y[i]\n Y[2 * i + 1] = y[i]\n return X, Y\n\n plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')\n '''\n\n self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)\n self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)\n self.chi_homo = homo_energy(self.chi_homo)\n sig_s_homo = np.zeros((nCG, nCG, self.npin))\n for gp, g in enumerate(grouping):\n sig_s_homo[g - 1] += homo_energy(self.sig_s_homo[gp], self.phi_homo)\n self.sig_s_homo = sig_s_homo\n self.phi_homo = phi_homo\n\n '''\n plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse group')\n plt.legend(loc=0)\n plt.xlabel('Energy [MeV]')\n plt.ylabel('$\\Sigma_t$ [cm$^{-1}$]')\n plt.savefig('test.pdf', transparent=True)\n '''\n",
"step-ids": [
6,
8,
9,
11,
13
]
}
|
[
6,
8,
9,
11,
13
] |
# -*- coding: utf-8 -*-
import sys
#from Constants import *
# start
import CrudMatrixDao
class CrudAccessValue:
def __init__(self):
self.crudAccessValue = {}
self.__run()
def __run(self):
aCrudMatrixDao = CrudMatrixDao.CrudMatrixDao()
# print aCrudMatrixDao.selectCrudAccessValueAction()
for row in aCrudMatrixDao.selectCrudAccessValueAction():
crudGubun = row[0]; accessValue= row[1]
self.crudAccessValue[crudGubun] = accessValue
def getAccessValue(self, crudGubun):
try:
out = self.crudAccessValue[crudGubun]
except KeyError:
out = crudGubun
return out
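# Behavior note: unknown CRUD codes fall through the KeyError handler above
# and are returned unchanged, e.g. getAccessValue('X') -> 'X' when 'X' has
# no mapped access value.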
if __name__ == "__main__":
aCrudAccessValue = CrudAccessValue()
	print(aCrudAccessValue.getAccessValue('C'))
|
normal
|
{
"blob_id": "38e616e35f165d458d774dd0b6837a733b8402d7",
"index": 1555,
"step-1": "# -*- coding: utf-8 -*-\r\nimport sys\r\n#from Constants import *\r\n# start\r\nimport CrudMatrixDao\r\n\r\nclass CrudAccessValue:\r\n\tdef __init__(self):\r\n\t\tself.crudAccessValue = {}\r\n\t\tself.__run()\r\n\t\t\r\n\tdef __run(self):\r\n\t\taCrudMatrixDao = CrudMatrixDao.CrudMatrixDao()\r\n\t\t# print aCrudMatrixDao.selectCrudAccessValueAction()\r\n\r\n\t\tfor row in aCrudMatrixDao.selectCrudAccessValueAction():\r\n\t\t\tcrudGubun = row[0]; accessValue= row[1]\r\n\t\t\tself.crudAccessValue[crudGubun] = accessValue\r\n\t\t\t\r\n\tdef getAccessValue(self, crudGubun):\r\n\t\t\r\n\t\ttry:\r\n\t\t\tout = self.crudAccessValue[crudGubun]\r\n\t\texcept KeyError:\r\n\t\t\tout = crudGubun\r\n\t\t\t\r\n\t\treturn out\r\n\t\r\nif __name__ == \"__main__\":\r\n\taCrudAccessValue = CrudAccessValue()\r\n\tprint aCrudAccessValue.getAccessValue('C')\r\n\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with onto:
class Pizza(Thing):
pass
class MeatPizza(Pizza):
pass
class Topping(Thing):
pass
class has_Topping((Pizza >> Topping)):
pass
print(Pizza)
<|reserved_special_token_0|>
print(Pizza.subclasses())
<|reserved_special_token_0|>
print(MeatPizza.is_a)
<|reserved_special_token_0|>
print(MeatPizza.ancestors())
<|reserved_special_token_0|>
print(Pizza.iri)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
onto = get_ontology('http://test1.org/onto.owl')
with onto:
class Pizza(Thing):
pass
class MeatPizza(Pizza):
pass
class Topping(Thing):
pass
class has_Topping((Pizza >> Topping)):
pass
print(Pizza)
<|reserved_special_token_0|>
print(Pizza.subclasses())
<|reserved_special_token_0|>
print(MeatPizza.is_a)
<|reserved_special_token_0|>
print(MeatPizza.ancestors())
<|reserved_special_token_0|>
print(Pizza.iri)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from owlready2 import *
onto = get_ontology('http://test1.org/onto.owl')
with onto:
class Pizza(Thing):
pass
class MeatPizza(Pizza):
pass
class Topping(Thing):
pass
class has_Topping((Pizza >> Topping)):
pass
print(Pizza)
<|reserved_special_token_0|>
print(Pizza.subclasses())
<|reserved_special_token_0|>
print(MeatPizza.is_a)
<|reserved_special_token_0|>
print(MeatPizza.ancestors())
<|reserved_special_token_0|>
print(Pizza.iri)
<|reserved_special_token_1|>
'For learning OWL and owlready2'
'From "https://qiita.com/sci-koke/items/a650c09bf77331f5537f"'
'From "https://owlready2.readthedocs.io/en/latest/class.html"'
'* Owlready2 * Warning: optimized Cython parser module "owlready2_optimized" is not available, defaulting to slower Python implementation'
'↑ This warning means you can either install Cython (and a C compiler) and re-install Owlready to benefit from the optimized module, or continue using the non-optimized version'
'pip install Owlready2'
from owlready2 import *
onto = get_ontology("http://test1.org/onto.owl")
with onto:
    # Entity classes
class Pizza(Thing):
pass
class MeatPizza(Pizza):
pass
class Topping(Thing):
pass
    # Property class
class has_Topping(Pizza >> Topping):
pass
print(Pizza)
'The .subclasses() method returns the list of direct subclasses of a class.'
print(Pizza.subclasses())
'Owlready2 provides the .is_a attribute for getting the list of superclasses'
print(MeatPizza.is_a)
'The .descendants() and .ancestors() Class methods return a set of the descendant and ancestor Classes.'
print(MeatPizza.ancestors())
'The .iri attribute of the Class can be used to obtain the full IRI of the class.'
print(Pizza.iri)
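# Aside: in Owlready2 the expression Pizza >> Topping is shorthand for an
# ObjectProperty with the given domain and range, roughly equivalent to:
#
#   class has_Topping(ObjectProperty):
#       domain = [Pizza]
#       range = [Topping]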
|
flexible
|
{
"blob_id": "cc7f1f38efcd4d757c1d11e2bd53695fca44e15a",
"index": 212,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith onto:\n\n\n class Pizza(Thing):\n pass\n\n\n class MeatPizza(Pizza):\n pass\n\n\n class Topping(Thing):\n pass\n\n\n class has_Topping((Pizza >> Topping)):\n pass\nprint(Pizza)\n<mask token>\nprint(Pizza.subclasses())\n<mask token>\nprint(MeatPizza.is_a)\n<mask token>\nprint(MeatPizza.ancestors())\n<mask token>\nprint(Pizza.iri)\n",
"step-3": "<mask token>\nonto = get_ontology('http://test1.org/onto.owl')\nwith onto:\n\n\n class Pizza(Thing):\n pass\n\n\n class MeatPizza(Pizza):\n pass\n\n\n class Topping(Thing):\n pass\n\n\n class has_Topping((Pizza >> Topping)):\n pass\nprint(Pizza)\n<mask token>\nprint(Pizza.subclasses())\n<mask token>\nprint(MeatPizza.is_a)\n<mask token>\nprint(MeatPizza.ancestors())\n<mask token>\nprint(Pizza.iri)\n",
"step-4": "<mask token>\nfrom owlready2 import *\nonto = get_ontology('http://test1.org/onto.owl')\nwith onto:\n\n\n class Pizza(Thing):\n pass\n\n\n class MeatPizza(Pizza):\n pass\n\n\n class Topping(Thing):\n pass\n\n\n class has_Topping((Pizza >> Topping)):\n pass\nprint(Pizza)\n<mask token>\nprint(Pizza.subclasses())\n<mask token>\nprint(MeatPizza.is_a)\n<mask token>\nprint(MeatPizza.ancestors())\n<mask token>\nprint(Pizza.iri)\n",
"step-5": "'For learning OWL and owlready2'\n'From \"https://qiita.com/sci-koke/items/a650c09bf77331f5537f\"'\n'From \"https://owlready2.readthedocs.io/en/latest/class.html\"'\n\n'* Owlready2 * Warning: optimized Cython parser module \"owlready2_optimized\" is not available, defaulting to slower Python implementation'\n'↑ This wartning mean is You can either install Cython (and a C compiler) and re-install Owlready to benefit from the optimized module, or continue using the non-optimized version'\n\n'pip install Owlready2'\n\nfrom owlready2 import *\nonto = get_ontology(\"http://test1.org/onto.owl\")\n\nwith onto:\n #エンティティクラス\n class Pizza(Thing):\n pass\n class MeatPizza(Pizza):\n pass\n\n class Topping(Thing):\n pass\n\n # プロパティクラス\n class has_Topping(Pizza >> Topping):\n pass\n\n\nprint(Pizza)\n\n'The .subclasses() method returns the list of direct subclasses of a class.'\nprint(Pizza.subclasses())\n\n'Owlready2 provides the .is_a attribute for getting the list of superclasses'\nprint(MeatPizza.is_a)\n\n'The .descendants() and .ancestors() Class methods return a set of the descendant and ancestor Classes.'\nprint(MeatPizza.ancestors())\n\n'The .iri attribute of the Class can be used to obtain the full IRI of the class.'\nprint(Pizza.iri)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import logging as log
from time import monotonic
import re
from jmap.account import ImapAccount
import jmap.core as core
import jmap.mail as mail
import jmap.submission as submission
import jmap.vacationresponse as vacationresponse
import jmap.contacts as contacts
import jmap.calendars as calendars
from jmap import errors
CAPABILITIES = {
'urn:ietf:params:jmap:core': core,
'urn:ietf:params:jmap:mail': mail,
# 'urn:ietf:params:jmap:submission': jmap.submission,
# 'urn:ietf:params:jmap:vacationresponse': jmap.vacationresponse,
# 'urn:ietf:params:jmap:contacts': jmap.contacts,
# 'urn:ietf:params:jmap:calendars': jmap.calendars,
}
def handle_request(user, data):
results = []
resultsByTag = {}
api = Api(user, data.get('createdIds', None))
for capability in data['using']:
CAPABILITIES[capability].register_methods(api)
for cmd, kwargs, tag in data['methodCalls']:
t0 = monotonic()
logbit = ''
try:
func = api.methods[cmd]
except KeyError:
            results.append(('error', {'type': 'unknownMethod'}, tag))
continue
# resolve kwargs
error = False
for key in [k for k in kwargs.keys() if k[0] == '#']:
# we are updating dict over which we iterate
# please check that your changes don't skip keys
val = kwargs.pop(key)
val = _parsepath(val['path'], resultsByTag[val['resultOf']])
if val is None:
results.append(('error',
{'type': 'resultReference', 'message': repr(val)}, tag))
error = True
break
elif not isinstance(val, list):
val = [val]
kwargs[key[1:]] = val
if error: continue
try:
result = func(api, **kwargs)
results.append((cmd, result, tag))
resultsByTag[tag] = result
except Exception as e:
results.append(('error', {
'type': e.__class__.__name__,
'message': str(e),
}, tag))
raise e
api.rollback()
elapsed = monotonic() - t0
# log method call
if kwargs.get('ids', None):
logbit += " [" + (",".join(kwargs['ids'][:4]))
if len(kwargs['ids']) > 4:
logbit += ", ..." + str(len(kwargs['ids']))
logbit += "]"
if kwargs.get('properties', None):
logbit += " (" + (",".join(kwargs['properties'][:4]))
if len(kwargs['properties']) > 4:
logbit += ", ..." + str(len(kwargs['properties']))
logbit += ")"
log.info(f'JMAP CMD {cmd}{logbit} took {elapsed}')
out = {
'methodResponses': results,
'sessionState': user.sessionState,
}
if 'createdIds' in data:
out['createdIds'] = data['createdIds']
return out
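# Example: handle_request consumes the standard JMAP request envelope,
# e.g. (method name illustrative):
#
#   {
#       "using": ["urn:ietf:params:jmap:core", "urn:ietf:params:jmap:mail"],
#       "methodCalls": [["Mailbox/get", {"accountId": "a1"}, "t0"]],
#   }
#
# and returns a dict with 'methodResponses' and 'sessionState', plus
# 'createdIds' when the request supplied them.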
class Api:
def __init__(self, user, idmap=None):
self.user = user
self._idmap = idmap or {}
self.methods = {}
def get_account(self, accountId) -> ImapAccount:
try:
return self.user.accounts[accountId]
except KeyError:
raise errors.accountNotFound()
def setid(self, key, val):
self._idmap[f'#{key}'] = val
def idmap(self, key):
return self._idmap.get(key, key)
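    # Example: after setid('new1', 'M123'), idmap('#new1') resolves the
    # client-chosen creation id to the server id 'M123'; unknown keys pass
    # through unchanged.
    def rollback(self):
        # assumed no-op cleanup hook: handle_request invokes rollback()
        # after each method call, so a placeholder is sketched here; a real
        # backend might discard uncommitted state instead
        pass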
def _parsepath(path, item):
match = re.match(r'^/([^/]+)', path)
if not match:
return item
selector = match.group(1)
if isinstance(item, list):
if selector == '*':
res = []
for one in item:
r = _parsepath(path[match.end():], one)
if isinstance(r, list):
res.extend(r)
else:
res.append(r)
return res
if selector.isnumeric():
return item[int(selector)]
elif isinstance(item, dict):
return _parsepath(path[match.end():], item[selector])
return item
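# Examples: _parsepath resolves JMAP result-reference paths against an
# earlier method result:
#   _parsepath('/ids', {'ids': ['a', 'b']})                    -> ['a', 'b']
#   _parsepath('/list/*/id', {'list': [{'id': 1}, {'id': 2}]}) -> [1, 2]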
|
normal
|
{
"blob_id": "aac3b2478980d3a5453451cb848afcfd6aca1743",
"index": 1680,
"step-1": "<mask token>\n\n\ndef handle_request(user, data):\n results = []\n resultsByTag = {}\n api = Api(user, data.get('createdIds', None))\n for capability in data['using']:\n CAPABILITIES[capability].register_methods(api)\n for cmd, kwargs, tag in data['methodCalls']:\n t0 = monotonic()\n logbit = ''\n try:\n func = api.methods[cmd]\n except KeyError:\n results.append(('error', {'error': 'unknownMethod'}, tag))\n continue\n error = False\n for key in [k for k in kwargs.keys() if k[0] == '#']:\n val = kwargs.pop(key)\n val = _parsepath(val['path'], resultsByTag[val['resultOf']])\n if val is None:\n results.append(('error', {'type': 'resultReference',\n 'message': repr(val)}, tag))\n error = True\n break\n elif not isinstance(val, list):\n val = [val]\n kwargs[key[1:]] = val\n if error:\n continue\n try:\n result = func(api, **kwargs)\n results.append((cmd, result, tag))\n resultsByTag[tag] = result\n except Exception as e:\n results.append(('error', {'type': e.__class__.__name__,\n 'message': str(e)}, tag))\n raise e\n api.rollback()\n elapsed = monotonic() - t0\n if kwargs.get('ids', None):\n logbit += ' [' + ','.join(kwargs['ids'][:4])\n if len(kwargs['ids']) > 4:\n logbit += ', ...' + str(len(kwargs['ids']))\n logbit += ']'\n if kwargs.get('properties', None):\n logbit += ' (' + ','.join(kwargs['properties'][:4])\n if len(kwargs['properties']) > 4:\n logbit += ', ...' + str(len(kwargs['properties']))\n logbit += ')'\n log.info(f'JMAP CMD {cmd}{logbit} took {elapsed}')\n out = {'methodResponses': results, 'sessionState': user.sessionState}\n if 'createdIds' in data:\n out['createdIds'] = data['createdIds']\n return out\n\n\nclass Api:\n\n def __init__(self, user, idmap=None):\n self.user = user\n self._idmap = idmap or {}\n self.methods = {}\n\n def get_account(self, accountId) ->ImapAccount:\n try:\n return self.user.accounts[accountId]\n except KeyError:\n raise errors.accountNotFound()\n\n def setid(self, key, val):\n self._idmap[f'#{key}'] = val\n\n def idmap(self, key):\n return self._idmap.get(key, key)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef handle_request(user, data):\n results = []\n resultsByTag = {}\n api = Api(user, data.get('createdIds', None))\n for capability in data['using']:\n CAPABILITIES[capability].register_methods(api)\n for cmd, kwargs, tag in data['methodCalls']:\n t0 = monotonic()\n logbit = ''\n try:\n func = api.methods[cmd]\n except KeyError:\n results.append(('error', {'error': 'unknownMethod'}, tag))\n continue\n error = False\n for key in [k for k in kwargs.keys() if k[0] == '#']:\n val = kwargs.pop(key)\n val = _parsepath(val['path'], resultsByTag[val['resultOf']])\n if val is None:\n results.append(('error', {'type': 'resultReference',\n 'message': repr(val)}, tag))\n error = True\n break\n elif not isinstance(val, list):\n val = [val]\n kwargs[key[1:]] = val\n if error:\n continue\n try:\n result = func(api, **kwargs)\n results.append((cmd, result, tag))\n resultsByTag[tag] = result\n except Exception as e:\n results.append(('error', {'type': e.__class__.__name__,\n 'message': str(e)}, tag))\n raise e\n api.rollback()\n elapsed = monotonic() - t0\n if kwargs.get('ids', None):\n logbit += ' [' + ','.join(kwargs['ids'][:4])\n if len(kwargs['ids']) > 4:\n logbit += ', ...' + str(len(kwargs['ids']))\n logbit += ']'\n if kwargs.get('properties', None):\n logbit += ' (' + ','.join(kwargs['properties'][:4])\n if len(kwargs['properties']) > 4:\n logbit += ', ...' + str(len(kwargs['properties']))\n logbit += ')'\n log.info(f'JMAP CMD {cmd}{logbit} took {elapsed}')\n out = {'methodResponses': results, 'sessionState': user.sessionState}\n if 'createdIds' in data:\n out['createdIds'] = data['createdIds']\n return out\n\n\nclass Api:\n\n def __init__(self, user, idmap=None):\n self.user = user\n self._idmap = idmap or {}\n self.methods = {}\n\n def get_account(self, accountId) ->ImapAccount:\n try:\n return self.user.accounts[accountId]\n except KeyError:\n raise errors.accountNotFound()\n\n def setid(self, key, val):\n self._idmap[f'#{key}'] = val\n\n def idmap(self, key):\n return self._idmap.get(key, key)\n\n\ndef _parsepath(path, item):\n match = re.match('^/([^/]+)', path)\n if not match:\n return item\n selector = match.group(1)\n if isinstance(item, list):\n if selector == '*':\n res = []\n for one in item:\n r = _parsepath(path[match.end():], one)\n if isinstance(r, list):\n res.extend(r)\n else:\n res.append(r)\n return res\n if selector.isnumeric():\n return item[int(selector)]\n elif isinstance(item, dict):\n return _parsepath(path[match.end():], item[selector])\n return item\n",
"step-3": "<mask token>\nCAPABILITIES = {'urn:ietf:params:jmap:core': core,\n 'urn:ietf:params:jmap:mail': mail}\n\n\ndef handle_request(user, data):\n results = []\n resultsByTag = {}\n api = Api(user, data.get('createdIds', None))\n for capability in data['using']:\n CAPABILITIES[capability].register_methods(api)\n for cmd, kwargs, tag in data['methodCalls']:\n t0 = monotonic()\n logbit = ''\n try:\n func = api.methods[cmd]\n except KeyError:\n results.append(('error', {'error': 'unknownMethod'}, tag))\n continue\n error = False\n for key in [k for k in kwargs.keys() if k[0] == '#']:\n val = kwargs.pop(key)\n val = _parsepath(val['path'], resultsByTag[val['resultOf']])\n if val is None:\n results.append(('error', {'type': 'resultReference',\n 'message': repr(val)}, tag))\n error = True\n break\n elif not isinstance(val, list):\n val = [val]\n kwargs[key[1:]] = val\n if error:\n continue\n try:\n result = func(api, **kwargs)\n results.append((cmd, result, tag))\n resultsByTag[tag] = result\n except Exception as e:\n results.append(('error', {'type': e.__class__.__name__,\n 'message': str(e)}, tag))\n raise e\n api.rollback()\n elapsed = monotonic() - t0\n if kwargs.get('ids', None):\n logbit += ' [' + ','.join(kwargs['ids'][:4])\n if len(kwargs['ids']) > 4:\n logbit += ', ...' + str(len(kwargs['ids']))\n logbit += ']'\n if kwargs.get('properties', None):\n logbit += ' (' + ','.join(kwargs['properties'][:4])\n if len(kwargs['properties']) > 4:\n logbit += ', ...' + str(len(kwargs['properties']))\n logbit += ')'\n log.info(f'JMAP CMD {cmd}{logbit} took {elapsed}')\n out = {'methodResponses': results, 'sessionState': user.sessionState}\n if 'createdIds' in data:\n out['createdIds'] = data['createdIds']\n return out\n\n\nclass Api:\n\n def __init__(self, user, idmap=None):\n self.user = user\n self._idmap = idmap or {}\n self.methods = {}\n\n def get_account(self, accountId) ->ImapAccount:\n try:\n return self.user.accounts[accountId]\n except KeyError:\n raise errors.accountNotFound()\n\n def setid(self, key, val):\n self._idmap[f'#{key}'] = val\n\n def idmap(self, key):\n return self._idmap.get(key, key)\n\n\ndef _parsepath(path, item):\n match = re.match('^/([^/]+)', path)\n if not match:\n return item\n selector = match.group(1)\n if isinstance(item, list):\n if selector == '*':\n res = []\n for one in item:\n r = _parsepath(path[match.end():], one)\n if isinstance(r, list):\n res.extend(r)\n else:\n res.append(r)\n return res\n if selector.isnumeric():\n return item[int(selector)]\n elif isinstance(item, dict):\n return _parsepath(path[match.end():], item[selector])\n return item\n",
"step-4": "import logging as log\nfrom time import monotonic\nimport re\nfrom jmap.account import ImapAccount\nimport jmap.core as core\nimport jmap.mail as mail\nimport jmap.submission as submission\nimport jmap.vacationresponse as vacationresponse\nimport jmap.contacts as contacts\nimport jmap.calendars as calendars\nfrom jmap import errors\nCAPABILITIES = {'urn:ietf:params:jmap:core': core,\n 'urn:ietf:params:jmap:mail': mail}\n\n\ndef handle_request(user, data):\n results = []\n resultsByTag = {}\n api = Api(user, data.get('createdIds', None))\n for capability in data['using']:\n CAPABILITIES[capability].register_methods(api)\n for cmd, kwargs, tag in data['methodCalls']:\n t0 = monotonic()\n logbit = ''\n try:\n func = api.methods[cmd]\n except KeyError:\n results.append(('error', {'error': 'unknownMethod'}, tag))\n continue\n error = False\n for key in [k for k in kwargs.keys() if k[0] == '#']:\n val = kwargs.pop(key)\n val = _parsepath(val['path'], resultsByTag[val['resultOf']])\n if val is None:\n results.append(('error', {'type': 'resultReference',\n 'message': repr(val)}, tag))\n error = True\n break\n elif not isinstance(val, list):\n val = [val]\n kwargs[key[1:]] = val\n if error:\n continue\n try:\n result = func(api, **kwargs)\n results.append((cmd, result, tag))\n resultsByTag[tag] = result\n except Exception as e:\n results.append(('error', {'type': e.__class__.__name__,\n 'message': str(e)}, tag))\n raise e\n api.rollback()\n elapsed = monotonic() - t0\n if kwargs.get('ids', None):\n logbit += ' [' + ','.join(kwargs['ids'][:4])\n if len(kwargs['ids']) > 4:\n logbit += ', ...' + str(len(kwargs['ids']))\n logbit += ']'\n if kwargs.get('properties', None):\n logbit += ' (' + ','.join(kwargs['properties'][:4])\n if len(kwargs['properties']) > 4:\n logbit += ', ...' + str(len(kwargs['properties']))\n logbit += ')'\n log.info(f'JMAP CMD {cmd}{logbit} took {elapsed}')\n out = {'methodResponses': results, 'sessionState': user.sessionState}\n if 'createdIds' in data:\n out['createdIds'] = data['createdIds']\n return out\n\n\nclass Api:\n\n def __init__(self, user, idmap=None):\n self.user = user\n self._idmap = idmap or {}\n self.methods = {}\n\n def get_account(self, accountId) ->ImapAccount:\n try:\n return self.user.accounts[accountId]\n except KeyError:\n raise errors.accountNotFound()\n\n def setid(self, key, val):\n self._idmap[f'#{key}'] = val\n\n def idmap(self, key):\n return self._idmap.get(key, key)\n\n\ndef _parsepath(path, item):\n match = re.match('^/([^/]+)', path)\n if not match:\n return item\n selector = match.group(1)\n if isinstance(item, list):\n if selector == '*':\n res = []\n for one in item:\n r = _parsepath(path[match.end():], one)\n if isinstance(r, list):\n res.extend(r)\n else:\n res.append(r)\n return res\n if selector.isnumeric():\n return item[int(selector)]\n elif isinstance(item, dict):\n return _parsepath(path[match.end():], item[selector])\n return item\n",
"step-5": "import logging as log\nfrom time import monotonic\nimport re\n\nfrom jmap.account import ImapAccount\nimport jmap.core as core\nimport jmap.mail as mail\nimport jmap.submission as submission\nimport jmap.vacationresponse as vacationresponse\nimport jmap.contacts as contacts\nimport jmap.calendars as calendars\nfrom jmap import errors\n\n\nCAPABILITIES = {\n 'urn:ietf:params:jmap:core': core,\n 'urn:ietf:params:jmap:mail': mail,\n # 'urn:ietf:params:jmap:submission': jmap.submission,\n # 'urn:ietf:params:jmap:vacationresponse': jmap.vacationresponse,\n # 'urn:ietf:params:jmap:contacts': jmap.contacts,\n # 'urn:ietf:params:jmap:calendars': jmap.calendars,\n}\n\ndef handle_request(user, data):\n results = []\n resultsByTag = {}\n\n api = Api(user, data.get('createdIds', None))\n for capability in data['using']:\n CAPABILITIES[capability].register_methods(api)\n\n for cmd, kwargs, tag in data['methodCalls']:\n t0 = monotonic()\n logbit = ''\n try:\n func = api.methods[cmd]\n except KeyError:\n results.append(('error', {'error': 'unknownMethod'}, tag))\n continue\n\n # resolve kwargs\n error = False\n for key in [k for k in kwargs.keys() if k[0] == '#']:\n # we are updating dict over which we iterate\n # please check that your changes don't skip keys\n val = kwargs.pop(key)\n val = _parsepath(val['path'], resultsByTag[val['resultOf']])\n if val is None:\n results.append(('error',\n {'type': 'resultReference', 'message': repr(val)}, tag))\n error = True\n break\n elif not isinstance(val, list):\n val = [val]\n kwargs[key[1:]] = val\n if error: continue\n\n try:\n result = func(api, **kwargs)\n results.append((cmd, result, tag))\n resultsByTag[tag] = result\n except Exception as e:\n results.append(('error', {\n 'type': e.__class__.__name__,\n 'message': str(e),\n }, tag))\n raise e\n api.rollback()\n\n elapsed = monotonic() - t0\n\n # log method call\n if kwargs.get('ids', None):\n logbit += \" [\" + (\",\".join(kwargs['ids'][:4]))\n if len(kwargs['ids']) > 4:\n logbit += \", ...\" + str(len(kwargs['ids']))\n logbit += \"]\"\n if kwargs.get('properties', None):\n logbit += \" (\" + (\",\".join(kwargs['properties'][:4]))\n if len(kwargs['properties']) > 4:\n logbit += \", ...\" + str(len(kwargs['properties']))\n logbit += \")\"\n log.info(f'JMAP CMD {cmd}{logbit} took {elapsed}')\n\n out = {\n 'methodResponses': results,\n 'sessionState': user.sessionState,\n }\n if 'createdIds' in data:\n out['createdIds'] = data['createdIds']\n return out\n\n\nclass Api:\n def __init__(self, user, idmap=None):\n self.user = user\n self._idmap = idmap or {}\n self.methods = {}\n \n def get_account(self, accountId) -> ImapAccount:\n try:\n return self.user.accounts[accountId]\n except KeyError:\n raise errors.accountNotFound()\n \n def setid(self, key, val):\n self._idmap[f'#{key}'] = val\n\n def idmap(self, key):\n return self._idmap.get(key, key)\n\n\ndef _parsepath(path, item):\n match = re.match(r'^/([^/]+)', path)\n if not match:\n return item\n selector = match.group(1)\n if isinstance(item, list):\n if selector == '*':\n res = []\n for one in item:\n r = _parsepath(path[match.end():], one)\n if isinstance(r, list):\n res.extend(r)\n else:\n res.append(r)\n return res\n if selector.isnumeric():\n return item[int(selector)]\n\n elif isinstance(item, dict):\n return _parsepath(path[match.end():], item[selector])\n\n return item\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
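A minimal sketch (hypothetical account id and tags; Email/query and Email/get stand in for methods registered by the mail capability) of the request shape handle_request expects. The '#ids' key is a JMAP result reference: _parsepath resolves its JSON-pointer-style 'path' against the result stored in resultsByTag under 'resultOf', and the resolved value is re-inserted as kwargs['ids']. The 'name' field belongs to the JMAP result-reference shape, but this handler only reads 'resultOf' and 'path'.

request = {
    'using': ['urn:ietf:params:jmap:core', 'urn:ietf:params:jmap:mail'],
    'methodCalls': [
        # first call: query for message ids
        ['Email/query', {'accountId': 'acc1'}, 'tag0'],
        # second call: back-reference the ids produced under 'tag0'
        ['Email/get', {'accountId': 'acc1',
                       '#ids': {'resultOf': 'tag0',
                                'name': 'Email/query',
                                'path': '/ids'}}, 'tag1'],
    ],
}
# response = handle_request(user, request)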
#!/bin/python3
import socket

# target host and port for the TCP connection
HOST = '127.0.0.1'
PORT = 4444

# open a TCP (stream) socket and connect to HOST:PORT
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
|
normal
|
{
"blob_id": "14a39b9aa56777c8198794fe2f51c9a068500743",
"index": 4075,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ns.connect((HOST, PORT))\n",
"step-3": "<mask token>\nHOST = '127.0.0.1'\nPORT = 4444\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((HOST, PORT))\n",
"step-4": "import socket\nHOST = '127.0.0.1'\nPORT = 4444\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((HOST, PORT))\n",
"step-5": "#!/bin/python3\nimport socket\nHOST = '127.0.0.1'\nPORT= 4444\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((HOST,PORT))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
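A minimal sketch (not in the original script) of what typically follows the connect call above: sending a payload and reading the reply on the same socket.

s.sendall(b'hello\n')                  # send a request line
reply = s.recv(1024)                   # read up to 1024 bytes
print(reply.decode(errors='replace'))
s.close()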
#@@range_begin(list1) # ← Ignore this line; it only marks the listing quoted in the book text.
# File name: Chapter07/0703person.py
# __metaclass__ = type  # ← Remove the leading "#" if you are using Python 2
class Person:
    def set_name(self, name):
        self.name = name
    def get_name(self):
        return self.name
    def greet(self):  # say hello
        print(f"Hello. I am {self.name}.")
#@@range_end(list1) # ← Ignore this line; it only marks the listing quoted in the book text.
# Run it
#@@range_begin(list2) # ← Ignore this line; it only marks the listing quoted in the book text.
foo = Person()
bar = Person()
foo.set_name('Luke Skywalker')  # a main character of "Star Wars"
bar.set_name('Anakin Skywalker')  # Luke's father
foo.greet()  # ← Hello. I am Luke Skywalker.
bar.greet()  # ← Hello. I am Anakin Skywalker.
#@@range_end(list2) # ← Ignore this line; it only marks the listing quoted in the book text.
#@@range_begin(list3) # ← Ignore this line; it only marks the listing quoted in the book text.
print(foo.name)  # ← Luke Skywalker
bar.name = 'Yoda'
bar.greet()  # ← Hello. I am Yoda.
#@@range_end(list3) # ← Ignore this line; it only marks the listing quoted in the book text.
|
normal
|
{
"blob_id": "321dc411b003949a6744216a13c59c70d919a675",
"index": 8402,
"step-1": "class Person:\n <mask token>\n\n def get_name(self):\n return self.name\n\n def greet(self):\n print(f'こんにちは。私は{self.name}です。')\n\n\n<mask token>\n",
"step-2": "class Person:\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def greet(self):\n print(f'こんにちは。私は{self.name}です。')\n\n\n<mask token>\n",
"step-3": "class Person:\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def greet(self):\n print(f'こんにちは。私は{self.name}です。')\n\n\n<mask token>\nfoo.set_name('ルーク・スカイウォーカー')\nbar.set_name('アナキン・スカイウォーカー')\nfoo.greet()\nbar.greet()\nprint(foo.name)\n<mask token>\nbar.greet()\n",
"step-4": "class Person:\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def greet(self):\n print(f'こんにちは。私は{self.name}です。')\n\n\nfoo = Person()\nbar = Person()\nfoo.set_name('ルーク・スカイウォーカー')\nbar.set_name('アナキン・スカイウォーカー')\nfoo.greet()\nbar.greet()\nprint(foo.name)\nbar.name = 'ヨーダ'\nbar.greet()\n",
"step-5": "#@@range_begin(list1) # ←この行は無視してください。本文に引用するためのものです。\n#ファイル名 Chapter07/0703person.py\n# __metaclass__ = type #← python 2を使っている場合は行頭の「#」を取る\nclass Person:\n def set_name(self, name):\n self.name = name\n def get_name(self):\n return self.name\n def greet(self): # あいさつをする\n print(f\"こんにちは。私は{self.name}です。\")\n#@@range_end(list1) # ←この行は無視してください。本文に引用するためのものです。\n\n#実行\n#@@range_begin(list2) # ←この行は無視してください。本文に引用するためのものです。\nfoo = Person()\nbar = Person()\nfoo.set_name('ルーク・スカイウォーカー') #『スター・ウォーズ』の主要登場人物\nbar.set_name('アナキン・スカイウォーカー') # ルークの父\nfoo.greet() #←こんにちは。私はルーク・スカイウォーカーです。\nbar.greet() #←こんにちは。私はアナキン・スカイウォーカーです。\n#@@range_end(list2) # ←この行は無視してください。本文に引用するためのものです。\n\n#@@range_begin(list3) # ←この行は無視してください。本文に引用するためのものです。\nprint(foo.name) #←ルーク・スカイウォーカー\nbar.name = 'ヨーダ'\nbar.greet() #←こんにちは。私はヨーダです。\n#@@range_end(list3) # ←この行は無視してください。本文に引用するためのものです。\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
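Because name above is a plain public attribute, the direct assignment bar.name = 'Yoda' bypasses set_name entirely. A short sketch (not from the book listing) of the property-based variant, which keeps accessor logic behind ordinary attribute syntax:

class PersonWithProperty:
    @property
    def name(self):            # getter: runs on p.name
        return self._name

    @name.setter
    def name(self, value):     # setter: runs on p.name = ...
        self._name = value

p = PersonWithProperty()
p.name = 'Yoda'    # goes through the setter
print(p.name)      # goes through the getter -> Yoda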
# -*- coding: utf-8 -*-
# Exercise done in Python 3

manual_calc = 53 + 1.0/3

def trapezoidal(f, a, b, n):
    h = float(b - a)/n
    result = 0.5*(f(a) + f(b))
    for i in range(1, n):
        result += f(a + i*h)
    result *= h
    return result

def rectangular(f, a, b, n):
    h = float(b - a)/n
    result = f(a + 0.5*h)
    for i in range(1, n):
        result += f(a + 0.5*h + i*h)
    result *= h
    return result

trap_2 = trapezoidal(lambda x: x * (x - 1), 2, 6, 2)
trap_100 = trapezoidal(lambda x: x * (x - 1), 2, 6, 100)
rect_2 = rectangular(lambda x: x * (x - 1), 2, 6, 2)
rect_100 = rectangular(lambda x: x * (x - 1), 2, 6, 100)

print('Exact value of the integral: {}\n'.format(manual_calc))
print('Trapezoidal approximation:\n 2 trapezoids: {}\n 100 trapezoids: {}'
      .format(trap_2, trap_100))
print('Trapezoidal approximation error:\n 2 trapezoids: {}\n 100 trapezoids: {}\n'
      .format(abs(trap_2 - manual_calc), abs(trap_100 - manual_calc)))
print('Midpoint (rectangle) approximation:\n 2 rectangles: {}\n 100 rectangles: {}'
      .format(rect_2, rect_100))
print('Midpoint (rectangle) approximation error:\n 2 rectangles: {}\n 100 rectangles: {}'
      .format(abs(rect_2 - manual_calc), abs(rect_100 - manual_calc)))
|
normal
|
{
"blob_id": "4fbf5b4520aa4dca4c7cc80d56ba00f634d184bf",
"index": 3405,
"step-1": "<mask token>\n\n\ndef rectangular(f, a, b, n):\n h = float(b - a) / n\n result = f(a + 0.5 * h)\n for i in range(1, n):\n result += f(a + 0.5 * h + i * h)\n result *= h\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef trapezoidal(f, a, b, n):\n h = float(b - a) / n\n result = 0.5 * (f(a) + f(b))\n for i in range(1, n):\n result += f(a + i * h)\n result *= h\n return result\n\n\ndef rectangular(f, a, b, n):\n h = float(b - a) / n\n result = f(a + 0.5 * h)\n for i in range(1, n):\n result += f(a + 0.5 * h + i * h)\n result *= h\n return result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef trapezoidal(f, a, b, n):\n h = float(b - a) / n\n result = 0.5 * (f(a) + f(b))\n for i in range(1, n):\n result += f(a + i * h)\n result *= h\n return result\n\n\ndef rectangular(f, a, b, n):\n h = float(b - a) / n\n result = f(a + 0.5 * h)\n for i in range(1, n):\n result += f(a + 0.5 * h + i * h)\n result *= h\n return result\n\n\n<mask token>\nprint('Точное значение интеграла: {}\\n'.format(manual_calc))\nprint(\"\"\"Аппроксимация трапециями:\n 2 трапеции: {}\n 100 трапеций: {}\"\"\".\n format(trap_2, trap_100))\nprint(\n \"\"\"Погрешность для аппроксимации трапециями:\n 2 трапеции: {}\n 100 трапеций: {}\n\"\"\"\n .format(abs(trap_2 - manual_calc), abs(trap_100 - manual_calc)))\nprint(\n \"\"\"Аппроксимация прямоугольниками:\n 2 прямоугольника: {}\n 100 прямоугольников: {}\"\"\"\n .format(rect_2, rect_100))\nprint(\n \"\"\"Погрешность для аппроксимации прямоугольниками:\n 2 прямоугольника: {}\n 100 прямоугольников: {}\"\"\"\n .format(abs(rect_2 - manual_calc), abs(rect_100 - manual_calc)))\n",
"step-4": "manual_calc = 53 + 1.0 / 3\n\n\ndef trapezoidal(f, a, b, n):\n h = float(b - a) / n\n result = 0.5 * (f(a) + f(b))\n for i in range(1, n):\n result += f(a + i * h)\n result *= h\n return result\n\n\ndef rectangular(f, a, b, n):\n h = float(b - a) / n\n result = f(a + 0.5 * h)\n for i in range(1, n):\n result += f(a + 0.5 * h + i * h)\n result *= h\n return result\n\n\ntrap_2 = trapezoidal(lambda x: x * (x - 1), 2, 6, 2)\ntrap_100 = trapezoidal(lambda x: x * (x - 1), 2, 6, 100)\nrect_2 = rectangular(lambda x: x * (x - 1), 2, 6, 2)\nrect_100 = rectangular(lambda x: x * (x - 1), 2, 6, 100)\nprint('Точное значение интеграла: {}\\n'.format(manual_calc))\nprint(\"\"\"Аппроксимация трапециями:\n 2 трапеции: {}\n 100 трапеций: {}\"\"\".\n format(trap_2, trap_100))\nprint(\n \"\"\"Погрешность для аппроксимации трапециями:\n 2 трапеции: {}\n 100 трапеций: {}\n\"\"\"\n .format(abs(trap_2 - manual_calc), abs(trap_100 - manual_calc)))\nprint(\n \"\"\"Аппроксимация прямоугольниками:\n 2 прямоугольника: {}\n 100 прямоугольников: {}\"\"\"\n .format(rect_2, rect_100))\nprint(\n \"\"\"Погрешность для аппроксимации прямоугольниками:\n 2 прямоугольника: {}\n 100 прямоугольников: {}\"\"\"\n .format(abs(rect_2 - manual_calc), abs(rect_100 - manual_calc)))\n",
"step-5": "# -*- coding: utf-8 -*-\n# упражнение выполнено на Python 3\n\n\nmanual_calc = 53 + 1.0/3\n\n\ndef trapezoidal(f, a, b, n):\n\t\n\th = float(b - a)/n\n\tresult = 0.5*(f(a) + f(b))\n\tfor i in range(1, n):\n\t\tresult += f(a + i*h)\n\tresult *= h\n\treturn result\n\n\ndef rectangular(f, a, b, n):\n\t\n\th = float(b - a)/n\n\tresult = f(a+0.5*h)\n\tfor i in range(1, n):\n\t\tresult += f(a + 0.5*h + i*h)\n\tresult *= h\n\treturn result\n\n\ntrap_2 = trapezoidal(lambda x: x * (x - 1), 2, 6, 2)\ntrap_100 = trapezoidal(lambda x: x * (x - 1), 2, 6, 100)\nrect_2 = rectangular(lambda x: x * (x - 1), 2, 6, 2)\nrect_100 = rectangular(lambda x: x * (x - 1), 2, 6, 100)\n\nprint('Точное значение интеграла: {}\\n'.format(manual_calc))\n\nprint('Аппроксимация трапециями:\\n 2 трапеции: {}\\n 100 трапеций: {}'\n .format(trap_2, trap_100))\n\nprint('Погрешность для аппроксимации трапециями:\\n 2 трапеции: {}\\n 100 трапеций: {}\\n'\n .format(abs(trap_2 - manual_calc), abs(trap_100 - manual_calc)))\n\nprint('Аппроксимация прямоугольниками:\\n 2 прямоугольника: {}\\n 100 прямоугольников: {}'\n .format(rect_2, rect_100))\n\nprint('Погрешность для аппроксимации прямоугольниками:\\n 2 прямоугольника: {}\\n 100 прямоугольников: {}'\n .format(abs(rect_2 - manual_calc), abs(rect_100 - manual_calc)))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
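For reference, manual_calc is the exact value: the integral of x(x - 1) over [2, 6] is [x^3/3 - x^2/2] evaluated from 2 to 6, i.e. (72 - 18) - (8/3 - 2) = 53 + 1/3. A small sketch (reusing trapezoidal from the listing above) showing the expected second-order convergence:

exact = 53 + 1.0/3
for n in (2, 20, 200):
    err = abs(trapezoidal(lambda x: x * (x - 1), 2, 6, n) - exact)
    print(n, err)  # each 10x increase in n cuts the error ~100x (O(h^2))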
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
janela.title('Teste de frame')
janela.geometry('800x600')
<|reserved_special_token_0|>
Label(frame, text='lsdakçasd').grid(row=0, column=0)
janela.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
janela = Tk()
janela.title('Teste de frame')
janela.geometry('800x600')
frame = Frame(janela, width=300, height=300, bg='red').grid(row=0, column=0)
Label(frame, text='lsdakçasd').grid(row=0, column=0)
janela.mainloop()
<|reserved_special_token_1|>
from tkinter import *
janela = Tk()
janela.title('Teste de frame')
janela.geometry('800x600')
frame = Frame(janela, width=300, height=300, bg='red').grid(row=0, column=0)
Label(frame, text='lsdakçasd').grid(row=0, column=0)
janela.mainloop()
<|reserved_special_token_1|>
from tkinter import *

janela = Tk()
janela.title("Teste de frame")
janela.geometry("800x600")

# Note: grid() returns None, so keep the widget reference before calling
# grid(); otherwise `frame` would be None and unusable as a parent.
frame = Frame(janela, width=300, height=300, bg='red')
frame.grid(row=0, column=0)
# Frames are useful when you want to place labels and buttons inside a
# specific area; for that, declare the frame as the parent (first argument).
Label(frame, text='lsdakçasd').grid(row=0, column=0)

janela.mainloop()
|
flexible
|
{
"blob_id": "4ae24d1e39bdcde3313a8a0c8029a331864ba40e",
"index": 6985,
"step-1": "<mask token>\n",
"step-2": "<mask token>\njanela.title('Teste de frame')\njanela.geometry('800x600')\n<mask token>\nLabel(frame, text='lsdakçasd').grid(row=0, column=0)\njanela.mainloop()\n",
"step-3": "<mask token>\njanela = Tk()\njanela.title('Teste de frame')\njanela.geometry('800x600')\nframe = Frame(janela, width=300, height=300, bg='red').grid(row=0, column=0)\nLabel(frame, text='lsdakçasd').grid(row=0, column=0)\njanela.mainloop()\n",
"step-4": "from tkinter import *\njanela = Tk()\njanela.title('Teste de frame')\njanela.geometry('800x600')\nframe = Frame(janela, width=300, height=300, bg='red').grid(row=0, column=0)\nLabel(frame, text='lsdakçasd').grid(row=0, column=0)\njanela.mainloop()\n",
"step-5": "from tkinter import *\n\njanela = Tk()\njanela.title(\"Teste de frame\")\njanela.geometry(\"800x600\")\n\nframe = Frame(janela, width = 300, height = 300, bg = 'red').grid(row = 0, column = 0)\n#frames servem para caso queira colocar labels e butoes dentro de uma area especifica\n#assim deve se declarar o frame como pai no inicio dos parametros, por exemplo\n\nLabel(frame, text = 'lsdakçasd').grid(row = 0, column = 0)\n\njanela.mainloop()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
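A note on the listing above: tkinter's grid() returns None, so the chained form frame = Frame(...).grid(...) seen in the intermediate steps leaves frame as None, and the Label silently attaches to the root window instead of the frame. A short sketch (hypothetical widget texts) of the frame-as-parent pattern with construction and layout separated:

from tkinter import *

root = Tk()
frame = Frame(root, width=300, height=300, bg='red')
frame.grid(row=0, column=0)   # grid() returns None, so keep the reference first
# children name the frame as their parent (first argument), so they are
# laid out inside the frame's own grid
Label(frame, text='inside the frame').grid(row=0, column=0)
Button(frame, text='also inside').grid(row=1, column=0)
root.mainloop()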
<|reserved_special_token_0|>
def load_data(data):
temp = []
for i in range(len(data)):
im = cv2.imread(data[i])
im = misc.imresize(im, size=DOWNSAMPLE_RATIO)
im = crop(im)
temp.append(im)
return temp
def normalize(data):
a = -0.5
b = 0.5
greyscale_min = 0
greyscale_max = 255
return a + (data - greyscale_min) * (b - a) / (greyscale_max -
greyscale_min)
def color_change(data):
x = cv2.cvtColor(data, cv2.COLOR_BGR2HSV)
return x
<|reserved_special_token_0|>
def img_translate(img, angle):
change = np.random.uniform(-0.5, 0.5)
x_translation = TRANS_X_RANGE * change
new_angle = angle + change * TRANS_ANGLE
y_translation = TRANS_Y_RANGE * np.random.uniform(-0.5, 0.5)
translation_matrix = np.float32([[1, 0, x_translation], [0, 1,
y_translation]])
return cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])
), new_angle
<|reserved_special_token_0|>
def curve_focus(xdata, ydata):
count = 0
for x in range(len(xdata)):
if ydata[x] == 0.0:
count += 1
print('Total = {}\n0 Steering = {}'.format(len(xdata), count))
return xdata, ydata
<|reserved_special_token_0|>
def set_model():
model = Sequential()
model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=SHAPE,
output_shape=SHAPE))
model.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))
model.add(Convolution2D(36, 5, 5, border_mode='same', activation='elu',
name='conv1'))
model.add(Convolution2D(48, 3, 3, activation='elu', border_mode='same',
name='conv2'))
model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',
name='conv3'))
model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',
name='conv4'))
model.add(Flatten(name='flat1'))
model.add(Dense(100, activation='elu', name='dense1'))
model.add(Dense(50, activation='elu', name='dense2'))
model.add(Dense(10, activation='elu', name='dense3'))
model.add(Dense(1, activation='linear', name='dense4'))
return model
<|reserved_special_token_0|>
def augment(x, y):
x, y = flip(x, y)
return x, y
<|reserved_special_token_0|>
def generator(samples, batch_size=32):
"""
Purpose: Yield tensor batches to fit_generator function
Inputs: A file path
Outputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix
Where AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE
"""
num_samples = len(samples)
shuffle(samples)
while 1:
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset + batch_size]
images = []
angles = []
for batch_sample in batch_samples:
image, angle = process_line(batch_sample)
images.append(image)
angles.append(angle)
X_train = np.array(images)
y_train = np.array(angles)
X_train, y_train = augment(X_train, y_train)
yield shuffle(X_train, y_train)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_data(data):
temp = []
for i in range(len(data)):
im = cv2.imread(data[i])
im = misc.imresize(im, size=DOWNSAMPLE_RATIO)
im = crop(im)
temp.append(im)
return temp
def normalize(data):
a = -0.5
b = 0.5
greyscale_min = 0
greyscale_max = 255
return a + (data - greyscale_min) * (b - a) / (greyscale_max -
greyscale_min)
def color_change(data):
x = cv2.cvtColor(data, cv2.COLOR_BGR2HSV)
return x
<|reserved_special_token_0|>
def img_translate(img, angle):
change = np.random.uniform(-0.5, 0.5)
x_translation = TRANS_X_RANGE * change
new_angle = angle + change * TRANS_ANGLE
y_translation = TRANS_Y_RANGE * np.random.uniform(-0.5, 0.5)
translation_matrix = np.float32([[1, 0, x_translation], [0, 1,
y_translation]])
return cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])
), new_angle
<|reserved_special_token_0|>
def curve_focus(xdata, ydata):
count = 0
for x in range(len(xdata)):
if ydata[x] == 0.0:
count += 1
print('Total = {}\n0 Steering = {}'.format(len(xdata), count))
return xdata, ydata
<|reserved_special_token_0|>
def set_model():
model = Sequential()
model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=SHAPE,
output_shape=SHAPE))
model.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))
model.add(Convolution2D(36, 5, 5, border_mode='same', activation='elu',
name='conv1'))
model.add(Convolution2D(48, 3, 3, activation='elu', border_mode='same',
name='conv2'))
model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',
name='conv3'))
model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',
name='conv4'))
model.add(Flatten(name='flat1'))
model.add(Dense(100, activation='elu', name='dense1'))
model.add(Dense(50, activation='elu', name='dense2'))
model.add(Dense(10, activation='elu', name='dense3'))
model.add(Dense(1, activation='linear', name='dense4'))
return model
<|reserved_special_token_0|>
def show_data(log):
fig = plt.figure(figsize=(8, 2))
a = fig.add_subplot(1, 2, 1)
im = cv2.imread(FOLDER + log[560, 0].strip())
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
a.set_title('Full Resolution')
plt.axis('off')
plt.imshow(im)
im = misc.imresize(im, size=0.2)
a = fig.add_subplot(1, 2, 2)
a.set_title('After 80% Downsampling')
plt.imshow(im)
plt.axis('off')
fig.savefig('examples/Downsampling.png')
plt.show()
exit()
count = 1
y = 0
steer = log[:, 3]
for x in my_range(-0.8, 0.7, 0.1):
while 1:
y = np.random.randint(len(steer))
if round(steer[y], 1) == x:
print('Found {}', x)
break
a = fig.add_subplot(4, 5, count)
im = cv2.imread(FOLDER + log[y, 0])
im, angle = process_line(log[y])
a.set_title(str(x) + ' to ' + str(round(angle, 1)))
plt.imshow(im, aspect='auto', interpolation='nearest')
count += 1
plt.show()
exit()
pic = np.random.randint(len(X_train))
print(X_train.shape)
plt.imshow(X_train[pic])
plt.show()
exit()
def augment(x, y):
x, y = flip(x, y)
return x, y
def process_line(sample):
img_choice = np.random.randint(3)
angle = 0.0
if img_choice == 0:
angle = float(sample[3])
elif img_choice == 1:
angle = float(sample[3]) + 0.27
elif img_choice == 2:
angle = float(sample[3]) - 0.27
im = cv2.imread(FOLDER + sample[img_choice].strip())
im = misc.imresize(im, size=DOWNSAMPLE_RATIO)
im = crop(im)
im = adjust_brightness(im)
im, angle = img_translate(im, angle)
return im, angle
def generator(samples, batch_size=32):
"""
Purpose: Yield tensor batches to fit_generator function
Inputs: A file path
Outputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix
Where AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE
"""
num_samples = len(samples)
shuffle(samples)
while 1:
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset + batch_size]
images = []
angles = []
for batch_sample in batch_samples:
image, angle = process_line(batch_sample)
images.append(image)
angles.append(angle)
X_train = np.array(images)
y_train = np.array(angles)
X_train, y_train = augment(X_train, y_train)
yield shuffle(X_train, y_train)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_data(data):
temp = []
for i in range(len(data)):
im = cv2.imread(data[i])
im = misc.imresize(im, size=DOWNSAMPLE_RATIO)
im = crop(im)
temp.append(im)
return temp
def normalize(data):
a = -0.5
b = 0.5
greyscale_min = 0
greyscale_max = 255
return a + (data - greyscale_min) * (b - a) / (greyscale_max -
greyscale_min)
def color_change(data):
x = cv2.cvtColor(data, cv2.COLOR_BGR2HSV)
return x
<|reserved_special_token_0|>
def img_translate(img, angle):
change = np.random.uniform(-0.5, 0.5)
x_translation = TRANS_X_RANGE * change
new_angle = angle + change * TRANS_ANGLE
y_translation = TRANS_Y_RANGE * np.random.uniform(-0.5, 0.5)
translation_matrix = np.float32([[1, 0, x_translation], [0, 1,
y_translation]])
return cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])
), new_angle
<|reserved_special_token_0|>
def curve_focus(xdata, ydata):
count = 0
for x in range(len(xdata)):
if ydata[x] == 0.0:
count += 1
print('Total = {}\n0 Steering = {}'.format(len(xdata), count))
return xdata, ydata
def flip(xdata, ydata):
for x in range(len(xdata)):
flip = np.fliplr(xdata[x])
xdata = np.append(xdata, [flip], axis=0)
ydata = np.append(ydata, -1 * ydata[x])
return xdata, ydata
def set_model():
model = Sequential()
model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=SHAPE,
output_shape=SHAPE))
model.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))
model.add(Convolution2D(36, 5, 5, border_mode='same', activation='elu',
name='conv1'))
model.add(Convolution2D(48, 3, 3, activation='elu', border_mode='same',
name='conv2'))
model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',
name='conv3'))
model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',
name='conv4'))
model.add(Flatten(name='flat1'))
model.add(Dense(100, activation='elu', name='dense1'))
model.add(Dense(50, activation='elu', name='dense2'))
model.add(Dense(10, activation='elu', name='dense3'))
model.add(Dense(1, activation='linear', name='dense4'))
return model
def my_range(start, end, step):
while start <= end:
yield round(start, 1)
start += step
def show_data(log):
fig = plt.figure(figsize=(8, 2))
a = fig.add_subplot(1, 2, 1)
im = cv2.imread(FOLDER + log[560, 0].strip())
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
a.set_title('Full Resolution')
plt.axis('off')
plt.imshow(im)
im = misc.imresize(im, size=0.2)
a = fig.add_subplot(1, 2, 2)
a.set_title('After 80% Downsampling')
plt.imshow(im)
plt.axis('off')
fig.savefig('examples/Downsampling.png')
plt.show()
exit()
count = 1
y = 0
steer = log[:, 3]
for x in my_range(-0.8, 0.7, 0.1):
while 1:
y = np.random.randint(len(steer))
if round(steer[y], 1) == x:
print('Found {}', x)
break
a = fig.add_subplot(4, 5, count)
im = cv2.imread(FOLDER + log[y, 0])
im, angle = process_line(log[y])
a.set_title(str(x) + ' to ' + str(round(angle, 1)))
plt.imshow(im, aspect='auto', interpolation='nearest')
count += 1
plt.show()
exit()
pic = np.random.randint(len(X_train))
print(X_train.shape)
plt.imshow(X_train[pic])
plt.show()
exit()
def augment(x, y):
x, y = flip(x, y)
return x, y
def process_line(sample):
img_choice = np.random.randint(3)
angle = 0.0
if img_choice == 0:
angle = float(sample[3])
elif img_choice == 1:
angle = float(sample[3]) + 0.27
elif img_choice == 2:
angle = float(sample[3]) - 0.27
im = cv2.imread(FOLDER + sample[img_choice].strip())
im = misc.imresize(im, size=DOWNSAMPLE_RATIO)
im = crop(im)
im = adjust_brightness(im)
im, angle = img_translate(im, angle)
return im, angle
def generator(samples, batch_size=32):
"""
Purpose: Yield tensor batches to fit_generator function
Inputs: A file path
Outputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix
Where AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE
"""
num_samples = len(samples)
shuffle(samples)
while 1:
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset + batch_size]
images = []
angles = []
for batch_sample in batch_samples:
image, angle = process_line(batch_sample)
images.append(image)
angles.append(angle)
X_train = np.array(images)
y_train = np.array(angles)
X_train, y_train = augment(X_train, y_train)
yield shuffle(X_train, y_train)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
matplotlib.use('TkAgg')
<|reserved_special_token_0|>
def load_data(data):
temp = []
for i in range(len(data)):
im = cv2.imread(data[i])
im = misc.imresize(im, size=DOWNSAMPLE_RATIO)
im = crop(im)
temp.append(im)
return temp
def normalize(data):
a = -0.5
b = 0.5
greyscale_min = 0
greyscale_max = 255
return a + (data - greyscale_min) * (b - a) / (greyscale_max -
greyscale_min)
def color_change(data):
x = cv2.cvtColor(data, cv2.COLOR_BGR2HSV)
return x
def adjust_brightness(im):
temp = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
brightness = BRIGHTNESS_RANGE * np.random.uniform(-1, 1)
temp[:, :, 2] = temp[:, :, 2] * (1 - brightness)
return cv2.cvtColor(temp, cv2.COLOR_HSV2RGB)
def img_translate(img, angle):
change = np.random.uniform(-0.5, 0.5)
x_translation = TRANS_X_RANGE * change
new_angle = angle + change * TRANS_ANGLE
y_translation = TRANS_Y_RANGE * np.random.uniform(-0.5, 0.5)
translation_matrix = np.float32([[1, 0, x_translation], [0, 1,
y_translation]])
return cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])
), new_angle
def crop(im):
shape = np.array(im).shape
y1 = int(shape[0] * 0.4)
y2 = int(shape[0] * 0.87)
im = im[y1:y2, :, :]
im = cv2.resize(im, (IMG_ROWS, IMG_COLS), interpolation=cv2.INTER_AREA)
return im
def curve_focus(xdata, ydata):
count = 0
for x in range(len(xdata)):
if ydata[x] == 0.0:
count += 1
print('Total = {}\n0 Steering = {}'.format(len(xdata), count))
return xdata, ydata
def flip(xdata, ydata):
for x in range(len(xdata)):
flip = np.fliplr(xdata[x])
xdata = np.append(xdata, [flip], axis=0)
ydata = np.append(ydata, -1 * ydata[x])
return xdata, ydata
def set_model():
model = Sequential()
model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=SHAPE,
output_shape=SHAPE))
model.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))
model.add(Convolution2D(36, 5, 5, border_mode='same', activation='elu',
name='conv1'))
model.add(Convolution2D(48, 3, 3, activation='elu', border_mode='same',
name='conv2'))
model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',
name='conv3'))
model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',
name='conv4'))
model.add(Flatten(name='flat1'))
model.add(Dense(100, activation='elu', name='dense1'))
model.add(Dense(50, activation='elu', name='dense2'))
model.add(Dense(10, activation='elu', name='dense3'))
model.add(Dense(1, activation='linear', name='dense4'))
return model
def my_range(start, end, step):
while start <= end:
yield round(start, 1)
start += step
def show_data(log):
fig = plt.figure(figsize=(8, 2))
a = fig.add_subplot(1, 2, 1)
im = cv2.imread(FOLDER + log[560, 0].strip())
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
a.set_title('Full Resolution')
plt.axis('off')
plt.imshow(im)
im = misc.imresize(im, size=0.2)
a = fig.add_subplot(1, 2, 2)
a.set_title('After 80% Downsampling')
plt.imshow(im)
plt.axis('off')
fig.savefig('examples/Downsampling.png')
plt.show()
exit()
count = 1
y = 0
steer = log[:, 3]
for x in my_range(-0.8, 0.7, 0.1):
while 1:
y = np.random.randint(len(steer))
if round(steer[y], 1) == x:
print('Found {}', x)
break
a = fig.add_subplot(4, 5, count)
im = cv2.imread(FOLDER + log[y, 0])
im, angle = process_line(log[y])
a.set_title(str(x) + ' to ' + str(round(angle, 1)))
plt.imshow(im, aspect='auto', interpolation='nearest')
count += 1
plt.show()
exit()
pic = np.random.randint(len(X_train))
print(X_train.shape)
plt.imshow(X_train[pic])
plt.show()
exit()
def augment(x, y):
x, y = flip(x, y)
return x, y
def process_line(sample):
img_choice = np.random.randint(3)
angle = 0.0
if img_choice == 0:
angle = float(sample[3])
elif img_choice == 1:
angle = float(sample[3]) + 0.27
elif img_choice == 2:
angle = float(sample[3]) - 0.27
im = cv2.imread(FOLDER + sample[img_choice].strip())
im = misc.imresize(im, size=DOWNSAMPLE_RATIO)
im = crop(im)
im = adjust_brightness(im)
im, angle = img_translate(im, angle)
return im, angle
def generator(samples, batch_size=32):
"""
Purpose: Yield tensor batches to fit_generator function
Inputs: A file path
Outputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix
Where AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE
"""
num_samples = len(samples)
shuffle(samples)
while 1:
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset + batch_size]
images = []
angles = []
for batch_sample in batch_samples:
image, angle = process_line(batch_sample)
images.append(image)
angles.append(angle)
X_train = np.array(images)
y_train = np.array(angles)
X_train, y_train = augment(X_train, y_train)
yield shuffle(X_train, y_train)
if __name__ == '__main__':
log = pd.read_csv(FOLDER + 'driving_log.csv').values
show_data(log)
print(log.shape)
train_samples, validation_samples = train_test_split(log, test_size=0.2)
im, an = process_line(train_samples[np.random.randint(len(train_samples))])
print(np.array(im).shape)
model = set_model()
adam = Adam(lr=Learning_Rate)
model.compile(optimizer=adam, loss='mean_squared_error')
history = model.fit_generator(generator(train_samples),
samples_per_epoch=SAMPLES_TRAIN, validation_data=generator(
validation_samples), nb_val_samples=SAMPLES_VALIDATION, nb_epoch=
EPOCHS, verbose=1)
model.save_weights('weights.h5')
model.save('model.h5')
print('Model saved')
def for_drive(im):
print(im.shape)
x = im
x = misc.imresize(x, size=DOWNSAMPLE_RATIO)
x = crop(x)
return x
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
from scipy import misc
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import time
import math
import cv2
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
from keras.models import Sequential
from keras.layers import Lambda
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import Adam
from keras.models import load_model
# Data augmentation constants
TRANS_X_RANGE = 10 # Number of translation pixels for augmented data (-RANGE/2, RANGE/2)
TRANS_Y_RANGE = 10 # Number of translation pixels for augmented data (-RANGE/2, RANGE/2)
TRANS_ANGLE = .3 # Maximum angle change when translating in the X direction
OFF_CENTER_IMG = .25 # Angle change when using off center images
DOWNSAMPLE_RATIO = 0.99
Learning_Rate = 0.0001
FOLDER = "examples/"
EPOCHS = 4
TRAINABLE = True
BRIGHTNESS_RANGE = 0.15
IMG_ROWS = 300
IMG_COLS = 300
SHAPE = (IMG_ROWS,IMG_COLS,3)
SAMPLES_TRAIN = 5000
SAMPLES_VALIDATION = 1000
def load_data(data):
temp = []
for i in range(len(data)):
im = cv2.imread(data[i])
im = misc.imresize(im,size=DOWNSAMPLE_RATIO)
im = crop(im)
# im = color_change(im)
temp.append(im)
return temp
def normalize(data):
a=-0.5
b=0.5
greyscale_min=0
greyscale_max=255
return a + ( ( (data - greyscale_min)*(b - a) )/(greyscale_max - greyscale_min))
def color_change(data):
x = cv2.cvtColor(data,cv2.COLOR_BGR2HSV)
return x
def adjust_brightness(im):
temp = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
# Compute a random brightness value and apply to the image
brightness = BRIGHTNESS_RANGE * np.random.uniform(-1,1)
temp[:, :, 2] = temp[:, :, 2] * (1-brightness)
# Convert back to RGB and return
return cv2.cvtColor(temp, cv2.COLOR_HSV2RGB)
def img_translate(img, angle):
# Randomly form the X translation distance and compute the resulting steering angle change
change = np.random.uniform(-0.5,0.5)
x_translation = (TRANS_X_RANGE * change)
new_angle = angle + (change * TRANS_ANGLE)
# Randomly compute a Y translation
y_translation = (TRANS_Y_RANGE * np.random.uniform(-0.5,0.5))
# Form the translation matrix
translation_matrix = np.float32([[1, 0, x_translation], [0, 1, y_translation]])
# Translate the image
return cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])),new_angle
def crop(im):
shape = np.array(im).shape
y1 = int(shape[0]*0.4)
y2 = int(shape[0]*0.87)
# print(y)
im = im[y1:y2 , : , :]
im = cv2.resize(im, (IMG_ROWS, IMG_COLS), interpolation=cv2.INTER_AREA)
return im
def curve_focus(xdata,ydata):
count = 0
for x in range(len(xdata)):
if(ydata[x]==0.000):
count+=1
print("Total = {}\n0 Steering = {}".format(len(xdata),count))
return xdata,ydata
def flip(xdata,ydata):
for x in range(len(xdata)):
flip = np.fliplr(xdata[x])
xdata = np.append(xdata, [flip], axis=0)
ydata = np.append(ydata, (-1*ydata[x]))
return xdata,ydata
def set_model():
model = Sequential()
model.add(Lambda(lambda x: x/127.5 - 1.,
input_shape=SHAPE,
output_shape=SHAPE))
model.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))
model.add(Convolution2D(36,5,5,border_mode='same',activation="elu",
name='conv1'))
model.add(Convolution2D(48,3,3,activation="elu",border_mode='same',
name='conv2'))
model.add(Convolution2D(64,3,3,activation="elu",border_mode='same',
name='conv3'))
model.add(Convolution2D(64,3,3,activation="elu",border_mode='same', name='conv4'))
model.add(Flatten(name='flat1'))
# model.add(Dropout(0.2))
# model.add(Dense(1164, activation="elu"))
# model.add(Dropout(.3, name='drop1'))
model.add(Dense(100, activation="elu", name='dense1'))
model.add(Dense(50, activation="elu", name='dense2'))
model.add(Dense(10, activation="elu", name='dense3'))
model.add(Dense(1, activation="linear", name='dense4'))
return model
def my_range(start, end, step):
while start <= end:
yield round(start,1)
start += step
def show_data(log):
fig = plt.figure(figsize=(8,2))
a = fig.add_subplot(1,2,1)
im = cv2.imread(FOLDER+log[560,0].strip())
im = cv2.cvtColor(im,cv2.COLOR_BGR2RGB)
a.set_title("Full Resolution")
plt.axis('off')
plt.imshow(im)
im = misc.imresize(im,size=0.2)
a = fig.add_subplot(1,2,2)
a.set_title("After 80% Downsampling")
plt.imshow(im)
# im = crop(im)
# im, an = process_line(log[600])
# a = fig.add_subplot(2,1,2)
# im, an = process_line(log[600])
# plt.imshow(im,aspect="auto",interpolation="nearest")
plt.axis('off')
fig.savefig('examples/Downsampling.png')
plt.show()
exit()
# plt.hist(steer,bins=100)
# plt.show()
# exit()
count = 1
y = 0
steer = log[:,3]
for x in my_range(-0.8,0.7,0.1):
while 1:
y = np.random.randint(len(steer))
if(round(steer[y],1)==x):
print("Found {}",(x))
break
# else:
# print("Discarded {}",steer[y])
a=fig.add_subplot(4,5,count)
im = cv2.imread(FOLDER+log[y,0])
im,angle = process_line(log[y])
a.set_title(str(x)+" to "+str(round(angle,1)))
plt.imshow(im,aspect="auto",interpolation="nearest")
count+=1
# print(x)
plt.show()
exit()
pic = np.random.randint(len(X_train))
print(X_train.shape)
plt.imshow(X_train[pic])
plt.show()
exit()
def augment(x,y):
x,y = flip(x,y)
return x,y
def process_line(sample):
img_choice = np.random.randint(3)
angle = 0.0
if(img_choice==0):
angle = float(sample[3])
elif(img_choice==1):
angle = float(sample[3])+0.27
elif(img_choice==2):
angle = float(sample[3])-0.27
im = cv2.imread(FOLDER+sample[img_choice].strip())
im = misc.imresize(im,size=DOWNSAMPLE_RATIO)
im = crop(im)
im = adjust_brightness(im)
im,angle = img_translate(im,angle)
# im = normalize(im)
return im,angle
def generator(samples, batch_size=32):
"""
	Purpose: Yield shuffled, augmented batches to the fit_generator call
	Inputs: samples, an array of driving-log rows (image paths + steering angle)
	Outputs: X_train, a [2*batch_size, IMG_ROWS, IMG_COLS, 3] tensor and
	y_train, a [2*batch_size] vector (augment() doubles each batch via flip())
"""
num_samples = len(samples)
shuffle(samples)
while 1: # Loop forever so the generator never terminates
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
image,angle = process_line(batch_sample)
images.append(image)
angles.append(angle)
# trim image to only see section with road
X_train = np.array(images)
y_train = np.array(angles)
X_train, y_train = augment(X_train,y_train)
yield shuffle(X_train, y_train)
if __name__ == "__main__":
log = pd.read_csv(FOLDER+"driving_log.csv").values
show_data(log)
print(log.shape)
train_samples, validation_samples = train_test_split(log,test_size=0.2)
im,an = process_line(train_samples[np.random.randint(len(train_samples))])
print(np.array(im).shape)
# plt.imshow(im)
# plt.title(str(an))
# plt.show()
# exit()
model = set_model()
# model.load_weights('weights.h5',by_name=True)
adam = Adam(lr=Learning_Rate)
model.compile(optimizer = adam, loss = 'mean_squared_error')
history=model.fit_generator(generator(train_samples), samples_per_epoch =
SAMPLES_TRAIN, validation_data=generator(validation_samples),
nb_val_samples=SAMPLES_VALIDATION, nb_epoch=EPOCHS, verbose=1)
model.save_weights('weights.h5')
model.save('model.h5')
print("Model saved")
def for_drive(im):
print(im.shape)
x = im
x = misc.imresize(x,size=DOWNSAMPLE_RATIO)
x = crop(x)
# plt.imshow(x)
# plt.show()
# x = color_change(x)
# x = normalize(x)
return x
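
# A quick smoke-test sketch for the generator (assumes the __main__ block
# above has loaded `train_samples`; not part of the original script).
# augment() appends a horizontally flipped copy of every image via flip(),
# so a batch of 4 log rows yields 8 examples of shape (IMG_ROWS, IMG_COLS, 3):
#
#   X, y = next(generator(train_samples, batch_size=4))
#   print(X.shape, y.shape)   # -> (8, 300, 300, 3) (8,)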
|
flexible
|
{
"blob_id": "b109568c4dba05b16cbed1759a2b9e0a99babc67",
"index": 2982,
"step-1": "<mask token>\n\n\ndef load_data(data):\n temp = []\n for i in range(len(data)):\n im = cv2.imread(data[i])\n im = misc.imresize(im, size=DOWNSAMPLE_RATIO)\n im = crop(im)\n temp.append(im)\n return temp\n\n\ndef normalize(data):\n a = -0.5\n b = 0.5\n greyscale_min = 0\n greyscale_max = 255\n return a + (data - greyscale_min) * (b - a) / (greyscale_max -\n greyscale_min)\n\n\ndef color_change(data):\n x = cv2.cvtColor(data, cv2.COLOR_BGR2HSV)\n return x\n\n\n<mask token>\n\n\ndef img_translate(img, angle):\n change = np.random.uniform(-0.5, 0.5)\n x_translation = TRANS_X_RANGE * change\n new_angle = angle + change * TRANS_ANGLE\n y_translation = TRANS_Y_RANGE * np.random.uniform(-0.5, 0.5)\n translation_matrix = np.float32([[1, 0, x_translation], [0, 1,\n y_translation]])\n return cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])\n ), new_angle\n\n\n<mask token>\n\n\ndef curve_focus(xdata, ydata):\n count = 0\n for x in range(len(xdata)):\n if ydata[x] == 0.0:\n count += 1\n print('Total = {}\\n0 Steering = {}'.format(len(xdata), count))\n return xdata, ydata\n\n\n<mask token>\n\n\ndef set_model():\n model = Sequential()\n model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=SHAPE,\n output_shape=SHAPE))\n model.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))\n model.add(Convolution2D(36, 5, 5, border_mode='same', activation='elu',\n name='conv1'))\n model.add(Convolution2D(48, 3, 3, activation='elu', border_mode='same',\n name='conv2'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv3'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv4'))\n model.add(Flatten(name='flat1'))\n model.add(Dense(100, activation='elu', name='dense1'))\n model.add(Dense(50, activation='elu', name='dense2'))\n model.add(Dense(10, activation='elu', name='dense3'))\n model.add(Dense(1, activation='linear', name='dense4'))\n return model\n\n\n<mask token>\n\n\ndef augment(x, y):\n x, y = flip(x, y)\n return x, y\n\n\n<mask token>\n\n\ndef generator(samples, batch_size=32):\n \"\"\"\n\tPurpose: Yield tensor batches to fit_generator function\n\tInputs: A file path\n\tOutputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix\n\tWhere AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE\n\t\"\"\"\n num_samples = len(samples)\n shuffle(samples)\n while 1:\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset + batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n image, angle = process_line(batch_sample)\n images.append(image)\n angles.append(angle)\n X_train = np.array(images)\n y_train = np.array(angles)\n X_train, y_train = augment(X_train, y_train)\n yield shuffle(X_train, y_train)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_data(data):\n temp = []\n for i in range(len(data)):\n im = cv2.imread(data[i])\n im = misc.imresize(im, size=DOWNSAMPLE_RATIO)\n im = crop(im)\n temp.append(im)\n return temp\n\n\ndef normalize(data):\n a = -0.5\n b = 0.5\n greyscale_min = 0\n greyscale_max = 255\n return a + (data - greyscale_min) * (b - a) / (greyscale_max -\n greyscale_min)\n\n\ndef color_change(data):\n x = cv2.cvtColor(data, cv2.COLOR_BGR2HSV)\n return x\n\n\n<mask token>\n\n\ndef img_translate(img, angle):\n change = np.random.uniform(-0.5, 0.5)\n x_translation = TRANS_X_RANGE * change\n new_angle = angle + change * TRANS_ANGLE\n y_translation = TRANS_Y_RANGE * np.random.uniform(-0.5, 0.5)\n translation_matrix = np.float32([[1, 0, x_translation], [0, 1,\n y_translation]])\n return cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])\n ), new_angle\n\n\n<mask token>\n\n\ndef curve_focus(xdata, ydata):\n count = 0\n for x in range(len(xdata)):\n if ydata[x] == 0.0:\n count += 1\n print('Total = {}\\n0 Steering = {}'.format(len(xdata), count))\n return xdata, ydata\n\n\n<mask token>\n\n\ndef set_model():\n model = Sequential()\n model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=SHAPE,\n output_shape=SHAPE))\n model.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))\n model.add(Convolution2D(36, 5, 5, border_mode='same', activation='elu',\n name='conv1'))\n model.add(Convolution2D(48, 3, 3, activation='elu', border_mode='same',\n name='conv2'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv3'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv4'))\n model.add(Flatten(name='flat1'))\n model.add(Dense(100, activation='elu', name='dense1'))\n model.add(Dense(50, activation='elu', name='dense2'))\n model.add(Dense(10, activation='elu', name='dense3'))\n model.add(Dense(1, activation='linear', name='dense4'))\n return model\n\n\n<mask token>\n\n\ndef show_data(log):\n fig = plt.figure(figsize=(8, 2))\n a = fig.add_subplot(1, 2, 1)\n im = cv2.imread(FOLDER + log[560, 0].strip())\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n a.set_title('Full Resolution')\n plt.axis('off')\n plt.imshow(im)\n im = misc.imresize(im, size=0.2)\n a = fig.add_subplot(1, 2, 2)\n a.set_title('After 80% Downsampling')\n plt.imshow(im)\n plt.axis('off')\n fig.savefig('examples/Downsampling.png')\n plt.show()\n exit()\n count = 1\n y = 0\n steer = log[:, 3]\n for x in my_range(-0.8, 0.7, 0.1):\n while 1:\n y = np.random.randint(len(steer))\n if round(steer[y], 1) == x:\n print('Found {}', x)\n break\n a = fig.add_subplot(4, 5, count)\n im = cv2.imread(FOLDER + log[y, 0])\n im, angle = process_line(log[y])\n a.set_title(str(x) + ' to ' + str(round(angle, 1)))\n plt.imshow(im, aspect='auto', interpolation='nearest')\n count += 1\n plt.show()\n exit()\n pic = np.random.randint(len(X_train))\n print(X_train.shape)\n plt.imshow(X_train[pic])\n plt.show()\n exit()\n\n\ndef augment(x, y):\n x, y = flip(x, y)\n return x, y\n\n\ndef process_line(sample):\n img_choice = np.random.randint(3)\n angle = 0.0\n if img_choice == 0:\n angle = float(sample[3])\n elif img_choice == 1:\n angle = float(sample[3]) + 0.27\n elif img_choice == 2:\n angle = float(sample[3]) - 0.27\n im = cv2.imread(FOLDER + sample[img_choice].strip())\n im = misc.imresize(im, size=DOWNSAMPLE_RATIO)\n im = crop(im)\n im = adjust_brightness(im)\n im, angle = img_translate(im, angle)\n return im, angle\n\n\ndef generator(samples, batch_size=32):\n 
\"\"\"\n\tPurpose: Yield tensor batches to fit_generator function\n\tInputs: A file path\n\tOutputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix\n\tWhere AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE\n\t\"\"\"\n num_samples = len(samples)\n shuffle(samples)\n while 1:\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset + batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n image, angle = process_line(batch_sample)\n images.append(image)\n angles.append(angle)\n X_train = np.array(images)\n y_train = np.array(angles)\n X_train, y_train = augment(X_train, y_train)\n yield shuffle(X_train, y_train)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_data(data):\n temp = []\n for i in range(len(data)):\n im = cv2.imread(data[i])\n im = misc.imresize(im, size=DOWNSAMPLE_RATIO)\n im = crop(im)\n temp.append(im)\n return temp\n\n\ndef normalize(data):\n a = -0.5\n b = 0.5\n greyscale_min = 0\n greyscale_max = 255\n return a + (data - greyscale_min) * (b - a) / (greyscale_max -\n greyscale_min)\n\n\ndef color_change(data):\n x = cv2.cvtColor(data, cv2.COLOR_BGR2HSV)\n return x\n\n\n<mask token>\n\n\ndef img_translate(img, angle):\n change = np.random.uniform(-0.5, 0.5)\n x_translation = TRANS_X_RANGE * change\n new_angle = angle + change * TRANS_ANGLE\n y_translation = TRANS_Y_RANGE * np.random.uniform(-0.5, 0.5)\n translation_matrix = np.float32([[1, 0, x_translation], [0, 1,\n y_translation]])\n return cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])\n ), new_angle\n\n\n<mask token>\n\n\ndef curve_focus(xdata, ydata):\n count = 0\n for x in range(len(xdata)):\n if ydata[x] == 0.0:\n count += 1\n print('Total = {}\\n0 Steering = {}'.format(len(xdata), count))\n return xdata, ydata\n\n\ndef flip(xdata, ydata):\n for x in range(len(xdata)):\n flip = np.fliplr(xdata[x])\n xdata = np.append(xdata, [flip], axis=0)\n ydata = np.append(ydata, -1 * ydata[x])\n return xdata, ydata\n\n\ndef set_model():\n model = Sequential()\n model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=SHAPE,\n output_shape=SHAPE))\n model.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))\n model.add(Convolution2D(36, 5, 5, border_mode='same', activation='elu',\n name='conv1'))\n model.add(Convolution2D(48, 3, 3, activation='elu', border_mode='same',\n name='conv2'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv3'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv4'))\n model.add(Flatten(name='flat1'))\n model.add(Dense(100, activation='elu', name='dense1'))\n model.add(Dense(50, activation='elu', name='dense2'))\n model.add(Dense(10, activation='elu', name='dense3'))\n model.add(Dense(1, activation='linear', name='dense4'))\n return model\n\n\ndef my_range(start, end, step):\n while start <= end:\n yield round(start, 1)\n start += step\n\n\ndef show_data(log):\n fig = plt.figure(figsize=(8, 2))\n a = fig.add_subplot(1, 2, 1)\n im = cv2.imread(FOLDER + log[560, 0].strip())\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n a.set_title('Full Resolution')\n plt.axis('off')\n plt.imshow(im)\n im = misc.imresize(im, size=0.2)\n a = fig.add_subplot(1, 2, 2)\n a.set_title('After 80% Downsampling')\n plt.imshow(im)\n plt.axis('off')\n fig.savefig('examples/Downsampling.png')\n plt.show()\n exit()\n count = 1\n y = 0\n steer = log[:, 3]\n for x in my_range(-0.8, 0.7, 0.1):\n while 1:\n y = np.random.randint(len(steer))\n if round(steer[y], 1) == x:\n print('Found {}', x)\n break\n a = fig.add_subplot(4, 5, count)\n im = cv2.imread(FOLDER + log[y, 0])\n im, angle = process_line(log[y])\n a.set_title(str(x) + ' to ' + str(round(angle, 1)))\n plt.imshow(im, aspect='auto', interpolation='nearest')\n count += 1\n plt.show()\n exit()\n pic = np.random.randint(len(X_train))\n print(X_train.shape)\n plt.imshow(X_train[pic])\n plt.show()\n exit()\n\n\ndef augment(x, y):\n x, y = flip(x, y)\n return x, y\n\n\ndef process_line(sample):\n img_choice = np.random.randint(3)\n angle = 0.0\n if img_choice == 0:\n angle = float(sample[3])\n elif img_choice == 1:\n angle = float(sample[3]) + 0.27\n elif img_choice == 2:\n angle = float(sample[3]) - 
0.27\n im = cv2.imread(FOLDER + sample[img_choice].strip())\n im = misc.imresize(im, size=DOWNSAMPLE_RATIO)\n im = crop(im)\n im = adjust_brightness(im)\n im, angle = img_translate(im, angle)\n return im, angle\n\n\ndef generator(samples, batch_size=32):\n \"\"\"\n\tPurpose: Yield tensor batches to fit_generator function\n\tInputs: A file path\n\tOutputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix\n\tWhere AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE\n\t\"\"\"\n num_samples = len(samples)\n shuffle(samples)\n while 1:\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset + batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n image, angle = process_line(batch_sample)\n images.append(image)\n angles.append(angle)\n X_train = np.array(images)\n y_train = np.array(angles)\n X_train, y_train = augment(X_train, y_train)\n yield shuffle(X_train, y_train)\n\n\n<mask token>\n",
"step-4": "<mask token>\nmatplotlib.use('TkAgg')\n<mask token>\n\n\ndef load_data(data):\n temp = []\n for i in range(len(data)):\n im = cv2.imread(data[i])\n im = misc.imresize(im, size=DOWNSAMPLE_RATIO)\n im = crop(im)\n temp.append(im)\n return temp\n\n\ndef normalize(data):\n a = -0.5\n b = 0.5\n greyscale_min = 0\n greyscale_max = 255\n return a + (data - greyscale_min) * (b - a) / (greyscale_max -\n greyscale_min)\n\n\ndef color_change(data):\n x = cv2.cvtColor(data, cv2.COLOR_BGR2HSV)\n return x\n\n\ndef adjust_brightness(im):\n temp = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n brightness = BRIGHTNESS_RANGE * np.random.uniform(-1, 1)\n temp[:, :, 2] = temp[:, :, 2] * (1 - brightness)\n return cv2.cvtColor(temp, cv2.COLOR_HSV2RGB)\n\n\ndef img_translate(img, angle):\n change = np.random.uniform(-0.5, 0.5)\n x_translation = TRANS_X_RANGE * change\n new_angle = angle + change * TRANS_ANGLE\n y_translation = TRANS_Y_RANGE * np.random.uniform(-0.5, 0.5)\n translation_matrix = np.float32([[1, 0, x_translation], [0, 1,\n y_translation]])\n return cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])\n ), new_angle\n\n\ndef crop(im):\n shape = np.array(im).shape\n y1 = int(shape[0] * 0.4)\n y2 = int(shape[0] * 0.87)\n im = im[y1:y2, :, :]\n im = cv2.resize(im, (IMG_ROWS, IMG_COLS), interpolation=cv2.INTER_AREA)\n return im\n\n\ndef curve_focus(xdata, ydata):\n count = 0\n for x in range(len(xdata)):\n if ydata[x] == 0.0:\n count += 1\n print('Total = {}\\n0 Steering = {}'.format(len(xdata), count))\n return xdata, ydata\n\n\ndef flip(xdata, ydata):\n for x in range(len(xdata)):\n flip = np.fliplr(xdata[x])\n xdata = np.append(xdata, [flip], axis=0)\n ydata = np.append(ydata, -1 * ydata[x])\n return xdata, ydata\n\n\ndef set_model():\n model = Sequential()\n model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=SHAPE,\n output_shape=SHAPE))\n model.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))\n model.add(Convolution2D(36, 5, 5, border_mode='same', activation='elu',\n name='conv1'))\n model.add(Convolution2D(48, 3, 3, activation='elu', border_mode='same',\n name='conv2'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv3'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv4'))\n model.add(Flatten(name='flat1'))\n model.add(Dense(100, activation='elu', name='dense1'))\n model.add(Dense(50, activation='elu', name='dense2'))\n model.add(Dense(10, activation='elu', name='dense3'))\n model.add(Dense(1, activation='linear', name='dense4'))\n return model\n\n\ndef my_range(start, end, step):\n while start <= end:\n yield round(start, 1)\n start += step\n\n\ndef show_data(log):\n fig = plt.figure(figsize=(8, 2))\n a = fig.add_subplot(1, 2, 1)\n im = cv2.imread(FOLDER + log[560, 0].strip())\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n a.set_title('Full Resolution')\n plt.axis('off')\n plt.imshow(im)\n im = misc.imresize(im, size=0.2)\n a = fig.add_subplot(1, 2, 2)\n a.set_title('After 80% Downsampling')\n plt.imshow(im)\n plt.axis('off')\n fig.savefig('examples/Downsampling.png')\n plt.show()\n exit()\n count = 1\n y = 0\n steer = log[:, 3]\n for x in my_range(-0.8, 0.7, 0.1):\n while 1:\n y = np.random.randint(len(steer))\n if round(steer[y], 1) == x:\n print('Found {}', x)\n break\n a = fig.add_subplot(4, 5, count)\n im = cv2.imread(FOLDER + log[y, 0])\n im, angle = process_line(log[y])\n a.set_title(str(x) + ' to ' + str(round(angle, 1)))\n plt.imshow(im, aspect='auto', 
interpolation='nearest')\n count += 1\n plt.show()\n exit()\n pic = np.random.randint(len(X_train))\n print(X_train.shape)\n plt.imshow(X_train[pic])\n plt.show()\n exit()\n\n\ndef augment(x, y):\n x, y = flip(x, y)\n return x, y\n\n\ndef process_line(sample):\n img_choice = np.random.randint(3)\n angle = 0.0\n if img_choice == 0:\n angle = float(sample[3])\n elif img_choice == 1:\n angle = float(sample[3]) + 0.27\n elif img_choice == 2:\n angle = float(sample[3]) - 0.27\n im = cv2.imread(FOLDER + sample[img_choice].strip())\n im = misc.imresize(im, size=DOWNSAMPLE_RATIO)\n im = crop(im)\n im = adjust_brightness(im)\n im, angle = img_translate(im, angle)\n return im, angle\n\n\ndef generator(samples, batch_size=32):\n \"\"\"\n\tPurpose: Yield tensor batches to fit_generator function\n\tInputs: A file path\n\tOutputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix\n\tWhere AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE\n\t\"\"\"\n num_samples = len(samples)\n shuffle(samples)\n while 1:\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset + batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n image, angle = process_line(batch_sample)\n images.append(image)\n angles.append(angle)\n X_train = np.array(images)\n y_train = np.array(angles)\n X_train, y_train = augment(X_train, y_train)\n yield shuffle(X_train, y_train)\n\n\nif __name__ == '__main__':\n log = pd.read_csv(FOLDER + 'driving_log.csv').values\n show_data(log)\n print(log.shape)\n train_samples, validation_samples = train_test_split(log, test_size=0.2)\n im, an = process_line(train_samples[np.random.randint(len(train_samples))])\n print(np.array(im).shape)\n model = set_model()\n adam = Adam(lr=Learning_Rate)\n model.compile(optimizer=adam, loss='mean_squared_error')\n history = model.fit_generator(generator(train_samples),\n samples_per_epoch=SAMPLES_TRAIN, validation_data=generator(\n validation_samples), nb_val_samples=SAMPLES_VALIDATION, nb_epoch=\n EPOCHS, verbose=1)\n model.save_weights('weights.h5')\n model.save('model.h5')\n print('Model saved')\n\n\ndef for_drive(im):\n print(im.shape)\n x = im\n x = misc.imresize(x, size=DOWNSAMPLE_RATIO)\n x = crop(x)\n return x\n",
"step-5": "import pandas as pd\nimport numpy as np\nfrom scipy import misc\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nimport time\nimport math\nimport cv2\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib import pyplot as plt\n\nfrom keras.models import Sequential\nfrom keras.layers import Lambda\nfrom keras.layers.core import Dense, Activation, Flatten, Dropout\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.optimizers import Adam\nfrom keras.models import load_model\n\n# Data augmentation constants\nTRANS_X_RANGE = 10 # Number of translation pixels for augmented data (-RANGE/2, RANGE/2)\nTRANS_Y_RANGE = 10 # Number of translation pixels for augmented data (-RANGE/2, RANGE/2)\nTRANS_ANGLE = .3 # Maximum angle change when translating in the X direction\nOFF_CENTER_IMG = .25 # Angle change when using off center images\n\nDOWNSAMPLE_RATIO = 0.99\nLearning_Rate = 0.0001\nFOLDER = \"examples/\"\nEPOCHS = 4\nTRAINABLE = True\nBRIGHTNESS_RANGE = 0.15\nIMG_ROWS = 300\nIMG_COLS = 300\nSHAPE = (IMG_ROWS,IMG_COLS,3)\n\nSAMPLES_TRAIN = 5000\nSAMPLES_VALIDATION = 1000\n\n\ndef load_data(data):\n\ttemp = []\n\tfor i in range(len(data)):\n\t\tim = cv2.imread(data[i])\n\t\tim = misc.imresize(im,size=DOWNSAMPLE_RATIO)\n\t\tim = crop(im)\n\t\t# im = color_change(im)\n\t\ttemp.append(im)\n\treturn temp\n\ndef normalize(data):\n\ta=-0.5\n\tb=0.5\n\tgreyscale_min=0\n\tgreyscale_max=255\n\treturn a + ( ( (data - greyscale_min)*(b - a) )/(greyscale_max - greyscale_min))\n\ndef color_change(data):\n\tx = cv2.cvtColor(data,cv2.COLOR_BGR2HSV)\n\treturn x\n\ndef adjust_brightness(im):\n\ttemp = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n\t# Compute a random brightness value and apply to the image\n\tbrightness = BRIGHTNESS_RANGE * np.random.uniform(-1,1)\n\ttemp[:, :, 2] = temp[:, :, 2] * (1-brightness)\n\t# Convert back to RGB and return\n\treturn cv2.cvtColor(temp, cv2.COLOR_HSV2RGB)\n\ndef img_translate(img, angle):\n\n\t# Randomly form the X translation distance and compute the resulting steering angle change\n\tchange = np.random.uniform(-0.5,0.5)\n\tx_translation = (TRANS_X_RANGE * change)\n\tnew_angle = angle + (change * TRANS_ANGLE)\n\n\t# Randomly compute a Y translation\n\ty_translation = (TRANS_Y_RANGE * np.random.uniform(-0.5,0.5))\n\n\t# Form the translation matrix\n\ttranslation_matrix = np.float32([[1, 0, x_translation], [0, 1, y_translation]])\n\n\t# Translate the image\n\treturn cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])),new_angle\n\n\ndef crop(im):\n\tshape = np.array(im).shape\n\ty1 = int(shape[0]*0.4)\n\ty2 = int(shape[0]*0.87)\n\t# print(y)\n\tim = im[y1:y2 , : , :]\n\tim = cv2.resize(im, (IMG_ROWS, IMG_COLS), interpolation=cv2.INTER_AREA)\n\treturn im\n\ndef curve_focus(xdata,ydata):\n\tcount = 0\n\tfor x in range(len(xdata)):\n\t\tif(ydata[x]==0.000):\n\t\t\tcount+=1\n\tprint(\"Total = {}\\n0 Steering = {}\".format(len(xdata),count))\n\treturn xdata,ydata\n\ndef flip(xdata,ydata):\n\tfor x in range(len(xdata)):\n\t\tflip = np.fliplr(xdata[x])\n\t\txdata = np.append(xdata, [flip], axis=0)\n\t\tydata = np.append(ydata, (-1*ydata[x]))\n\treturn xdata,ydata\n\ndef set_model():\n\tmodel = Sequential()\n\tmodel.add(Lambda(lambda x: x/127.5 - 1.,\n\t\tinput_shape=SHAPE,\n\t\toutput_shape=SHAPE))\n\tmodel.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))\n\tmodel.add(Convolution2D(36,5,5,border_mode='same',activation=\"elu\",\n\t 
name='conv1'))\n\tmodel.add(Convolution2D(48,3,3,activation=\"elu\",border_mode='same',\n\t name='conv2'))\n\tmodel.add(Convolution2D(64,3,3,activation=\"elu\",border_mode='same', \n\t\tname='conv3'))\n\tmodel.add(Convolution2D(64,3,3,activation=\"elu\",border_mode='same', name='conv4'))\n\tmodel.add(Flatten(name='flat1'))\n\t# model.add(Dropout(0.2))\n\t# model.add(Dense(1164, activation=\"elu\"))\n\t# model.add(Dropout(.3, name='drop1'))\n\tmodel.add(Dense(100, activation=\"elu\", name='dense1'))\n\tmodel.add(Dense(50, activation=\"elu\", name='dense2'))\n\tmodel.add(Dense(10, activation=\"elu\", name='dense3'))\n\tmodel.add(Dense(1, activation=\"linear\", name='dense4'))\n\treturn model\n\ndef my_range(start, end, step):\n\twhile start <= end:\n\t\tyield round(start,1)\n\t\tstart += step\n\ndef show_data(log):\n\tfig = plt.figure(figsize=(8,2))\n\ta = fig.add_subplot(1,2,1)\n\tim = cv2.imread(FOLDER+log[560,0].strip())\n\tim = cv2.cvtColor(im,cv2.COLOR_BGR2RGB)\n\ta.set_title(\"Full Resolution\")\n\tplt.axis('off')\n\tplt.imshow(im)\n\tim = misc.imresize(im,size=0.2)\n\ta = fig.add_subplot(1,2,2)\n\ta.set_title(\"After 80% Downsampling\")\n\tplt.imshow(im)\n\t# im = crop(im)\n\t# im, an = process_line(log[600])\n\t# a = fig.add_subplot(2,1,2)\n\t# im, an = process_line(log[600])\n\t# plt.imshow(im,aspect=\"auto\",interpolation=\"nearest\")\n\tplt.axis('off')\n\tfig.savefig('examples/Downsampling.png')\n\tplt.show()\n\texit()\n\t# plt.hist(steer,bins=100)\n\t# plt.show()\n\t# exit()\n\tcount = 1\n\ty = 0\n\tsteer = log[:,3]\n\tfor x in my_range(-0.8,0.7,0.1):\n\t\twhile 1:\n\t\t\ty = np.random.randint(len(steer))\n\t\t\tif(round(steer[y],1)==x):\n\t\t\t\tprint(\"Found {}\",(x))\n\t\t\t\tbreak\n\t\t\t# else:\n\t\t\t# \tprint(\"Discarded {}\",steer[y])\n\t\ta=fig.add_subplot(4,5,count)\n\t\tim = cv2.imread(FOLDER+log[y,0])\n\t\tim,angle = process_line(log[y])\n\t\ta.set_title(str(x)+\" to \"+str(round(angle,1)))\n\t\tplt.imshow(im,aspect=\"auto\",interpolation=\"nearest\")\n\t\tcount+=1\n\t\t# print(x)\n\tplt.show()\n\n\texit()\n\tpic = np.random.randint(len(X_train))\n\tprint(X_train.shape)\n\tplt.imshow(X_train[pic])\n\tplt.show()\n\texit()\n\ndef augment(x,y):\n\tx,y = flip(x,y)\n\treturn x,y\n\ndef process_line(sample):\n\n\timg_choice = np.random.randint(3)\t\n\n\tangle = 0.0\n\tif(img_choice==0):\n\t\tangle = float(sample[3])\n\telif(img_choice==1):\n\t\tangle = float(sample[3])+0.27\n\telif(img_choice==2):\n\t\tangle = float(sample[3])-0.27\n\n\tim = cv2.imread(FOLDER+sample[img_choice].strip())\n\tim = misc.imresize(im,size=DOWNSAMPLE_RATIO)\n\tim = crop(im)\n\tim = adjust_brightness(im)\n\tim,angle = img_translate(im,angle)\n\t# im = normalize(im)\n\n\treturn im,angle\n\ndef generator(samples, batch_size=32):\n\t\"\"\"\n\tPurpose: Yield tensor batches to fit_generator function\n\tInputs: A file path\n\tOutputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix\n\tWhere AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE\n\t\"\"\"\n\tnum_samples = len(samples)\n\tshuffle(samples)\n\twhile 1: # Loop forever so the generator never terminates\n\t\tfor offset in range(0, num_samples, batch_size):\n\t\t\tbatch_samples = samples[offset:offset+batch_size]\n\n\t\t\timages = []\n\t\t\tangles = []\n\t\t\tfor batch_sample in batch_samples:\n\t\t\t\timage,angle = process_line(batch_sample)\n\t\t\t\timages.append(image)\n\t\t\t\tangles.append(angle)\n\n\t\t\t# trim image to only see section with road\n\t\t\tX_train = np.array(images)\n\t\t\ty_train = 
np.array(angles)\n\t\t\tX_train, y_train = augment(X_train,y_train)\n\t\t\tyield shuffle(X_train, y_train)\n\n\nif __name__ == \"__main__\":\n\tlog = pd.read_csv(FOLDER+\"driving_log.csv\").values\n\tshow_data(log)\n\t\n\n\tprint(log.shape)\n\ttrain_samples, validation_samples = train_test_split(log,test_size=0.2)\n\t\n\tim,an = process_line(train_samples[np.random.randint(len(train_samples))])\n\tprint(np.array(im).shape)\n\t# plt.imshow(im)\n\t# plt.title(str(an))\n\t# plt.show()\n\t# exit()\n\tmodel = set_model()\n\t# model.load_weights('weights.h5',by_name=True) \n\n\tadam = Adam(lr=Learning_Rate)\n\tmodel.compile(optimizer = adam, loss = 'mean_squared_error')\n\thistory=model.fit_generator(generator(train_samples), samples_per_epoch = \n\t\t\tSAMPLES_TRAIN, validation_data=generator(validation_samples), \n\t\t\tnb_val_samples=SAMPLES_VALIDATION, nb_epoch=EPOCHS, verbose=1)\n\tmodel.save_weights('weights.h5')\n\tmodel.save('model.h5')\n\t\n\tprint(\"Model saved\")\n\ndef for_drive(im):\n\tprint(im.shape)\n\tx = im\n\tx = misc.imresize(x,size=DOWNSAMPLE_RATIO)\n\tx = crop(x)\n\t# plt.imshow(x)\n\t# plt.show()\n\t# x = color_change(x)\n\t# x = normalize(x)\n\treturn x\n\n",
"step-ids": [
8,
10,
12,
16,
19
]
}
|
[
8,
10,
12,
16,
19
] |
<|reserved_special_token_0|>
class TestListDiff(TestCase):
<|reserved_special_token_0|>
def test_two(self):
assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TestDiffLR(TestCase):
def test_one(self):
assert list_diff_left_right([1, 2], [1]) == ([2], [])
def test_two(self):
assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])
def test_three(self):
assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2],
[3, 3])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestListDiff(TestCase):
def test_one(self):
assert list_diff([1, 2], [1]) == [2]
def test_two(self):
assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]
def test_three(self):
assert list_diff([1, 2, 2, 2, 3], [1, 3]) == [2, 2, 2]
<|reserved_special_token_0|>
class TestDiffLR(TestCase):
def test_one(self):
assert list_diff_left_right([1, 2], [1]) == ([2], [])
def test_two(self):
assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])
def test_three(self):
assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2],
[3, 3])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestListDiff(TestCase):
def test_one(self):
assert list_diff([1, 2], [1]) == [2]
def test_two(self):
assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]
def test_three(self):
assert list_diff([1, 2, 2, 2, 3], [1, 3]) == [2, 2, 2]
def list_diff_left_right(a, b):
left = [x for x in a if x not in b]
right = [x for x in b if x not in a]
return left, right
class TestDiffLR(TestCase):
def test_one(self):
assert list_diff_left_right([1, 2], [1]) == ([2], [])
def test_two(self):
assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])
def test_three(self):
assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2],
[3, 3])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def list_diff(a, b):
return [x for x in a if x not in b]
class TestListDiff(TestCase):
def test_one(self):
assert list_diff([1, 2], [1]) == [2]
def test_two(self):
assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]
def test_three(self):
assert list_diff([1, 2, 2, 2, 3], [1, 3]) == [2, 2, 2]
def list_diff_left_right(a, b):
left = [x for x in a if x not in b]
right = [x for x in b if x not in a]
return left, right
class TestDiffLR(TestCase):
def test_one(self):
assert list_diff_left_right([1, 2], [1]) == ([2], [])
def test_two(self):
assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])
def test_three(self):
assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2],
[3, 3])
<|reserved_special_token_1|>
"""
Array.diff
Our goal in this kata is to implement a difference function,
which subtracts one list from another and returns the result.
It should remove all values from list a that are present in list b, keeping the order of the remaining elements.
"""
from unittest import TestCase
def list_diff(a, b):
return [x for x in a if x not in b]
class TestListDiff(TestCase):
def test_one(self):
assert list_diff([1, 2], [1]) == [2]
def test_two(self):
assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]
def test_three(self):
assert list_diff([1, 2, 2, 2, 3], [1, 3]) == [2, 2, 2]
def list_diff_left_right(a, b):
left = [x for x in a if x not in b]
right = [x for x in b if x not in a]
return left, right
class TestDiffLR(TestCase):
def test_one(self):
assert list_diff_left_right([1, 2], [1]) == ([2], [])
def test_two(self):
assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])
def test_three(self):
assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2], [3, 3])
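
# A quick sanity check of the order-preserving behaviour described in the
# module docstring (expected values follow from the definitions above):
#
#   list_diff([1, 2, 2, 3, 4], [2, 4]) -> [1, 3]
#   list_diff_left_right([1, 2, 2, 3], [3, 5]) -> ([1, 2, 2], [5])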
|
flexible
|
{
"blob_id": "76526bdff7418997ac90f761936abccbb3468499",
"index": 6513,
"step-1": "<mask token>\n\n\nclass TestListDiff(TestCase):\n <mask token>\n\n def test_two(self):\n assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]\n <mask token>\n\n\n<mask token>\n\n\nclass TestDiffLR(TestCase):\n\n def test_one(self):\n assert list_diff_left_right([1, 2], [1]) == ([2], [])\n\n def test_two(self):\n assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])\n\n def test_three(self):\n assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2],\n [3, 3])\n",
"step-2": "<mask token>\n\n\nclass TestListDiff(TestCase):\n\n def test_one(self):\n assert list_diff([1, 2], [1]) == [2]\n\n def test_two(self):\n assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]\n\n def test_three(self):\n assert list_diff([1, 2, 2, 2, 3], [1, 3]) == [2, 2, 2]\n\n\n<mask token>\n\n\nclass TestDiffLR(TestCase):\n\n def test_one(self):\n assert list_diff_left_right([1, 2], [1]) == ([2], [])\n\n def test_two(self):\n assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])\n\n def test_three(self):\n assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2],\n [3, 3])\n",
"step-3": "<mask token>\n\n\nclass TestListDiff(TestCase):\n\n def test_one(self):\n assert list_diff([1, 2], [1]) == [2]\n\n def test_two(self):\n assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]\n\n def test_three(self):\n assert list_diff([1, 2, 2, 2, 3], [1, 3]) == [2, 2, 2]\n\n\ndef list_diff_left_right(a, b):\n left = [x for x in a if x not in b]\n right = [x for x in b if x not in a]\n return left, right\n\n\nclass TestDiffLR(TestCase):\n\n def test_one(self):\n assert list_diff_left_right([1, 2], [1]) == ([2], [])\n\n def test_two(self):\n assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])\n\n def test_three(self):\n assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2],\n [3, 3])\n",
"step-4": "<mask token>\n\n\ndef list_diff(a, b):\n return [x for x in a if x not in b]\n\n\nclass TestListDiff(TestCase):\n\n def test_one(self):\n assert list_diff([1, 2], [1]) == [2]\n\n def test_two(self):\n assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]\n\n def test_three(self):\n assert list_diff([1, 2, 2, 2, 3], [1, 3]) == [2, 2, 2]\n\n\ndef list_diff_left_right(a, b):\n left = [x for x in a if x not in b]\n right = [x for x in b if x not in a]\n return left, right\n\n\nclass TestDiffLR(TestCase):\n\n def test_one(self):\n assert list_diff_left_right([1, 2], [1]) == ([2], [])\n\n def test_two(self):\n assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])\n\n def test_three(self):\n assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2],\n [3, 3])\n",
"step-5": "\"\"\"\nArray.diff\nOur goal in this kata is to implement a difference function,\n which subtracts one list from another and returns the result.\nIt should remove all values from list a, which are present in list b keeping their order.\n\"\"\"\nfrom unittest import TestCase\n\n\ndef list_diff(a, b):\n return [x for x in a if x not in b]\n\n\nclass TestListDiff(TestCase):\n def test_one(self):\n assert list_diff([1, 2], [1]) == [2]\n\n def test_two(self):\n assert list_diff([1, 2, 2, 2, 3], [2]) == [1, 3]\n\n def test_three(self):\n assert list_diff([1, 2, 2, 2, 3], [1, 3]) == [2, 2, 2]\n\n\ndef list_diff_left_right(a, b):\n left = [x for x in a if x not in b]\n right = [x for x in b if x not in a]\n return left, right\n\n\nclass TestDiffLR(TestCase):\n def test_one(self):\n assert list_diff_left_right([1, 2], [1]) == ([2], [])\n\n def test_two(self):\n assert list_diff_left_right([1, 2, 2, 2, 3], [2]) == ([1, 3], [])\n\n def test_three(self):\n assert list_diff_left_right([1, 2, 2, 2], [1, 3, 3]) == ([2, 2, 2], [3, 3])\n",
"step-ids": [
6,
8,
9,
10,
12
]
}
|
[
6,
8,
9,
10,
12
] |
<|reserved_special_token_0|>
class floatlayoutApp(App):
def build(self):
return LayoutWindow()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LayoutWindow(FloatLayout):
pass
class floatlayoutApp(App):
def build(self):
return LayoutWindow()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LayoutWindow(FloatLayout):
pass
class floatlayoutApp(App):
def build(self):
return LayoutWindow()
if __name__ == '__main__':
display = floatlayoutApp()
display.run()
<|reserved_special_token_1|>
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
class LayoutWindow(FloatLayout):
pass
class floatlayoutApp(App):
def build(self):
return LayoutWindow()
if __name__ == '__main__':
display = floatlayoutApp()
display.run()
<|reserved_special_token_1|>
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
class LayoutWindow(FloatLayout):
pass
class floatlayoutApp(App):
def build(self):
return LayoutWindow()
if __name__ == "__main__":
display = floatlayoutApp()
display.run()
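
# Kivy derives the kv file name from the App subclass ("floatlayoutApp" ->
# "floatlayout.kv"), so build() pairs with rules defined there. A minimal
# sketch of such a file (hypothetical contents, not part of the source):
#
#   <LayoutWindow>:
#       Button:
#           text: "Hello"
#           size_hint: 0.3, 0.2
#           pos_hint: {"x": 0.35, "top": 0.6}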
|
flexible
|
{
"blob_id": "2af8677e76b77b9bfa579012a85ea331c0c7f390",
"index": 136,
"step-1": "<mask token>\n\n\nclass floatlayoutApp(App):\n\n def build(self):\n return LayoutWindow()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LayoutWindow(FloatLayout):\n pass\n\n\nclass floatlayoutApp(App):\n\n def build(self):\n return LayoutWindow()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LayoutWindow(FloatLayout):\n pass\n\n\nclass floatlayoutApp(App):\n\n def build(self):\n return LayoutWindow()\n\n\nif __name__ == '__main__':\n display = floatlayoutApp()\n display.run()\n",
"step-4": "from kivy.app import App\nfrom kivy.uix.floatlayout import FloatLayout\n\n\nclass LayoutWindow(FloatLayout):\n pass\n\n\nclass floatlayoutApp(App):\n\n def build(self):\n return LayoutWindow()\n\n\nif __name__ == '__main__':\n display = floatlayoutApp()\n display.run()\n",
"step-5": "from kivy.app import App\nfrom kivy.uix.floatlayout import FloatLayout\n\n\nclass LayoutWindow(FloatLayout):\n pass\n\n\nclass floatlayoutApp(App):\n def build(self):\n return LayoutWindow()\n\n\nif __name__== \"__main__\":\n display = floatlayoutApp()\n display.run()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(stevila)
print(stevila[1])
<|reserved_special_token_1|>
stevila = [5, 2, 8, 3]
print(stevila)
print(stevila[1])
<|reserved_special_token_1|>
stevila = [5, 2, 8, 3]
# Print all the numbers
print(stevila)
# Print the number at position 1
print(stevila[1])
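
# Running this prints:
# [5, 2, 8, 3]
# 2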
|
flexible
|
{
"blob_id": "6e845f2543b548fb936cc3719eb150e530281945",
"index": 9505,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(stevila)\nprint(stevila[1])\n",
"step-3": "stevila = [5, 2, 8, 3]\nprint(stevila)\nprint(stevila[1])\n",
"step-4": "stevila = [5, 2, 8, 3]\n\n#Izpis vseh števil\nprint(stevila)\n\n#Izpis števila na mestu 1\nprint(stevila[1])",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django import forms
class LoginForm(forms.Form):
    usuario = forms.CharField(label="Usuario", max_length=20, required=True,
                              widget=forms.TextInput(attrs={'class': 'form-control'}))
    contraseña = forms.CharField(label="Contraseña", max_length=20,
                                 widget=forms.PasswordInput(attrs={'class': 'form-control'}))
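
# Illustrative only: a minimal sketch of a view consuming this form (the
# view and template names are hypothetical, not from the source):
#
#   def login_view(request):
#       form = LoginForm(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           usuario = form.cleaned_data['usuario']
#           ...  # authenticate using the cleaned data
#       return render(request, 'login.html', {'form': form})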
|
normal
|
{
"blob_id": "7da5a7476c807619bed805cb892774c23c04c6f7",
"index": 4917,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass LoginForm(forms.Form):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass LoginForm(forms.Form):\n usuario = forms.CharField(label='Usuario', max_length=20, required=True,\n widget=forms.TextInput(attrs={'class': 'form-control'}))\n contraseña = forms.CharField(label='Contraseña', max_length=20, widget=\n forms.PasswordInput(attrs={'class': 'form-control'}))\n",
"step-4": "from django import forms\n\n\nclass LoginForm(forms.Form):\n usuario = forms.CharField(label='Usuario', max_length=20, required=True,\n widget=forms.TextInput(attrs={'class': 'form-control'}))\n contraseña = forms.CharField(label='Contraseña', max_length=20, widget=\n forms.PasswordInput(attrs={'class': 'form-control'}))\n",
"step-5": "from django import forms\n\nclass LoginForm(forms.Form):\n usuario=forms.CharField(label=\"Usuario\",max_length=20, required=True, widget=forms.TextInput(\n attrs={'class':'form-control'} \n ))\n contraseña=forms.CharField(label=\"Contraseña\",max_length=20, widget=forms.PasswordInput(\n attrs={'class':'form-control'}\n ))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(volume, surface_area)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pi = 3.14159
r = 3.14
h = 5
volume = pi * r ** 2 * h
surface_area = 2 * pi * r ** 2 + 2 * pi * r * h
print(volume, surface_area)
<|reserved_special_token_1|>
'''
Write the necessary code to calculate the volume and surface area
of a cylinder with a radius of 3.14 and a height of 5. Print out the result.
'''
pi = 3.14159
r = 3.14
h = 5
volume = pi*r**2*h
surface_area = 2*pi*r**2 + 2*pi*r*h
print(volume,surface_area)
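
# With the corrected total-surface formula (2*pi*r**2 + 2*pi*r*h), this
# prints approximately:
#   154.8741 160.5956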
|
flexible
|
{
"blob_id": "d04e69c234f2887f5301e4348b4c4ec2ad3af7a2",
"index": 2623,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(volume, surface_area)\n",
"step-3": "<mask token>\npi = 3.14159\nr = 3.14\nh = 5\nvolume = pi * r ** 2 * h\nsurface_area = 2 * pi * r ** 2 + r * h\nprint(volume, surface_area)\n",
"step-4": "'''\nWrite the necessary code calculate the volume and surface area\nof a cylinder with a radius of 3.14 and a height of 5. Print out the result.\n\n\n'''\n\npi = 3.14159\nr = 3.14\nh = 5\nvolume = pi*r**2*h\nsurface_area = 2*pi*r**2+r*h\nprint(volume,surface_area)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import collections
import inspect
import struct
from pygments.token import *
import decompil.builder
import decompil.disassemblers
import decompil.ir
class Context(decompil.ir.Context):
def __init__(self):
super(Context, self).__init__(16)
self.pointer_type = self.create_pointer_type(self.half_type)
self.init_registers()
def init_registers(self):
self.registers = regs = [
# 0x00-0x03
Register(self, 'ar0', 16),
Register(self, 'ar1', 16),
Register(self, 'ar2', 16),
Register(self, 'ar3', 16),
# 0x04-0x07
Register(self, 'ix0', 16),
Register(self, 'ix1', 16),
Register(self, 'ix2', 16),
Register(self, 'ix3', 16),
# 0x08-0xb
Register(self, 'r08', 16),
Register(self, 'r09', 16),
Register(self, 'r0a', 16),
Register(self, 'r0b', 16),
# 0x0c-0x0f
# TODO: something special?
Register(self, 'st0', 16),
Register(self, 'st1', 16),
Register(self, 'st2', 16),
Register(self, 'st3', 16),
# 0x10-0x11
# TODO: handle 8-bit overflow
Register(self, 'ac0.h', 16),
Register(self, 'ac1.h', 16),
# 0x12-0x13
Register(self, 'config', 16),
Register(self, 'sr', 16),
# 0x14-0x17
Register(self, 'prod.l', 16),
Register(self, 'prod.m1', 16),
# TODO: handle 8-bit overflow
Register(self, 'prod.h', 16),
Register(self, 'prod.m2', 16),
# 0x18-0x1b
Register(self, 'ax0.l', 16),
Register(self, 'ax1.l', 16),
Register(self, 'ax0.h', 16),
Register(self, 'ax1.h', 16),
# 0x1c-0x1f
Register(self, 'ac0.l', 16),
Register(self, 'ac1.l', 16),
Register(self, 'ac0.m', 16),
Register(self, 'ac1.m', 16),
]
self.wr_registers = [
Register(self, 'wr{}'.format(i), 16) for i in range(4)
]
self.addr_to_wr = {
self.registers[0x00]: self.wr_registers[0x00],
self.registers[0x01]: self.wr_registers[0x01],
self.registers[0x02]: self.wr_registers[0x02],
self.registers[0x03]: self.wr_registers[0x03],
}
self.addr_to_ix = {
self.registers[0x00]: self.registers[0x04],
self.registers[0x01]: self.registers[0x05],
self.registers[0x02]: self.registers[0x06],
self.registers[0x03]: self.registers[0x07],
}
self.long_accumulators = [
Register(self, 'ac0', 40, [
(regs[0x10], 32), (regs[0x1e], 16), (regs[0x1c], 0)
]),
Register(self, 'ac1', 40, [
(regs[0x11], 32), (regs[0x1f], 16), (regs[0x1d], 0)
]),
]
self.short_accumulators = [
Register(self, 'acs0', 24, [(regs[0x10], 16), (regs[0x1e], 0)]),
Register(self, 'acs1', 24, [(regs[0x11], 16), (regs[0x1f], 0)]),
]
self.extra_acculumators = [
Register(self, 'ax0', 32, [(regs[0x1a], 16), (regs[0x18], 0)]),
Register(self, 'ax1', 32, [(regs[0x1b], 16), (regs[0x19], 0)]),
]
self.prod_register = Register(self, 'prod', 40, [
(regs[0x17], 16),
(regs[0x16], 32),
(regs[0x15], 16),
(regs[0x14], 0),
])
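
        # Worked example of the aliasing above: storing the 40-bit value
        # 0x12_3456_789A into 'ac0' leaves 0x0012 in ac0.h, 0x3456 in ac0.m
        # and 0x789A in ac0.l, since those components sit at bit offsets
        # 32, 16 and 0 respectively.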
class Register(decompil.ir.Register):
def __init__(self, context, name, width, components=None):
self.context = context
self.type = context.create_int_type(width)
self.name = name
self.components = components
self.registers = (
[reg for reg, _ in components]
if components else
None
)
def build_load(self, builder):
if self.components is None:
return builder.build_rload(self)
else:
result = None
for reg, shift in self.components:
val = builder.build_zext(
self.type, builder.build_rload(reg)
)
if shift:
val = builder.build_lshl(val, self.type.create(shift))
if result:
result = builder.build_add(result, val)
else:
result = val
return result
def build_store(self, builder, value):
assert value.type == self.type
if self.components is None:
builder.build_rstore(self, value)
else:
            for reg, shift in self.components:
                # Inverse of build_load: bring each component's bits back
                # down to bit 0 before truncating (assumes the builder
                # exposes build_lshr, the right-shift counterpart of the
                # build_lshl call used in build_load).
                val = value
                if shift:
                    val = builder.build_lshr(val, value.type.create(shift))
                val = builder.build_trunc(reg.type, val)
                builder.build_rstore(reg, val)
def build_load_comp(self, builder):
return [
builder.build_rload(reg)
for reg, _ in self.components
]
def build_store_comp(self, builder, *values):
assert len(values) == len(self.components)
for value, (reg, _) in zip(values, self.components):
builder.build_rstore(reg, value)
def format(self):
return [(Name.Variable, '${}'.format(self.name))]
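
    # build_load_comp / build_store_comp exchange per-component values
    # rather than one wide value; e.g. for 'ac0' they pass the three 16-bit
    # parts (ac0.h, ac0.m, ac0.l) in the order declared in
    # Context.init_registers.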
class BaseDecoder:
name = None
opcode = None
opcode_mask = None
operands_format = None
def decode(self, context, disassembler, builder):
raise NotImplementedError()
def decode_operands(self, context):
return [op.extract(context, self) for op in self.operands_format]
class Instruction(BaseDecoder):
have_extra_operand = False
is_extended = False
def __init__(self, address, opcode, extra_operand=None, extension=None):
self.address = address
self.opcode_value = opcode
self.extension = extension
assert self.is_extended == (extension is not None)
assert self.have_extra_operand == (extra_operand is not None)
self.extra_operand = extra_operand
if self.extension:
self.extension.instruction = self
def __repr__(self):
ext = (
' ({})'.format(self.extension.name)
if self.extension else
''
)
return '{:04x}: {}{}'.format(
self.address, self.name, ext
)
class InstructionExtension(BaseDecoder):
def __init__(self, opcode):
self.opcode_value = opcode
# When accepting an extension, instructions should set the following
# field:
self.instruction = None
    def __repr__(self):
        # Extensions carry no address of their own; borrow the owning
        # instruction's once it has been attached.
        address = self.instruction.address if self.instruction else 0
        return '{:04x}: {} (extension)'.format(address, self.name)
instructions = []
instruction_extensions = []
def _init_tables():
import gcdsp.decoders
def helper(table, cls):
for obj_name in dir(gcdsp.decoders):
obj = getattr(gcdsp.decoders, obj_name)
if not (
inspect.isclass(obj)
and issubclass(obj, cls)
and obj != cls
):
continue
assert (obj.opcode & ~obj.opcode_mask) == 0
table.append(obj)
helper(instructions, Instruction)
helper(instruction_extensions, InstructionExtension)
_init_tables()
def load_insns():
import gcdsp.decoders
def default_decoder(self, context, disassembler, builder):
builder.build_undef()
disassembler.stop_basic_block()
def decode_operands(self, context):
result = []
for _, size, addend, rshift, mask in self.operands_format:
            # Extract the operand field from the fetched word, then apply
            # the table's addend for this operand.
            operand = (self.opcode_value & mask) >> rshift
            result.append(operand + addend)
return result
Insn = collections.namedtuple(
'Insn', 'name opcode mask size unused0 operands is_extended unused1'
)
for insn in gcdsp.decoders.opcodes:
insn = Insn(*insn)
insn_decoder = getattr(
gcdsp.decoders,
'decode_{}'.format(insn.name.lower()),
default_decoder,
)
instructions.append(
type(insn.name, (Instruction, ), {
'name': insn.name,
'opcode': insn.opcode,
'opcode_mask': insn.mask,
'have_extra_operand': insn.size == 2,
'is_extended': insn.is_extended,
'decode': insn_decoder,
'decode_operands': decode_operands,
'operands_format': insn.operands
})
)
    for ext in gcdsp.decoders.opcodes_ext:
        ext = Insn(*ext)
        # Look up a per-extension decoder by the same naming convention as
        # above, falling back to the stub when none is defined.
        ext_decoder = getattr(
            gcdsp.decoders,
            'decode_{}'.format(ext.name.lower()),
            default_decoder,
        )
        instruction_extensions.append(
            type(ext.name, (InstructionExtension, ), {
                'name': ext.name,
                'opcode': ext.opcode,
                'opcode_mask': ext.mask,
                'decode': ext_decoder,
                'decode_operands': decode_operands,
                'operands_format': ext.operands
            })
        )
load_insns()
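
# Each opcode row thus becomes a class equivalent to a hand-written one;
# a hypothetical row ('NOP', 0x0000, 0xffff, 1, ..., (), False, ...) would
# produce roughly:
#
#   class NOP(Instruction):
#       opcode = 0x0000
#       opcode_mask = 0xffff
#       have_extra_operand = False
#       is_extended = False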
class Decoder(decompil.disassemblers.BaseDecoder):
def __init__(self, fp):
self.fp = fp
def parse_insn(self, disassembler, builder, address):
opcode = self.get_word(address)
next_address = address + 1
if opcode is None:
return None
insn_pat = self.lookup(opcode, instructions)
# Parse the extra operand, if any.
if insn_pat.have_extra_operand:
extra_operand = self.get_word(address + 1)
next_address += 1
if extra_operand is None:
raise ValueError('Incomplete file')
else:
extra_operand = None
# Parse the instruction extension, if any.
if insn_pat.is_extended:
ext_pat = self.lookup(opcode, instruction_extensions)
ext = ext_pat(opcode)
else:
ext = None
insn = insn_pat(address, opcode, extra_operand, ext)
insn_image = '{}{}'.format(
insn.name,
"'{}".format(insn.extension.name) if insn.is_extended else ''
)
builder.set_origin('At {:#04x}: {}'.format(address, insn_image))
# Always decode the extension first (if any).
if insn.is_extended:
insn.extension.decode(disassembler.context, disassembler, builder)
# TODO: remove this once all extensions are supported.
if disassembler.must_stop_basic_block:
return next_address
insn.decode(disassembler.context, disassembler, builder)
return next_address
    def iter_insns(self, disassembler, builder, address):
        # Decode instructions sequentially, yielding the address of each
        # successfully decoded one, until parse_insn runs out of input.
        while True:
            next_address = self.parse_insn(disassembler, builder, address)
            if next_address is None:
                break
            yield address
            address = next_address
def get_word(self, address):
self.fp.seek(2 * address)
word = self.fp.read(2)
if len(word) == 0:
return None
elif len(word) == 2:
return struct.unpack('>H', word)[0]
else:
raise ValueError('Incomplete file')
def lookup(self, opcode, pattern_set):
for pat in pattern_set:
if opcode & pat.opcode_mask == pat.opcode:
return pat
else:
raise ValueError('Invalid opcode: {:04x}'.format(opcode))
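
# A hedged usage sketch (the file name and address below are illustrative
# only, not from the source):
#
#   with open('dsp_rom.bin', 'rb') as fp:
#       decoder = Decoder(fp)
#       opcode = decoder.get_word(0x0000)   # big-endian 16-bit fetch
#       pattern = decoder.lookup(opcode, instructions)
#       print(pattern.name, hex(pattern.opcode))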
|
normal
|
{
"blob_id": "865d7c606b287dbce158f721c6cf768cd078eb48",
"index": 9231,
"step-1": "<mask token>\n\n\nclass Register(decompil.ir.Register):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BaseDecoder:\n name = None\n opcode = None\n opcode_mask = None\n operands_format = None\n\n def decode(self, context, disassembler, builder):\n raise NotImplementedError()\n\n def decode_operands(self, context):\n return [op.extract(context, self) for op in self.operands_format]\n\n\nclass Instruction(BaseDecoder):\n have_extra_operand = False\n is_extended = False\n\n def __init__(self, address, opcode, extra_operand=None, extension=None):\n self.address = address\n self.opcode_value = opcode\n self.extension = extension\n assert self.is_extended == (extension is not None)\n assert self.have_extra_operand == (extra_operand is not None)\n self.extra_operand = extra_operand\n if self.extension:\n self.extension.instruction = self\n\n def __repr__(self):\n ext = ' ({})'.format(self.extension.name) if self.extension else ''\n return '{:04x}: {}{}'.format(self.address, self.name, ext)\n\n\nclass InstructionExtension(BaseDecoder):\n\n def __init__(self, opcode):\n self.opcode_value = opcode\n self.instruction = None\n\n def __repr__(self):\n return '{:04x}: {} (extension)'.format(self.address, self.name)\n\n\n<mask token>\n\n\nclass Decoder(decompil.disassemblers.BaseDecoder):\n\n def __init__(self, fp):\n self.fp = fp\n\n def parse_insn(self, disassembler, builder, address):\n opcode = self.get_word(address)\n next_address = address + 1\n if opcode is None:\n return None\n insn_pat = self.lookup(opcode, instructions)\n if insn_pat.have_extra_operand:\n extra_operand = self.get_word(address + 1)\n next_address += 1\n if extra_operand is None:\n raise ValueError('Incomplete file')\n else:\n extra_operand = None\n if insn_pat.is_extended:\n ext_pat = self.lookup(opcode, instruction_extensions)\n ext = ext_pat(opcode)\n else:\n ext = None\n insn = insn_pat(address, opcode, extra_operand, ext)\n insn_image = '{}{}'.format(insn.name, \"'{}\".format(insn.extension.\n name) if insn.is_extended else '')\n builder.set_origin('At {:#04x}: {}'.format(address, insn_image))\n if insn.is_extended:\n insn.extension.decode(disassembler.context, disassembler, builder)\n if disassembler.must_stop_basic_block:\n return next_address\n insn.decode(disassembler.context, disassembler, builder)\n return next_address\n\n def iter_insns(self, address):\n while True:\n address, insn = self.parse_insn(address)\n if insn is None:\n break\n else:\n yield address, insn\n\n def get_word(self, address):\n self.fp.seek(2 * address)\n word = self.fp.read(2)\n if len(word) == 0:\n return None\n elif len(word) == 2:\n return struct.unpack('>H', word)[0]\n else:\n raise ValueError('Incomplete file')\n\n def lookup(self, opcode, pattern_set):\n for pat in pattern_set:\n if opcode & pat.opcode_mask == pat.opcode:\n return pat\n else:\n raise ValueError('Invalid opcode: {:04x}'.format(opcode))\n",
"step-2": "<mask token>\n\n\nclass Register(decompil.ir.Register):\n\n def __init__(self, context, name, width, components=None):\n self.context = context\n self.type = context.create_int_type(width)\n self.name = name\n self.components = components\n self.registers = [reg for reg, _ in components] if components else None\n\n def build_load(self, builder):\n if self.components is None:\n return builder.build_rload(self)\n else:\n result = None\n for reg, shift in self.components:\n val = builder.build_zext(self.type, builder.build_rload(reg))\n if shift:\n val = builder.build_lshl(val, self.type.create(shift))\n if result:\n result = builder.build_add(result, val)\n else:\n result = val\n return result\n\n def build_store(self, builder, value):\n assert value.type == self.type\n if self.components is None:\n builder.build_rstore(self, value)\n else:\n for reg, shift in self.components:\n if shift:\n val = builder.build_lshl(value, value.type.create(shift))\n val = builder.build_trunc(reg.type, val)\n builder.build_rstore(reg, val)\n\n def build_load_comp(self, builder):\n return [builder.build_rload(reg) for reg, _ in self.components]\n\n def build_store_comp(self, builder, *values):\n assert len(values) == len(self.components)\n for value, (reg, _) in zip(values, self.components):\n builder.build_rstore(reg, value)\n\n def format(self):\n return [(Name.Variable, '${}'.format(self.name))]\n\n\nclass BaseDecoder:\n name = None\n opcode = None\n opcode_mask = None\n operands_format = None\n\n def decode(self, context, disassembler, builder):\n raise NotImplementedError()\n\n def decode_operands(self, context):\n return [op.extract(context, self) for op in self.operands_format]\n\n\nclass Instruction(BaseDecoder):\n have_extra_operand = False\n is_extended = False\n\n def __init__(self, address, opcode, extra_operand=None, extension=None):\n self.address = address\n self.opcode_value = opcode\n self.extension = extension\n assert self.is_extended == (extension is not None)\n assert self.have_extra_operand == (extra_operand is not None)\n self.extra_operand = extra_operand\n if self.extension:\n self.extension.instruction = self\n\n def __repr__(self):\n ext = ' ({})'.format(self.extension.name) if self.extension else ''\n return '{:04x}: {}{}'.format(self.address, self.name, ext)\n\n\nclass InstructionExtension(BaseDecoder):\n\n def __init__(self, opcode):\n self.opcode_value = opcode\n self.instruction = None\n\n def __repr__(self):\n return '{:04x}: {} (extension)'.format(self.address, self.name)\n\n\n<mask token>\n\n\nclass Decoder(decompil.disassemblers.BaseDecoder):\n\n def __init__(self, fp):\n self.fp = fp\n\n def parse_insn(self, disassembler, builder, address):\n opcode = self.get_word(address)\n next_address = address + 1\n if opcode is None:\n return None\n insn_pat = self.lookup(opcode, instructions)\n if insn_pat.have_extra_operand:\n extra_operand = self.get_word(address + 1)\n next_address += 1\n if extra_operand is None:\n raise ValueError('Incomplete file')\n else:\n extra_operand = None\n if insn_pat.is_extended:\n ext_pat = self.lookup(opcode, instruction_extensions)\n ext = ext_pat(opcode)\n else:\n ext = None\n insn = insn_pat(address, opcode, extra_operand, ext)\n insn_image = '{}{}'.format(insn.name, \"'{}\".format(insn.extension.\n name) if insn.is_extended else '')\n builder.set_origin('At {:#04x}: {}'.format(address, insn_image))\n if insn.is_extended:\n insn.extension.decode(disassembler.context, disassembler, builder)\n if disassembler.must_stop_basic_block:\n 
return next_address\n insn.decode(disassembler.context, disassembler, builder)\n return next_address\n\n def iter_insns(self, address):\n while True:\n address, insn = self.parse_insn(address)\n if insn is None:\n break\n else:\n yield address, insn\n\n def get_word(self, address):\n self.fp.seek(2 * address)\n word = self.fp.read(2)\n if len(word) == 0:\n return None\n elif len(word) == 2:\n return struct.unpack('>H', word)[0]\n else:\n raise ValueError('Incomplete file')\n\n def lookup(self, opcode, pattern_set):\n for pat in pattern_set:\n if opcode & pat.opcode_mask == pat.opcode:\n return pat\n else:\n raise ValueError('Invalid opcode: {:04x}'.format(opcode))\n",
"step-3": "<mask token>\n\n\nclass Context(decompil.ir.Context):\n <mask token>\n <mask token>\n\n\nclass Register(decompil.ir.Register):\n\n def __init__(self, context, name, width, components=None):\n self.context = context\n self.type = context.create_int_type(width)\n self.name = name\n self.components = components\n self.registers = [reg for reg, _ in components] if components else None\n\n def build_load(self, builder):\n if self.components is None:\n return builder.build_rload(self)\n else:\n result = None\n for reg, shift in self.components:\n val = builder.build_zext(self.type, builder.build_rload(reg))\n if shift:\n val = builder.build_lshl(val, self.type.create(shift))\n if result:\n result = builder.build_add(result, val)\n else:\n result = val\n return result\n\n def build_store(self, builder, value):\n assert value.type == self.type\n if self.components is None:\n builder.build_rstore(self, value)\n else:\n for reg, shift in self.components:\n if shift:\n val = builder.build_lshl(value, value.type.create(shift))\n val = builder.build_trunc(reg.type, val)\n builder.build_rstore(reg, val)\n\n def build_load_comp(self, builder):\n return [builder.build_rload(reg) for reg, _ in self.components]\n\n def build_store_comp(self, builder, *values):\n assert len(values) == len(self.components)\n for value, (reg, _) in zip(values, self.components):\n builder.build_rstore(reg, value)\n\n def format(self):\n return [(Name.Variable, '${}'.format(self.name))]\n\n\nclass BaseDecoder:\n name = None\n opcode = None\n opcode_mask = None\n operands_format = None\n\n def decode(self, context, disassembler, builder):\n raise NotImplementedError()\n\n def decode_operands(self, context):\n return [op.extract(context, self) for op in self.operands_format]\n\n\nclass Instruction(BaseDecoder):\n have_extra_operand = False\n is_extended = False\n\n def __init__(self, address, opcode, extra_operand=None, extension=None):\n self.address = address\n self.opcode_value = opcode\n self.extension = extension\n assert self.is_extended == (extension is not None)\n assert self.have_extra_operand == (extra_operand is not None)\n self.extra_operand = extra_operand\n if self.extension:\n self.extension.instruction = self\n\n def __repr__(self):\n ext = ' ({})'.format(self.extension.name) if self.extension else ''\n return '{:04x}: {}{}'.format(self.address, self.name, ext)\n\n\nclass InstructionExtension(BaseDecoder):\n\n def __init__(self, opcode):\n self.opcode_value = opcode\n self.instruction = None\n\n def __repr__(self):\n return '{:04x}: {} (extension)'.format(self.address, self.name)\n\n\n<mask token>\n\n\nclass Decoder(decompil.disassemblers.BaseDecoder):\n\n def __init__(self, fp):\n self.fp = fp\n\n def parse_insn(self, disassembler, builder, address):\n opcode = self.get_word(address)\n next_address = address + 1\n if opcode is None:\n return None\n insn_pat = self.lookup(opcode, instructions)\n if insn_pat.have_extra_operand:\n extra_operand = self.get_word(address + 1)\n next_address += 1\n if extra_operand is None:\n raise ValueError('Incomplete file')\n else:\n extra_operand = None\n if insn_pat.is_extended:\n ext_pat = self.lookup(opcode, instruction_extensions)\n ext = ext_pat(opcode)\n else:\n ext = None\n insn = insn_pat(address, opcode, extra_operand, ext)\n insn_image = '{}{}'.format(insn.name, \"'{}\".format(insn.extension.\n name) if insn.is_extended else '')\n builder.set_origin('At {:#04x}: {}'.format(address, insn_image))\n if insn.is_extended:\n insn.extension.decode(disassembler.context, 
disassembler, builder)\n if disassembler.must_stop_basic_block:\n return next_address\n insn.decode(disassembler.context, disassembler, builder)\n return next_address\n\n def iter_insns(self, address):\n while True:\n address, insn = self.parse_insn(address)\n if insn is None:\n break\n else:\n yield address, insn\n\n def get_word(self, address):\n self.fp.seek(2 * address)\n word = self.fp.read(2)\n if len(word) == 0:\n return None\n elif len(word) == 2:\n return struct.unpack('>H', word)[0]\n else:\n raise ValueError('Incomplete file')\n\n def lookup(self, opcode, pattern_set):\n for pat in pattern_set:\n if opcode & pat.opcode_mask == pat.opcode:\n return pat\n else:\n raise ValueError('Invalid opcode: {:04x}'.format(opcode))\n",
"step-4": "<mask token>\n\n\nclass Context(decompil.ir.Context):\n\n def __init__(self):\n super(Context, self).__init__(16)\n self.pointer_type = self.create_pointer_type(self.half_type)\n self.init_registers()\n\n def init_registers(self):\n self.registers = regs = [Register(self, 'ar0', 16), Register(self,\n 'ar1', 16), Register(self, 'ar2', 16), Register(self, 'ar3', 16\n ), Register(self, 'ix0', 16), Register(self, 'ix1', 16),\n Register(self, 'ix2', 16), Register(self, 'ix3', 16), Register(\n self, 'r08', 16), Register(self, 'r09', 16), Register(self,\n 'r0a', 16), Register(self, 'r0b', 16), Register(self, 'st0', 16\n ), Register(self, 'st1', 16), Register(self, 'st2', 16),\n Register(self, 'st3', 16), Register(self, 'ac0.h', 16),\n Register(self, 'ac1.h', 16), Register(self, 'config', 16),\n Register(self, 'sr', 16), Register(self, 'prod.l', 16),\n Register(self, 'prod.m1', 16), Register(self, 'prod.h', 16),\n Register(self, 'prod.m2', 16), Register(self, 'ax0.l', 16),\n Register(self, 'ax1.l', 16), Register(self, 'ax0.h', 16),\n Register(self, 'ax1.h', 16), Register(self, 'ac0.l', 16),\n Register(self, 'ac1.l', 16), Register(self, 'ac0.m', 16),\n Register(self, 'ac1.m', 16)]\n self.wr_registers = [Register(self, 'wr{}'.format(i), 16) for i in\n range(4)]\n self.addr_to_wr = {self.registers[0]: self.wr_registers[0], self.\n registers[1]: self.wr_registers[1], self.registers[2]: self.\n wr_registers[2], self.registers[3]: self.wr_registers[3]}\n self.addr_to_ix = {self.registers[0]: self.registers[4], self.\n registers[1]: self.registers[5], self.registers[2]: self.\n registers[6], self.registers[3]: self.registers[7]}\n self.long_accumulators = [Register(self, 'ac0', 40, [(regs[16], 32),\n (regs[30], 16), (regs[28], 0)]), Register(self, 'ac1', 40, [(\n regs[17], 32), (regs[31], 16), (regs[29], 0)])]\n self.short_accumulators = [Register(self, 'acs0', 24, [(regs[16], \n 16), (regs[30], 0)]), Register(self, 'acs1', 24, [(regs[17], 16\n ), (regs[31], 0)])]\n self.extra_acculumators = [Register(self, 'ax0', 32, [(regs[26], 16\n ), (regs[24], 0)]), Register(self, 'ax1', 32, [(regs[27], 16),\n (regs[25], 0)])]\n self.prod_register = Register(self, 'prod', 40, [(regs[23], 16), (\n regs[22], 32), (regs[21], 16), (regs[20], 0)])\n\n\nclass Register(decompil.ir.Register):\n\n def __init__(self, context, name, width, components=None):\n self.context = context\n self.type = context.create_int_type(width)\n self.name = name\n self.components = components\n self.registers = [reg for reg, _ in components] if components else None\n\n def build_load(self, builder):\n if self.components is None:\n return builder.build_rload(self)\n else:\n result = None\n for reg, shift in self.components:\n val = builder.build_zext(self.type, builder.build_rload(reg))\n if shift:\n val = builder.build_lshl(val, self.type.create(shift))\n if result:\n result = builder.build_add(result, val)\n else:\n result = val\n return result\n\n def build_store(self, builder, value):\n assert value.type == self.type\n if self.components is None:\n builder.build_rstore(self, value)\n else:\n for reg, shift in self.components:\n if shift:\n val = builder.build_lshl(value, value.type.create(shift))\n val = builder.build_trunc(reg.type, val)\n builder.build_rstore(reg, val)\n\n def build_load_comp(self, builder):\n return [builder.build_rload(reg) for reg, _ in self.components]\n\n def build_store_comp(self, builder, *values):\n assert len(values) == len(self.components)\n for value, (reg, _) in zip(values, self.components):\n 
builder.build_rstore(reg, value)\n\n def format(self):\n return [(Name.Variable, '${}'.format(self.name))]\n\n\nclass BaseDecoder:\n name = None\n opcode = None\n opcode_mask = None\n operands_format = None\n\n def decode(self, context, disassembler, builder):\n raise NotImplementedError()\n\n def decode_operands(self, context):\n return [op.extract(context, self) for op in self.operands_format]\n\n\nclass Instruction(BaseDecoder):\n have_extra_operand = False\n is_extended = False\n\n def __init__(self, address, opcode, extra_operand=None, extension=None):\n self.address = address\n self.opcode_value = opcode\n self.extension = extension\n assert self.is_extended == (extension is not None)\n assert self.have_extra_operand == (extra_operand is not None)\n self.extra_operand = extra_operand\n if self.extension:\n self.extension.instruction = self\n\n def __repr__(self):\n ext = ' ({})'.format(self.extension.name) if self.extension else ''\n return '{:04x}: {}{}'.format(self.address, self.name, ext)\n\n\nclass InstructionExtension(BaseDecoder):\n\n def __init__(self, opcode):\n self.opcode_value = opcode\n self.instruction = None\n\n def __repr__(self):\n return '{:04x}: {} (extension)'.format(self.address, self.name)\n\n\n<mask token>\n\n\ndef _init_tables():\n import gcdsp.decoders\n\n def helper(table, cls):\n for obj_name in dir(gcdsp.decoders):\n obj = getattr(gcdsp.decoders, obj_name)\n if not (inspect.isclass(obj) and issubclass(obj, cls) and obj !=\n cls):\n continue\n assert obj.opcode & ~obj.opcode_mask == 0\n table.append(obj)\n helper(instructions, Instruction)\n helper(instruction_extensions, InstructionExtension)\n\n\n<mask token>\n\n\ndef load_insns():\n import gcdsp.decoders\n\n def default_decoder(self, context, disassembler, builder):\n builder.build_undef()\n disassembler.stop_basic_block()\n\n def decode_operands(self, context):\n result = []\n for _, size, addend, rshift, mask in self.operands_format:\n operand = (self.opcode & mask) >> rshift\n result.append(self.opcode & mask + addend)\n return result\n Insn = collections.namedtuple('Insn',\n 'name opcode mask size unused0 operands is_extended unused1')\n for insn in gcdsp.decoders.opcodes:\n insn = Insn(*insn)\n insn_decoder = getattr(gcdsp.decoders, 'decode_{}'.format(insn.name\n .lower()), default_decoder)\n instructions.append(type(insn.name, (Instruction,), {'name': insn.\n name, 'opcode': insn.opcode, 'opcode_mask': insn.mask,\n 'have_extra_operand': insn.size == 2, 'is_extended': insn.\n is_extended, 'decode': insn_decoder, 'decode_operands':\n decode_operands, 'operands_format': insn.operands}))\n for ext in gcdsp.decoders.opcodes_ext:\n ext = Insn(*ext)\n instruction_extensions.append(type(ext.name, (InstructionExtension,\n ), {'name': ext.name, 'opcode': ext.opcode, 'opcode_mask': ext.\n mask, 'decode': insn_decoder, 'decode_operands':\n decode_operands, 'operands_format': insn.operands}))\n\n\n<mask token>\n\n\nclass Decoder(decompil.disassemblers.BaseDecoder):\n\n def __init__(self, fp):\n self.fp = fp\n\n def parse_insn(self, disassembler, builder, address):\n opcode = self.get_word(address)\n next_address = address + 1\n if opcode is None:\n return None\n insn_pat = self.lookup(opcode, instructions)\n if insn_pat.have_extra_operand:\n extra_operand = self.get_word(address + 1)\n next_address += 1\n if extra_operand is None:\n raise ValueError('Incomplete file')\n else:\n extra_operand = None\n if insn_pat.is_extended:\n ext_pat = self.lookup(opcode, instruction_extensions)\n ext = ext_pat(opcode)\n 
else:\n ext = None\n insn = insn_pat(address, opcode, extra_operand, ext)\n insn_image = '{}{}'.format(insn.name, \"'{}\".format(insn.extension.\n name) if insn.is_extended else '')\n builder.set_origin('At {:#04x}: {}'.format(address, insn_image))\n if insn.is_extended:\n insn.extension.decode(disassembler.context, disassembler, builder)\n if disassembler.must_stop_basic_block:\n return next_address\n insn.decode(disassembler.context, disassembler, builder)\n return next_address\n\n def iter_insns(self, address):\n while True:\n address, insn = self.parse_insn(address)\n if insn is None:\n break\n else:\n yield address, insn\n\n def get_word(self, address):\n self.fp.seek(2 * address)\n word = self.fp.read(2)\n if len(word) == 0:\n return None\n elif len(word) == 2:\n return struct.unpack('>H', word)[0]\n else:\n raise ValueError('Incomplete file')\n\n def lookup(self, opcode, pattern_set):\n for pat in pattern_set:\n if opcode & pat.opcode_mask == pat.opcode:\n return pat\n else:\n raise ValueError('Invalid opcode: {:04x}'.format(opcode))\n",
"step-5": "import collections\nimport inspect\nimport struct\n\nfrom pygments.token import *\n\nimport decompil.builder\nimport decompil.disassemblers\nimport decompil.ir\n\n\nclass Context(decompil.ir.Context):\n\n def __init__(self):\n super(Context, self).__init__(16)\n self.pointer_type = self.create_pointer_type(self.half_type)\n self.init_registers()\n\n def init_registers(self):\n self.registers = regs = [\n # 0x00-0x03\n Register(self, 'ar0', 16),\n Register(self, 'ar1', 16),\n Register(self, 'ar2', 16),\n Register(self, 'ar3', 16),\n\n # 0x04-0x07\n Register(self, 'ix0', 16),\n Register(self, 'ix1', 16),\n Register(self, 'ix2', 16),\n Register(self, 'ix3', 16),\n\n # 0x08-0xb\n Register(self, 'r08', 16),\n Register(self, 'r09', 16),\n Register(self, 'r0a', 16),\n Register(self, 'r0b', 16),\n\n # 0x0c-0x0f\n # TODO: something special?\n Register(self, 'st0', 16),\n Register(self, 'st1', 16),\n Register(self, 'st2', 16),\n Register(self, 'st3', 16),\n\n # 0x10-0x11\n # TODO: handle 8-bit overflow\n Register(self, 'ac0.h', 16),\n Register(self, 'ac1.h', 16),\n\n # 0x12-0x13\n Register(self, 'config', 16),\n Register(self, 'sr', 16),\n\n # 0x14-0x17\n Register(self, 'prod.l', 16),\n Register(self, 'prod.m1', 16),\n # TODO: handle 8-bit overflow\n Register(self, 'prod.h', 16),\n Register(self, 'prod.m2', 16),\n\n # 0x18-0x1b\n Register(self, 'ax0.l', 16),\n Register(self, 'ax1.l', 16),\n Register(self, 'ax0.h', 16),\n Register(self, 'ax1.h', 16),\n\n # 0x1c-0x1f\n Register(self, 'ac0.l', 16),\n Register(self, 'ac1.l', 16),\n Register(self, 'ac0.m', 16),\n Register(self, 'ac1.m', 16),\n ]\n\n self.wr_registers = [\n Register(self, 'wr{}'.format(i), 16) for i in range(4)\n ]\n\n self.addr_to_wr = {\n self.registers[0x00]: self.wr_registers[0x00],\n self.registers[0x01]: self.wr_registers[0x01],\n self.registers[0x02]: self.wr_registers[0x02],\n self.registers[0x03]: self.wr_registers[0x03],\n }\n self.addr_to_ix = {\n self.registers[0x00]: self.registers[0x04],\n self.registers[0x01]: self.registers[0x05],\n self.registers[0x02]: self.registers[0x06],\n self.registers[0x03]: self.registers[0x07],\n }\n\n self.long_accumulators = [\n Register(self, 'ac0', 40, [\n (regs[0x10], 32), (regs[0x1e], 16), (regs[0x1c], 0)\n ]),\n Register(self, 'ac1', 40, [\n (regs[0x11], 32), (regs[0x1f], 16), (regs[0x1d], 0)\n ]),\n ]\n self.short_accumulators = [\n Register(self, 'acs0', 24, [(regs[0x10], 16), (regs[0x1e], 0)]),\n Register(self, 'acs1', 24, [(regs[0x11], 16), (regs[0x1f], 0)]),\n ]\n self.extra_acculumators = [\n Register(self, 'ax0', 32, [(regs[0x1a], 16), (regs[0x18], 0)]),\n Register(self, 'ax1', 32, [(regs[0x1b], 16), (regs[0x19], 0)]),\n ]\n self.prod_register = Register(self, 'prod', 40, [\n (regs[0x17], 16),\n (regs[0x16], 32),\n (regs[0x15], 16),\n (regs[0x14], 0),\n ])\n\n\nclass Register(decompil.ir.Register):\n def __init__(self, context, name, width, components=None):\n self.context = context\n self.type = context.create_int_type(width)\n self.name = name\n self.components = components\n self.registers = (\n [reg for reg, _ in components]\n if components else\n None\n )\n\n def build_load(self, builder):\n if self.components is None:\n return builder.build_rload(self)\n else:\n result = None\n for reg, shift in self.components:\n val = builder.build_zext(\n self.type, builder.build_rload(reg)\n )\n if shift:\n val = builder.build_lshl(val, self.type.create(shift))\n\n if result:\n result = builder.build_add(result, val)\n else:\n result = val\n return result\n\n def 
build_store(self, builder, value):\n assert value.type == self.type\n if self.components is None:\n builder.build_rstore(self, value)\n else:\n for reg, shift in self.components:\n if shift:\n val = builder.build_lshl(value, value.type.create(shift))\n val = builder.build_trunc(reg.type, val)\n builder.build_rstore(reg, val)\n\n def build_load_comp(self, builder):\n return [\n builder.build_rload(reg)\n for reg, _ in self.components\n ]\n\n def build_store_comp(self, builder, *values):\n assert len(values) == len(self.components)\n for value, (reg, _) in zip(values, self.components):\n builder.build_rstore(reg, value)\n\n def format(self):\n return [(Name.Variable, '${}'.format(self.name))]\n\n\nclass BaseDecoder:\n name = None\n opcode = None\n opcode_mask = None\n operands_format = None\n\n def decode(self, context, disassembler, builder):\n raise NotImplementedError()\n\n def decode_operands(self, context):\n return [op.extract(context, self) for op in self.operands_format]\n\nclass Instruction(BaseDecoder):\n have_extra_operand = False\n is_extended = False\n\n def __init__(self, address, opcode, extra_operand=None, extension=None):\n self.address = address\n self.opcode_value = opcode\n self.extension = extension\n assert self.is_extended == (extension is not None)\n assert self.have_extra_operand == (extra_operand is not None)\n self.extra_operand = extra_operand\n if self.extension:\n self.extension.instruction = self\n\n def __repr__(self):\n ext = (\n ' ({})'.format(self.extension.name)\n if self.extension else\n ''\n )\n return '{:04x}: {}{}'.format(\n self.address, self.name, ext\n )\n\n\nclass InstructionExtension(BaseDecoder):\n def __init__(self, opcode):\n self.opcode_value = opcode\n # When accepting an extension, instructions should set the following\n # field:\n self.instruction = None\n\n def __repr__(self):\n return '{:04x}: {} (extension)'.format(\n self.address, self.name\n )\n\n\ninstructions = []\ninstruction_extensions = []\ndef _init_tables():\n import gcdsp.decoders\n\n def helper(table, cls):\n for obj_name in dir(gcdsp.decoders):\n obj = getattr(gcdsp.decoders, obj_name)\n if not (\n inspect.isclass(obj)\n and issubclass(obj, cls)\n and obj != cls\n ):\n continue\n assert (obj.opcode & ~obj.opcode_mask) == 0\n table.append(obj)\n\n helper(instructions, Instruction)\n helper(instruction_extensions, InstructionExtension)\n_init_tables()\n\n\ndef load_insns():\n import gcdsp.decoders\n\n def default_decoder(self, context, disassembler, builder):\n builder.build_undef()\n disassembler.stop_basic_block()\n\n def decode_operands(self, context):\n result = []\n for _, size, addend, rshift, mask in self.operands_format:\n operand = (self.opcode & mask) >> rshift\n result.append(self.opcode & mask + addend)\n return result\n\n Insn = collections.namedtuple(\n 'Insn', 'name opcode mask size unused0 operands is_extended unused1'\n )\n\n for insn in gcdsp.decoders.opcodes:\n insn = Insn(*insn)\n insn_decoder = getattr(\n gcdsp.decoders,\n 'decode_{}'.format(insn.name.lower()),\n default_decoder,\n )\n instructions.append(\n type(insn.name, (Instruction, ), {\n 'name': insn.name,\n 'opcode': insn.opcode,\n 'opcode_mask': insn.mask,\n 'have_extra_operand': insn.size == 2,\n 'is_extended': insn.is_extended,\n 'decode': insn_decoder,\n 'decode_operands': decode_operands,\n 'operands_format': insn.operands\n })\n )\n\n for ext in gcdsp.decoders.opcodes_ext:\n ext = Insn(*ext)\n instruction_extensions.append(\n type(ext.name, (InstructionExtension, ), {\n 'name': ext.name,\n 
'opcode': ext.opcode,\n 'opcode_mask': ext.mask,\n 'decode': insn_decoder,\n 'decode_operands': decode_operands,\n 'operands_format': insn.operands\n })\n )\nload_insns()\n\n\nclass Decoder(decompil.disassemblers.BaseDecoder):\n\n def __init__(self, fp):\n self.fp = fp\n\n def parse_insn(self, disassembler, builder, address):\n\n opcode = self.get_word(address)\n next_address = address + 1\n if opcode is None:\n return None\n insn_pat = self.lookup(opcode, instructions)\n\n # Parse the extra operand, if any.\n if insn_pat.have_extra_operand:\n extra_operand = self.get_word(address + 1)\n next_address += 1\n if extra_operand is None:\n raise ValueError('Incomplete file')\n else:\n extra_operand = None\n\n # Parse the instruction extension, if any.\n if insn_pat.is_extended:\n ext_pat = self.lookup(opcode, instruction_extensions)\n ext = ext_pat(opcode)\n else:\n ext = None\n\n insn = insn_pat(address, opcode, extra_operand, ext)\n insn_image = '{}{}'.format(\n insn.name,\n \"'{}\".format(insn.extension.name) if insn.is_extended else ''\n )\n builder.set_origin('At {:#04x}: {}'.format(address, insn_image))\n\n # Always decode the extension first (if any).\n if insn.is_extended:\n insn.extension.decode(disassembler.context, disassembler, builder)\n # TODO: remove this once all extensions are supported.\n if disassembler.must_stop_basic_block:\n return next_address\n insn.decode(disassembler.context, disassembler, builder)\n\n return next_address\n\n def iter_insns(self, address):\n while True:\n address, insn = self.parse_insn(address)\n if insn is None:\n break\n else:\n yield address, insn\n\n def get_word(self, address):\n self.fp.seek(2 * address)\n word = self.fp.read(2)\n if len(word) == 0:\n return None\n elif len(word) == 2:\n return struct.unpack('>H', word)[0]\n else:\n raise ValueError('Incomplete file')\n\n def lookup(self, opcode, pattern_set):\n for pat in pattern_set:\n if opcode & pat.opcode_mask == pat.opcode:\n return pat\n else:\n raise ValueError('Invalid opcode: {:04x}'.format(opcode))\n",
"step-ids": [
18,
24,
25,
29,
33
]
}
|
[
18,
24,
25,
29,
33
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def reverse(string):
if len(string) == 0:
return
temp = string[0]
reverse(string[1:])
print(temp, end='')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def reverse(string):
if len(string) == 0:
return
temp = string[0]
reverse(string[1:])
print(temp, end='')
<|reserved_special_token_0|>
reverse(string)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def reverse(string):
if len(string) == 0:
return
temp = string[0]
reverse(string[1:])
print(temp, end='')
string = input()
reverse(string)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 24 20:59:36 2021
@author: Abeg
"""
#factorial using recursion
"""def factorial(n):
if n==0 or n==1:
return 1
elif n==2:
return n
else:
return n*factorial(n-1)
n=int(input("enter the number"))
print(factorial(n))"""
#fibonacci using recursion
"""def fiborecursively(n):
if n<=1:
return n
else:
return(fiborecursively(n-1) + fiborecursively(n-2))
for i in range(0,10):
print(fiborecursively(i))"""
#reverse a string with recursion
def reverse(string):
if len(string) == 0:
return
temp = string[0]
reverse(string[1:])
    print(temp, end="")
string = input()
reverse(string)
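
# Illustrative self-check (an addition, not part of the original sample):
# the recursion prints on the unwind, so the output matches Python's
# slice-based reversal string[::-1].
def _check_reverse():
    import io, contextlib
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        reverse("abc")
    assert buf.getvalue() == "cba" == "abc"[::-1]

_check_reverse()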
|
flexible
|
{
"blob_id": "d1ee33ce6fb071aae800b0597a09e7039a209ec8",
"index": 2574,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef reverse(string):\n if len(string) == 0:\n return\n temp = string[0]\n reverse(string[1:])\n print(temp, end='')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef reverse(string):\n if len(string) == 0:\n return\n temp = string[0]\n reverse(string[1:])\n print(temp, end='')\n\n\n<mask token>\nreverse(string)\n",
"step-4": "<mask token>\n\n\ndef reverse(string):\n if len(string) == 0:\n return\n temp = string[0]\n reverse(string[1:])\n print(temp, end='')\n\n\nstring = input()\nreverse(string)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 24 20:59:36 2021\r\n\r\n@author: Abeg\r\n\"\"\"\r\n#factorial using recursion\r\n\"\"\"def factorial(n):\r\n if n==0 or n==1:\r\n return 1\r\n elif n==2:\r\n return n \r\n else:\r\n return n*factorial(n-1)\r\nn=int(input(\"enter the no\"))\r\nprint(factorial(n))\"\"\"\r\n#fibonancci using recursion\r\n\"\"\"def fiborecursively(n):\r\n if n<=1:\r\n return n\r\n else:\r\n return(fiborecursively(n-1) + fiborecursively(n-2))\r\nfor i in range(0,10):\r\n print(fiborecursively(i))\"\"\"\r\n \r\n#reverse a string with recursion\r\ndef reverse(string): \r\n if len(string) == 0: \r\n return\r\n temp = string[0] \r\n reverse(string[1:]) \r\n print(temp,end=\"\")\r\nstring=(input())\r\nreverse(string)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# data={
# "name":"Alby",
# "age":23
# }
# print (data['age'])
# def foo():
# print("Hellow world")
# return 1
# print (foo())
a="aa"
def add():
print(a)
add()
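
# Hedged illustration (added, not in the original): reading the module-level
# name `a` works as above, but rebinding it inside a function requires the
# `global` keyword, otherwise the assignment only creates a new local name.
def set_a(value):
    global a
    a = value

set_a("bb")
add()  # now prints bb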
|
normal
|
{
"blob_id": "97857c1c5468a96187d44abc23ffaaf2a7ead1a6",
"index": 1869,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef add():\n print(a)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef add():\n print(a)\n\n\nadd()\n",
"step-4": "a = 'aa'\n\n\ndef add():\n print(a)\n\n\nadd()\n",
"step-5": "# data={\n# \"name\":\"Alby\",\n# \"age\":23\n# }\n\n\n# print (data['age'])\n\n# def foo():\n# print(\"Hellow world\")\n# return 1\n\n# print (foo())\na=\"aa\"\n\ndef add():\n \n print(a)\n\n\nadd()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import random
def main():
#print('You rolled a die')
return random.randint(1,6)
if __name__ == "__main__":
main()
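
# Illustrative extension (not part of the original): main() returns the roll,
# so repeated calls can be tallied directly to eyeball the distribution.
from collections import Counter

def tally(n=100):
    return Counter(main() for _ in range(n))

# e.g. print(tally())  # roughly uniform counts over the faces 1..6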
|
normal
|
{
"blob_id": "6d92b944ab8503d3635626c0c23021fc2b40dce3",
"index": 5732,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n return random.randint(1, 6)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n return random.randint(1, 6)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import random\n\n\ndef main():\n return random.randint(1, 6)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import random\n\ndef main():\n #print('You rolled a die')\n return random.randint(1,6)\n\nif __name__== \"__main__\":\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import AuthUser
class SignUpForm(forms.Form):
username = forms.CharField(widget=forms.TextInput(attrs={'class':
'form-control'}))
email = forms.EmailField(widget=forms.EmailInput(attrs={'class':
'form-control'}))
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'form-control'}))
password2 = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'form-control'}))
first_name = forms.CharField(widget=forms.TextInput(attrs={'class':
'form-control'}))
last_name = forms.CharField(widget=forms.TextInput(attrs={'class':
'form-control'}))
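    # Note: a Meta with model/fields is only honored by forms.ModelForm;
    # on a plain forms.Form like this one it has no effect.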
class Meta:
model = AuthUser
fields = ('username', 'email', 'password1', 'password2',
'first_name', 'last_name')
class LoginForm(forms.Form):
username = forms.CharField(widget=forms.TextInput(attrs={'class':
'form-control'}))
password = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'form-control'}))
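
# A minimal view sketch (hypothetical addition: the view name, template path,
# URL name 'login', and the create_user() call are assumptions, not from this
# module; create_user assumes AuthUser uses Django's default user manager).
from django.shortcuts import render, redirect

def signup_view(request):
    form = SignUpForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        data = form.cleaned_data
        # A plain forms.Form does not compare or hash passwords for you,
        # so password1/password2 should be validated before this point.
        AuthUser.objects.create_user(username=data['username'],
                                     email=data['email'],
                                     password=data['password1'])
        return redirect('login')
    return render(request, 'signup.html', {'form': form})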
|
normal
|
{
"blob_id": "7644dcd956e1ad179f42e44870864386744c6cdf",
"index": 2553,
"step-1": "<mask token>\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n",
"step-2": "<mask token>\n\n\nclass SignUpForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = AuthUser\n fields = ('username', 'email', 'password1', 'password2',\n 'first_name', 'last_name')\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n",
"step-3": "<mask token>\n\n\nclass SignUpForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n email = forms.EmailField(widget=forms.EmailInput(attrs={'class':\n 'form-control'}))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n password2 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n first_name = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n last_name = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n\n\n class Meta:\n model = AuthUser\n fields = ('username', 'email', 'password1', 'password2',\n 'first_name', 'last_name')\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n",
"step-4": "from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .models import AuthUser\n\n\nclass SignUpForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n email = forms.EmailField(widget=forms.EmailInput(attrs={'class':\n 'form-control'}))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n password2 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n first_name = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n last_name = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n\n\n class Meta:\n model = AuthUser\n fields = ('username', 'email', 'password1', 'password2',\n 'first_name', 'last_name')\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
a = [1, 1, 2, 3, 4, 4, 5, 7, 12, 30, 49]
for i in range(0, len(a)):
    if a[i] < 5:
        print(str(a[i]) + " ")
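
# Added for comparison (same selection): collect the matches with a list
# comprehension instead of indexing into the list.
small = [value for value in a if value < 5]
print(small)  # -> [1, 1, 2, 3, 4, 4]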
|
normal
|
{
"blob_id": "24635989ccdb0f35f1e618dd8dc07f2cf84faddb",
"index": 6621,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, len(a)):\n if a[i] < 5:\n print(str(a[i]) + ' ')\n i += 1\n else:\n i += 1\n",
"step-3": "a = [1, 1, 2, 3, 4, 4, 5, 7, 12, 30, 49]\nfor i in range(0, len(a)):\n if a[i] < 5:\n print(str(a[i]) + ' ')\n i += 1\n else:\n i += 1\n",
"step-4": "a = [1, 1, 2, 3, 4, 4, 5, 7, 12, 30, 49]\n\nfor i in range(0, len(a)):\n if a[i] < 5:\n print(str(a[i]) + \" \")\n i += 1\n else:\n i += 1\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def pre_proc(INP_DIR):
INP_DIR = INP_DIR + '/'
NONE_DIR = os.path.dirname(INP_DIR) + '_none'
SQUARE_DIR = os.path.dirname(INP_DIR) + '_square'
CROP_DIR = os.path.dirname(INP_DIR) + '_cropped'
os.makedirs(NONE_DIR, exist_ok=True)
os.makedirs(SQUARE_DIR, exist_ok=True)
os.makedirs(CROP_DIR, exist_ok=True)
dir = os.listdir(INP_DIR)
dir.sort()
for fi in dir:
print(fi)
inp_path = os.path.join(INP_DIR, fi)
img = Image.open(inp_path)
if img.format == 'GIF':
shutil.move(inp_path, os.path.join(NONE_DIR, fi))
continue
width, height = img.size
if width == height:
shutil.copyfile(inp_path, os.path.join(SQUARE_DIR, fi))
continue
img = cv2.imread(inp_path)
if width > height:
img = img[:, width // 2 - height // 2:width // 2 + height // 2]
else:
img = img[height // 2 - width // 2:height // 2 + width // 2, :]
cv2.imwrite(os.path.join(CROP_DIR, fi), img)
return NONE_DIR, SQUARE_DIR, CROP_DIR
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def pre_proc(INP_DIR):
INP_DIR = INP_DIR + '/'
NONE_DIR = os.path.dirname(INP_DIR) + '_none'
SQUARE_DIR = os.path.dirname(INP_DIR) + '_square'
CROP_DIR = os.path.dirname(INP_DIR) + '_cropped'
os.makedirs(NONE_DIR, exist_ok=True)
os.makedirs(SQUARE_DIR, exist_ok=True)
os.makedirs(CROP_DIR, exist_ok=True)
dir = os.listdir(INP_DIR)
dir.sort()
for fi in dir:
print(fi)
inp_path = os.path.join(INP_DIR, fi)
img = Image.open(inp_path)
if img.format == 'GIF':
shutil.move(inp_path, os.path.join(NONE_DIR, fi))
continue
width, height = img.size
if width == height:
shutil.copyfile(inp_path, os.path.join(SQUARE_DIR, fi))
continue
img = cv2.imread(inp_path)
if width > height:
img = img[:, width // 2 - height // 2:width // 2 + height // 2]
else:
img = img[height // 2 - width // 2:height // 2 + width // 2, :]
cv2.imwrite(os.path.join(CROP_DIR, fi), img)
return NONE_DIR, SQUARE_DIR, CROP_DIR
if __name__ == '__main__':
pre_proc(INP_DIR)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
INP_DIR = '/dataset/test_set_A_full'
def pre_proc(INP_DIR):
INP_DIR = INP_DIR + '/'
NONE_DIR = os.path.dirname(INP_DIR) + '_none'
SQUARE_DIR = os.path.dirname(INP_DIR) + '_square'
CROP_DIR = os.path.dirname(INP_DIR) + '_cropped'
os.makedirs(NONE_DIR, exist_ok=True)
os.makedirs(SQUARE_DIR, exist_ok=True)
os.makedirs(CROP_DIR, exist_ok=True)
dir = os.listdir(INP_DIR)
dir.sort()
for fi in dir:
print(fi)
inp_path = os.path.join(INP_DIR, fi)
img = Image.open(inp_path)
if img.format == 'GIF':
shutil.move(inp_path, os.path.join(NONE_DIR, fi))
continue
width, height = img.size
if width == height:
shutil.copyfile(inp_path, os.path.join(SQUARE_DIR, fi))
continue
img = cv2.imread(inp_path)
if width > height:
img = img[:, width // 2 - height // 2:width // 2 + height // 2]
else:
img = img[height // 2 - width // 2:height // 2 + width // 2, :]
cv2.imwrite(os.path.join(CROP_DIR, fi), img)
return NONE_DIR, SQUARE_DIR, CROP_DIR
if __name__ == '__main__':
pre_proc(INP_DIR)
<|reserved_special_token_1|>
import os, shutil, cv2
from PIL import Image
INP_DIR = '/dataset/test_set_A_full'
def pre_proc(INP_DIR):
INP_DIR = INP_DIR + '/'
NONE_DIR = os.path.dirname(INP_DIR) + '_none'
SQUARE_DIR = os.path.dirname(INP_DIR) + '_square'
CROP_DIR = os.path.dirname(INP_DIR) + '_cropped'
os.makedirs(NONE_DIR, exist_ok=True)
os.makedirs(SQUARE_DIR, exist_ok=True)
os.makedirs(CROP_DIR, exist_ok=True)
dir = os.listdir(INP_DIR)
dir.sort()
for fi in dir:
print(fi)
inp_path = os.path.join(INP_DIR, fi)
img = Image.open(inp_path)
if img.format == 'GIF':
shutil.move(inp_path, os.path.join(NONE_DIR, fi))
continue
width, height = img.size
if width == height:
shutil.copyfile(inp_path, os.path.join(SQUARE_DIR, fi))
continue
img = cv2.imread(inp_path)
if width > height:
img = img[:, width // 2 - height // 2:width // 2 + height // 2]
else:
img = img[height // 2 - width // 2:height // 2 + width // 2, :]
cv2.imwrite(os.path.join(CROP_DIR, fi), img)
return NONE_DIR, SQUARE_DIR, CROP_DIR
if __name__ == '__main__':
pre_proc(INP_DIR)
<|reserved_special_token_1|>
import os, shutil, cv2
from PIL import Image
INP_DIR = '/dataset/test_set_A_full'
# Split the test data directory into 3 folders: None, Square (1:1), and the rest (center-cropped)
# Returns the paths to those 3 folders
def pre_proc(INP_DIR):
INP_DIR = INP_DIR + '/'
NONE_DIR = os.path.dirname(INP_DIR) + '_none'
SQUARE_DIR = os.path.dirname(INP_DIR) + '_square'
CROP_DIR = os.path.dirname(INP_DIR) + '_cropped'
os.makedirs(NONE_DIR, exist_ok=True)
os.makedirs(SQUARE_DIR, exist_ok=True)
os.makedirs(CROP_DIR, exist_ok=True)
dir = os.listdir(INP_DIR)
dir.sort()
for fi in dir:
print(fi)
inp_path = os.path.join(INP_DIR, fi)
img = Image.open(inp_path)
if img.format == 'GIF':
shutil.move(inp_path, os.path.join(NONE_DIR, fi))
continue
width, height = img.size
if width == height:
shutil.copyfile(inp_path, os.path.join(SQUARE_DIR, fi))
continue
img = cv2.imread(inp_path)
if width > height:
img = img[:, width//2-height//2:width//2+height//2]
else:
img = img[height//2-width//2:height//2+width//2, :]
cv2.imwrite(os.path.join(CROP_DIR, fi), img)
return NONE_DIR, SQUARE_DIR, CROP_DIR
if __name__ == '__main__':
pre_proc(INP_DIR)
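
# Hypothetical usage sketch (added; the report format is an assumption):
# run the split, then count how many files landed in each output folder.
def report_split(inp_dir):
    none_dir, square_dir, crop_dir = pre_proc(inp_dir)
    for label, d in (('none', none_dir), ('square', square_dir),
                     ('cropped', crop_dir)):
        print(label, len(os.listdir(d)), 'files in', d)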
|
flexible
|
{
"blob_id": "4ad4cf46be735c6ac26b5b0953d4c2458f37496a",
"index": 9372,
"step-1": "<mask token>\n\n\ndef pre_proc(INP_DIR):\n INP_DIR = INP_DIR + '/'\n NONE_DIR = os.path.dirname(INP_DIR) + '_none'\n SQUARE_DIR = os.path.dirname(INP_DIR) + '_square'\n CROP_DIR = os.path.dirname(INP_DIR) + '_cropped'\n os.makedirs(NONE_DIR, exist_ok=True)\n os.makedirs(SQUARE_DIR, exist_ok=True)\n os.makedirs(CROP_DIR, exist_ok=True)\n dir = os.listdir(INP_DIR)\n dir.sort()\n for fi in dir:\n print(fi)\n inp_path = os.path.join(INP_DIR, fi)\n img = Image.open(inp_path)\n if img.format == 'GIF':\n shutil.move(inp_path, os.path.join(NONE_DIR, fi))\n continue\n width, height = img.size\n if width == height:\n shutil.copyfile(inp_path, os.path.join(SQUARE_DIR, fi))\n continue\n img = cv2.imread(inp_path)\n if width > height:\n img = img[:, width // 2 - height // 2:width // 2 + height // 2]\n else:\n img = img[height // 2 - width // 2:height // 2 + width // 2, :]\n cv2.imwrite(os.path.join(CROP_DIR, fi), img)\n return NONE_DIR, SQUARE_DIR, CROP_DIR\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef pre_proc(INP_DIR):\n INP_DIR = INP_DIR + '/'\n NONE_DIR = os.path.dirname(INP_DIR) + '_none'\n SQUARE_DIR = os.path.dirname(INP_DIR) + '_square'\n CROP_DIR = os.path.dirname(INP_DIR) + '_cropped'\n os.makedirs(NONE_DIR, exist_ok=True)\n os.makedirs(SQUARE_DIR, exist_ok=True)\n os.makedirs(CROP_DIR, exist_ok=True)\n dir = os.listdir(INP_DIR)\n dir.sort()\n for fi in dir:\n print(fi)\n inp_path = os.path.join(INP_DIR, fi)\n img = Image.open(inp_path)\n if img.format == 'GIF':\n shutil.move(inp_path, os.path.join(NONE_DIR, fi))\n continue\n width, height = img.size\n if width == height:\n shutil.copyfile(inp_path, os.path.join(SQUARE_DIR, fi))\n continue\n img = cv2.imread(inp_path)\n if width > height:\n img = img[:, width // 2 - height // 2:width // 2 + height // 2]\n else:\n img = img[height // 2 - width // 2:height // 2 + width // 2, :]\n cv2.imwrite(os.path.join(CROP_DIR, fi), img)\n return NONE_DIR, SQUARE_DIR, CROP_DIR\n\n\nif __name__ == '__main__':\n pre_proc(INP_DIR)\n",
"step-3": "<mask token>\nINP_DIR = '/dataset/test_set_A_full'\n\n\ndef pre_proc(INP_DIR):\n INP_DIR = INP_DIR + '/'\n NONE_DIR = os.path.dirname(INP_DIR) + '_none'\n SQUARE_DIR = os.path.dirname(INP_DIR) + '_square'\n CROP_DIR = os.path.dirname(INP_DIR) + '_cropped'\n os.makedirs(NONE_DIR, exist_ok=True)\n os.makedirs(SQUARE_DIR, exist_ok=True)\n os.makedirs(CROP_DIR, exist_ok=True)\n dir = os.listdir(INP_DIR)\n dir.sort()\n for fi in dir:\n print(fi)\n inp_path = os.path.join(INP_DIR, fi)\n img = Image.open(inp_path)\n if img.format == 'GIF':\n shutil.move(inp_path, os.path.join(NONE_DIR, fi))\n continue\n width, height = img.size\n if width == height:\n shutil.copyfile(inp_path, os.path.join(SQUARE_DIR, fi))\n continue\n img = cv2.imread(inp_path)\n if width > height:\n img = img[:, width // 2 - height // 2:width // 2 + height // 2]\n else:\n img = img[height // 2 - width // 2:height // 2 + width // 2, :]\n cv2.imwrite(os.path.join(CROP_DIR, fi), img)\n return NONE_DIR, SQUARE_DIR, CROP_DIR\n\n\nif __name__ == '__main__':\n pre_proc(INP_DIR)\n",
"step-4": "import os, shutil, cv2\nfrom PIL import Image\nINP_DIR = '/dataset/test_set_A_full'\n\n\ndef pre_proc(INP_DIR):\n INP_DIR = INP_DIR + '/'\n NONE_DIR = os.path.dirname(INP_DIR) + '_none'\n SQUARE_DIR = os.path.dirname(INP_DIR) + '_square'\n CROP_DIR = os.path.dirname(INP_DIR) + '_cropped'\n os.makedirs(NONE_DIR, exist_ok=True)\n os.makedirs(SQUARE_DIR, exist_ok=True)\n os.makedirs(CROP_DIR, exist_ok=True)\n dir = os.listdir(INP_DIR)\n dir.sort()\n for fi in dir:\n print(fi)\n inp_path = os.path.join(INP_DIR, fi)\n img = Image.open(inp_path)\n if img.format == 'GIF':\n shutil.move(inp_path, os.path.join(NONE_DIR, fi))\n continue\n width, height = img.size\n if width == height:\n shutil.copyfile(inp_path, os.path.join(SQUARE_DIR, fi))\n continue\n img = cv2.imread(inp_path)\n if width > height:\n img = img[:, width // 2 - height // 2:width // 2 + height // 2]\n else:\n img = img[height // 2 - width // 2:height // 2 + width // 2, :]\n cv2.imwrite(os.path.join(CROP_DIR, fi), img)\n return NONE_DIR, SQUARE_DIR, CROP_DIR\n\n\nif __name__ == '__main__':\n pre_proc(INP_DIR)\n",
"step-5": "import os, shutil, cv2\nfrom PIL import Image\n\nINP_DIR = '/dataset/test_set_A_full'\n\n\n# Lọc thư mục data test ra thành 3 thư mục: None, Square (1:1), và phần còn lại (đã được crop ngay chính giữa)\n# Trả về path dẫn đến 3 thư mục nói trên\ndef pre_proc(INP_DIR):\n INP_DIR = INP_DIR + '/'\n NONE_DIR = os.path.dirname(INP_DIR) + '_none'\n SQUARE_DIR = os.path.dirname(INP_DIR) + '_square'\n CROP_DIR = os.path.dirname(INP_DIR) + '_cropped'\n\n os.makedirs(NONE_DIR, exist_ok=True)\n os.makedirs(SQUARE_DIR, exist_ok=True)\n os.makedirs(CROP_DIR, exist_ok=True)\n\n dir = os.listdir(INP_DIR)\n dir.sort()\n\n for fi in dir:\n print(fi)\n inp_path = os.path.join(INP_DIR, fi)\n \n img = Image.open(inp_path)\n if img.format == 'GIF': \n shutil.move(inp_path, os.path.join(NONE_DIR, fi))\n continue\n width, height = img.size\n if width == height:\n shutil.copyfile(inp_path, os.path.join(SQUARE_DIR, fi))\n continue\n \n img = cv2.imread(inp_path)\n if width > height:\n img = img[:, width//2-height//2:width//2+height//2]\n else:\n img = img[height//2-width//2:height//2+width//2, :]\n \n cv2.imwrite(os.path.join(CROP_DIR, fi), img)\n \n return NONE_DIR, SQUARE_DIR, CROP_DIR\n \nif __name__ == '__main__':\n pre_proc(INP_DIR)\n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Python 2.7 Doritobot Vision System
# EECS 498 Purple Team, 2014
# Written by Cody Hyman (hymanc@umich.edu)
# Written against OpenCV 3.0.0-alpha
import sys
import os
import cv2
import numpy as np
from uvcinterface import UVCInterface as uvc
from visionUtil import VisionUtil as vu
from collections import deque
from math import *
# Calibration state 'Enumeration'
class CalState(object):
UNCAL = 1
CAL_PROG = 2
CALIBRATED = 3
### Vision System Class ###
class VisionSystem(object):
# Window names
CAM_FEED_NAME = 'Camera Feed'
CAL_NAME = 'Calibrated Image'
PROC_NAME = 'Vision Processing'
CTL_NAME = 'Filter Controls'
# Constants
G_CENTER = 52
R_CENTER = 0
SMIN = 50
VMIN = 80
#HISTORY_LENGTH = 15
EMPTY_KERNEL = [0, 0, 0, 0, 0, 0, 0]
RAW_KERNEL = np.array([1, 2, 3, 6, 10, 18, 20], dtype = np.float32)
FIR_KERNEL = np.multiply(RAW_KERNEL,1/np.linalg.norm(RAW_KERNEL,1)) # Normalized kernel
def __init__(self, camera):
### Instance Value initialization ###
self.camera = camera
self.calstate = CalState.UNCAL
self.calpts = []
self.XSIZE = 1000
self.YSIZE = 1000
self.x_est = -1
self.y_est = -1
self.theta_est = -1
# Drawing storage
self.waypointEst = [(300,300)] # Waypoint estimates for UI
self.tagLoc = (10,10) # Tag location estimate
self.fVectorStart = (0,0)
self.fVectorEnd = (0,0)
#self.worldpts = np.float32([
# [0,self.YSIZE/2],
# [0,0],
# [self.XSIZE,0],
# [self.XSIZE,self.YSIZE/2]
# ])
# ===== ***** Calibration points from world *****===== #
'''self.worldpts = np.float32([
[-5, -1. * -105], #22
[90, -1. * -100], #27
[90, -1. * 110], #26
[0, -1. * 107] #25
])#*self.IMG_SCALE + self.IMG_OFFSET'''
# Swap x-y coordinates (WTF!)
'''self.worldpts = np.float32([
[-105,-5], #22
[-100, 90], #27
[110, 90], #26
[107, 0] #25
])#*self.IMG_SCALE + self.IMG_OFFSET'''
self.worldpts = np.float32([
[-104,-2], #22
[-104,85], #27
[115,84], #26
[115,3] #25
])
self.worldpts = vu.toImageCoordinates(self.worldpts)
testPts = vu.toWaypointCoordinates(self.worldpts)
print 'TestWorldPts', str(testPts)
# ===== *************** ===== #
### Camera initialization ###
print 'Opening Camera ' + str(camera)
self.vidcap = cv2.VideoCapture(camera)# Open up specified camera
# Check if camera is opened and exit if not
if self.vidcap.isOpened():
print 'Camera ' + str(camera) + ' opened successfully'
else:
print 'ERROR: Camera ' + str(camera) + ' not opened'
            raise IOError('Camera ' + str(camera) + ' not opened')
# Set camera autoexposure
uvc.set(self.camera, uvc.EXPOSURE_AUTO, 1)
uvc.set(self.camera, uvc.EXPOSURE_AUTO_PRIORITY, 0)
### Initialize UI elements ###
# Filter Controls Window
ctlWindow = cv2.namedWindow(self.CTL_NAME)
cv2.createTrackbar('Blue', self.CTL_NAME, 88, 180, self.trackbarChangeHandler)
cv2.createTrackbar('Green', self.CTL_NAME, 41, 180, self.trackbarChangeHandler)
cv2.createTrackbar('Red', self.CTL_NAME, 172, 180, self.trackbarChangeHandler)
cv2.createTrackbar('B Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)
cv2.createTrackbar('G Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)
cv2.createTrackbar('R Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)
cv2.createTrackbar('Sat Cutoff', self.CTL_NAME, 100, 255, self.trackbarChangeHandler)
cv2.createTrackbar('Show Background', self.CTL_NAME, 1, 1, self.trackbarChangeHandler)
# Camera input window
camWindow = cv2.namedWindow(self.CAM_FEED_NAME)
cv2.createTrackbar('Gain', self.CAM_FEED_NAME, 128, 255, self.gainChanged)
cv2.createTrackbar('Exposure', self.CAM_FEED_NAME, 1600, 2000, self.exposureChanged)
cv2.createTrackbar('Saturation', self.CAM_FEED_NAME, 128, 255, self.saturationChanged)
cv2.setMouseCallback(self.CAM_FEED_NAME, self.mouseClickHandler) # Set mouse callbacks for calibration
# Rectified/Calibrated Image window
#calWindow = cv2.namedWindow(self.CAL_NAME)
#cv2.setMouseCallback(self.CAL_NAME, self.colorClickHandler)
# Image processing Window 2
procWindow = cv2.namedWindow(self.PROC_NAME)
# History for filter bank
self.xHistory = deque(self.EMPTY_KERNEL)
self.yHistory = deque(self.EMPTY_KERNEL)
self.thetaHistory = deque(self.EMPTY_KERNEL)
# Run vision on a frame
def processFrame(self):
### Main processing loop ###
#while(True):
frameRet, self.camImg = self.vidcap.read()
#Img = self.drawCalMarkers()
cv2.imshow(self.CAM_FEED_NAME, self.drawCalMarkers())
if(self.calstate == CalState.CALIBRATED):
self.remapImage() # Apply perspective warp
bl = cv2.getTrackbarPos('Blue', self.CTL_NAME)
gr = cv2.getTrackbarPos('Green', self.CTL_NAME)
rd = cv2.getTrackbarPos('Red', self.CTL_NAME)
bvmin = cv2.getTrackbarPos('B Cutoff', self.CTL_NAME)
gvmin = cv2.getTrackbarPos('G Cutoff', self.CTL_NAME)
rvmin = cv2.getTrackbarPos('R Cutoff', self.CTL_NAME)
smin = cv2.getTrackbarPos('Sat Cutoff', self.CTL_NAME)
bgroundFlag = cv2.getTrackbarPos('Show Background', self.CTL_NAME)
bCentroid, self.bTagImg = self.findMarker(self.warpImg, bl, 10, smin, bvmin)
gCentroid, self.gTagImg = self.findMarker(self.warpImg, gr, 10, smin, gvmin)
rCentroid, self.rTagImg = self.findMarker(self.warpImg, rd, 10, smin, rvmin)
#vu.printCentroids(gCentroid, rCentroid)
if(bgroundFlag):
self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg, self.warpImg)
else:
self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg)
ctr, theta, bCtr, gCtr, rCtr = vu.localizeRobot(bCentroid, gCentroid, rCentroid)
if((ctr != None) and (theta != None)):
fctr, ftheta = self.filterPoints(ctr, theta)
self.x_est = ctr[0]
self.y_est = ctr[1]
# print 'Theta IN:', theta
self.theta_est = theta#ftheta
self.tagLoc = vu.computeTagLocation(ctr, bCtr) # Compute tag location
vu.drawSquareMarker(self.rgbImg, int(fctr[0]), int(fctr[1]), 5, (255,0,255))
if(gCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(gCentroid[0]), int(gCentroid[1]), 5, (0,0,255))
if(rCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(rCentroid[0]), int(rCentroid[1]), 5, (255,0,0))
if(bCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(bCentroid[0]), int(bCentroid[1]), 5, (255,255,0))
wpIndex = 0
for wp in self.waypointEst:
wpIndex = wpIndex + 1
if(wpIndex == 1):
wpcolor = (0,0,255)
else:
wpcolor = (0,255,255)
vu.drawFilledCircleMarker(self.rgbImg, wp[0], wp[1], 10, wpcolor) #
vu.drawTextIndex(self.rgbImg, wp[0], wp[1], str(wpIndex)) # Draw waypoint index
if(self.tagLoc[0] != None):
vu.drawFilledCircleMarker(self.rgbImg, self.tagLoc[0], self.tagLoc[1], 5, (0,0,160))
#vu.drawVector(self.rgbImg, self.fVectorStart, self.fVectorEnd, (255,128,255))
#cv2.imshow(self.CAL_NAME, self.warpImg)
cv2.imshow(self.PROC_NAME, self.rgbImg)
#if cv2.waitKey(20) & 0xFF == ord('q'):
# break
# Use current perspective transform to remap image
def remapImage(self):
if(self.calstate == CalState.CALIBRATED):
self.warpImg = cv2.warpPerspective(self.camImg, self.warp,(int(300*vu.IMG_SCALE),int(300*vu.IMG_SCALE)))
self.warpImg = cv2.GaussianBlur(self.warpImg, (9,9), 1)
self.warpImg = cv2.medianBlur(self.warpImg, 5)
else:
print 'Transform not calibrated'
# Draws calibration markers on the camera image
def drawCalMarkers(self):
markedImg = self.camImg.copy()
for pt in self.calpts:
vu.drawSquareMarker(markedImg, pt[0], pt[1], 5, (255,0,255))
return markedImg
# Finds a marker's central moment
def findMarker(self, image, hueCenter, hueWidth, satMin, valMin):
hsvImg = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
markerImg = cv2.inRange(hsvImg, np.array([hueCenter-hueWidth/2, satMin, valMin]), np.array([hueCenter+hueWidth/2, 255, 255]))
cleanElement = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
markerImg = cv2.erode(markerImg, cleanElement) # Clean up marker image w/ erode-dilate-median
markerImg = cv2.dilate(markerImg, cleanElement)
markerImg = cv2.medianBlur(markerImg, 3)
mMoments = cv2.moments(markerImg) # Compute moments
m00 = mMoments['m00']
if(m00 > 0.1):
return (mMoments['m10']/m00, mMoments['m01']/m00), markerImg
return None, markerImg
# FIR on centers and angles
def filterPoints(self, ctr, theta):
if((ctr != None) and (theta != None)):
if(len(self.xHistory) == len(self.FIR_KERNEL)):
self.xHistory.popleft()
if(len(self.yHistory) == len(self.FIR_KERNEL)):
self.yHistory.popleft()
if(len(self.thetaHistory) == len(self.FIR_KERNEL)):
self.thetaHistory.popleft()
self.xHistory.append(ctr[0])
self.yHistory.append(ctr[1])
self.thetaHistory.append(theta)
xFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.xHistory)),1)
yFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.yHistory)),1)
thetaFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.thetaHistory)),1)
#print 'Filtered Phi:', phiFilter, ' Raw Theta:', theta
return (xFilter, yFilter), thetaFilter
# Interface to get current state estimates
def getState(self):
# Give estimated [x,y,theta]
if(self.tagLoc != None):
tx = self.tagLoc[0]
ty = self.tagLoc[1]
else:
tx = None
ty = None
return [self.x_est, self.y_est, self.theta_est, tx, ty]
### Event Handlers ###
# Camera input mouseclick handler
def mouseClickHandler(self, event, x, y, flags, param):
if event == cv2.EVENT_RBUTTONDOWN:
print 'Recalibration requested'
self.calstate = CalState.CAL_PROG
self.calpts = [] # Reset calibration points
if event == cv2.EVENT_LBUTTONDOWN:
print 'Mouse left click event at ' + str(x) + ',' + str(y)
if(self.calstate == CalState.UNCAL):
self.calstate = CalState.CAL_PROG
print 'Adding calibration point at (' + str(x) + ',' + str(y) + ')'
self.calpts.append([x,y])
elif(self.calstate == CalState.CAL_PROG):
if(len(self.calpts) < 4):
print 'Adding calibration point at (' + str(x) + ',' + str(y) + ')'
self.calpts.append([x,y])
# Finish
if(len(self.calpts) == 4):
print 'Calibrated'
self.warp = cv2.getPerspectiveTransform(np.float32(self.calpts), self.worldpts)
print str(self.calpts)
self.calstate = CalState.CALIBRATED
elif(self.calstate == CalState.CALIBRATED):
print 'Already calibrated'
# Color click handler for cal window
def colorClickHandler(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
print 'Checking marker 1 color at ', str(x), ',', str(y)
pass # Get color at point
if event == cv2.EVENT_RBUTTONDOWN:
print 'Checking marker 2 color at ', str(x), ',', str(y)
pass # Get color at point
    # Generic do-nothing slider handler
def trackbarChangeHandler(self, x):
pass
# Gain slider handler
def gainChanged(self, gain):
uvc.set(self.camera, uvc.GAIN, gain)
# Saturation slider handler
def saturationChanged(self, sat):
uvc.set(self.camera, uvc.SATURATION, sat)
# Exposure slider handler
def exposureChanged(self, exp):
uvc.set(self.camera, uvc.EXPOSURE_ABS, exp)
# Sets the waypoint list for rendering on overlay
def setWaypoints(self, waypointEst):
self.waypointEst = vu.toImageCoordinates(waypointEst)
# Sets the estimated tag location for rendering on the overlay
def setTagLocation(self, tagEst):
self.tagLoc = (int(tagEst[0]),int(tagEst[1]))
# Stops the vision process
def stop(self):
self.vidcap.release()
cv2.destroyAllWindows()
# Main function to run vision system as standalone
def main():
    print 'Args:', str(sys.argv)
    ncam = 0
    for x in range(len(sys.argv)):
        if(sys.argv[x] == '-c'):
            ncam = int(sys.argv[x+1])
    vs = VisionSystem(ncam)
    while True:
        vs.processFrame()
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break
    vs.stop()
if __name__ == '__main__':
main()
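
# Illustrative sketch (added, standalone; assumes only numpy, which the
# module already imports as np): the FIR bank above is a weighted moving
# average. Dividing RAW_KERNEL by its L1 norm makes the weights sum to 1,
# and for non-negative samples the L1 norm of the element-wise product
# equals the ordinary dot product.
def _fir_demo():
    raw = np.array([1, 2, 3, 6, 10, 18, 20], dtype=np.float32)
    kernel = np.multiply(raw, 1 / np.linalg.norm(raw, 1))  # weights sum to 1
    samples = np.array([100, 101, 103, 102, 104, 105, 106], dtype=np.float32)
    weighted = np.linalg.norm(np.multiply(kernel, samples), 1)
    print weighted  # equals np.dot(kernel, samples); recent samples dominate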
|
normal
|
{
"blob_id": "324030a976af29dc93fdb637583bfaab93671cc2",
"index": 8515,
"step-1": "# Python 2.7 Doritobot Vision System\n# EECS 498 Purple Team, 2014\n# Written by Cody Hyman (hymanc@umich.edu)\n# Written against OpenCV 3.0.0-alpha\n\nimport sys\nimport os\n\nimport cv2\nimport numpy as np\n\nfrom uvcinterface import UVCInterface as uvc\nfrom visionUtil import VisionUtil as vu\nfrom collections import deque\nfrom math import *\n# Calibration state 'Enumeration'\nclass CalState(object):\n UNCAL = 1\n CAL_PROG = 2\n CALIBRATED = 3\n\n### Vision System Class ###\nclass VisionSystem(object):\n\t# Window names\n\tCAM_FEED_NAME = 'Camera Feed'\n\tCAL_NAME = 'Calibrated Image'\n\tPROC_NAME = 'Vision Processing'\n\tCTL_NAME = 'Filter Controls'\n\n\t# Constants\n\tG_CENTER = 52\n\tR_CENTER = 0\n\tSMIN = 50\n\tVMIN = 80\n\n\t#HISTORY_LENGTH = 15\n\tEMPTY_KERNEL = [0, 0, 0, 0, 0, 0, 0]\n\tRAW_KERNEL = np.array([1, 2, 3, 6, 10, 18, 20], dtype = np.float32)\n\tFIR_KERNEL = np.multiply(RAW_KERNEL,1/np.linalg.norm(RAW_KERNEL,1)) # Normalized kernel\n\t\n\tdef __init__(self, camera):\n\t\t### Instance Value initialization ###\n\t\tself.camera = camera\n\t\tself.calstate = CalState.UNCAL\n\t\tself.calpts = []\n\t\tself.XSIZE = 1000\n\t\tself.YSIZE = 1000\n\t\tself.x_est = -1\n\t\tself.y_est = -1\n\t\tself.theta_est = -1\n\t\t\n\t\t# Drawing storage\n\t\tself.waypointEst = [(300,300)] # Waypoint estimates for UI\n\t\tself.tagLoc = (10,10) # Tag location estimate\n\t\tself.fVectorStart = (0,0)\n\t\tself.fVectorEnd = (0,0)\n\t\t\n\t\t\n\t\t#self.worldpts = np.float32([\n\t\t# [0,self.YSIZE/2],\n\t\t# [0,0],\n\t\t# [self.XSIZE,0],\n\t\t# [self.XSIZE,self.YSIZE/2]\n\t\t# ])\n\t\t\n\t\t# ===== ***** Calibration points from world *****===== #\n\t\t'''self.worldpts = np.float32([\n\t\t [-5, -1. * -105],\t\t#22\n\t\t [90, -1. * -100],\t\t#27\n\t\t [90, -1. * 110],\t\t#26\n\t\t [0, -1. 
* 107]\t\t#25\n\t\t ])#*self.IMG_SCALE + self.IMG_OFFSET'''\n\t\t\n\t\t# Swap x-y coordinates (WTF!)\n\t\t'''self.worldpts = np.float32([\n\t\t [-105,-5],\t\t#22\n\t\t [-100, 90],\t\t#27\n\t\t [110, 90],\t\t#26\n\t\t [107, 0]\t\t#25\n\t\t ])#*self.IMG_SCALE + self.IMG_OFFSET'''\n\t\t\t\t\n\t\tself.worldpts = np.float32([\n\t\t [-104,-2], #22\n\t\t [-104,85], #27\n\t\t [115,84], #26\n\t\t [115,3] #25\n\t\t ])\n\t\tself.worldpts = vu.toImageCoordinates(self.worldpts)\n\t\ttestPts = vu.toWaypointCoordinates(self.worldpts)\n\t\tprint 'TestWorldPts', str(testPts)\n\t\t# ===== *************** ===== #\n\t\t \n\t\t### Camera initialization ###\n\t\tprint 'Opening Camera ' + str(camera)\n\t\tself.vidcap = cv2.VideoCapture(camera)# Open up specified camera\n\t\t# Check if camera is opened and exit if not\n\t\tif self.vidcap.isOpened():\n\t\t print 'Camera ' + str(camera) + ' opened successfully'\n\t\telse:\n\t\t print 'ERROR: Camera ' + str(camera) + ' not opened'\n\t\t return False\n\n\t\t# Set camera autoexposure\n\t\tuvc.set(self.camera, uvc.EXPOSURE_AUTO, 1)\n\t\tuvc.set(self.camera, uvc.EXPOSURE_AUTO_PRIORITY, 0)\n\n\t\t### Initialize UI elements ###\n\t\t# Filter Controls Window\n\t\tctlWindow = cv2.namedWindow(self.CTL_NAME)\n\t\tcv2.createTrackbar('Blue', self.CTL_NAME, 88, 180, self.trackbarChangeHandler)\n\t\tcv2.createTrackbar('Green', self.CTL_NAME, 41, 180, self.trackbarChangeHandler) \n\t\tcv2.createTrackbar('Red', self.CTL_NAME, 172, 180, self.trackbarChangeHandler)\n\t\tcv2.createTrackbar('B Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)\n\t\tcv2.createTrackbar('G Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)\n\t\tcv2.createTrackbar('R Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)\n\t\tcv2.createTrackbar('Sat Cutoff', self.CTL_NAME, 100, 255, self.trackbarChangeHandler)\n\t\tcv2.createTrackbar('Show Background', self.CTL_NAME, 1, 1, self.trackbarChangeHandler)\n\t\t\n\t\t# Camera input window\n\t\tcamWindow = cv2.namedWindow(self.CAM_FEED_NAME)\n\t\tcv2.createTrackbar('Gain', self.CAM_FEED_NAME, 128, 255, self.gainChanged)\n\t\tcv2.createTrackbar('Exposure', self.CAM_FEED_NAME, 1600, 2000, self.exposureChanged)\n\t\tcv2.createTrackbar('Saturation', self.CAM_FEED_NAME, 128, 255, self.saturationChanged)\n\t\tcv2.setMouseCallback(self.CAM_FEED_NAME, self.mouseClickHandler) # Set mouse callbacks for calibration\n\t\t\n\t\t# Rectified/Calibrated Image window\n\t\t#calWindow = cv2.namedWindow(self.CAL_NAME)\n\t\t#cv2.setMouseCallback(self.CAL_NAME, self.colorClickHandler)\n\t\t\n\t\t# Image processing Window 2\n\t\tprocWindow = cv2.namedWindow(self.PROC_NAME)\n\n\t\t# History for filter bank\n\t\tself.xHistory = deque(self.EMPTY_KERNEL)\n\t\tself.yHistory = deque(self.EMPTY_KERNEL)\n\t\tself.thetaHistory = deque(self.EMPTY_KERNEL)\n\n\t# Run vision on a frame\n\tdef processFrame(self):\n\t### Main processing loop ###\n\t#while(True):\n\t frameRet, self.camImg = self.vidcap.read()\n\t #Img = self.drawCalMarkers()\n\t cv2.imshow(self.CAM_FEED_NAME, self.drawCalMarkers())\n\t if(self.calstate == CalState.CALIBRATED):\n\t\t\tself.remapImage() # Apply perspective warp\n\t\t\tbl = cv2.getTrackbarPos('Blue', self.CTL_NAME)\n\t\t\tgr = cv2.getTrackbarPos('Green', self.CTL_NAME)\n\t\t\trd = cv2.getTrackbarPos('Red', self.CTL_NAME)\n\t\t\tbvmin = cv2.getTrackbarPos('B Cutoff', self.CTL_NAME)\n\t\t\tgvmin = cv2.getTrackbarPos('G Cutoff', self.CTL_NAME)\n\t\t\trvmin = cv2.getTrackbarPos('R Cutoff', self.CTL_NAME)\n\t\t\tsmin = cv2.getTrackbarPos('Sat 
Cutoff', self.CTL_NAME)\n\t\t\tbgroundFlag = cv2.getTrackbarPos('Show Background', self.CTL_NAME)\n\t\t\tbCentroid, self.bTagImg = self.findMarker(self.warpImg, bl, 10, smin, bvmin)\n\t\t\tgCentroid, self.gTagImg = self.findMarker(self.warpImg, gr, 10, smin, gvmin)\n\t\t\trCentroid, self.rTagImg = self.findMarker(self.warpImg, rd, 10, smin, rvmin)\n\t\t\t#vu.printCentroids(gCentroid, rCentroid)\n\t\t\tif(bgroundFlag):\n\t\t\t self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg, self.warpImg)\n\t\t\telse:\n\t\t\t self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg)\n\t\t\tctr, theta, bCtr, gCtr, rCtr = vu.localizeRobot(bCentroid, gCentroid, rCentroid)\n\t\t\tif((ctr != None) and (theta != None)):\n\t\t\t fctr, ftheta = self.filterPoints(ctr, theta)\n\t\t\t self.x_est = ctr[0]\n\t\t\t self.y_est = ctr[1]\n\t\t\t # print 'Theta IN:', theta\n\t\t\t self.theta_est = theta#ftheta\n\t\t\t self.tagLoc = vu.computeTagLocation(ctr, bCtr) # Compute tag location\n\t\t\t vu.drawSquareMarker(self.rgbImg, int(fctr[0]), int(fctr[1]), 5, (255,0,255))\n\t\t\tif(gCentroid != None):\n\t\t\t\tvu.drawSquareMarker(self.rgbImg, int(gCentroid[0]), int(gCentroid[1]), 5, (0,0,255))\n\t\t\tif(rCentroid != None):\n\t\t\t\tvu.drawSquareMarker(self.rgbImg, int(rCentroid[0]), int(rCentroid[1]), 5, (255,0,0))\n\t\t\tif(bCentroid != None):\n\t\t\t\tvu.drawSquareMarker(self.rgbImg, int(bCentroid[0]), int(bCentroid[1]), 5, (255,255,0))\n\t\t\twpIndex = 0\n\t\t\tfor wp in self.waypointEst:\n\t\t\t wpIndex = wpIndex + 1\n\t\t\t if(wpIndex == 1):\n\t\t\t\twpcolor = (0,0,255)\n\t\t\t else:\n\t\t\t\twpcolor = (0,255,255)\n\t\t\t vu.drawFilledCircleMarker(self.rgbImg, wp[0], wp[1], 10, wpcolor) #\n\t\t\t vu.drawTextIndex(self.rgbImg, wp[0], wp[1], str(wpIndex)) # Draw waypoint index\n\t\t\tif(self.tagLoc[0] != None):\n\t\t\t vu.drawFilledCircleMarker(self.rgbImg, self.tagLoc[0], self.tagLoc[1], 5, (0,0,160))\n\t\t\t#vu.drawVector(self.rgbImg, self.fVectorStart, self.fVectorEnd, (255,128,255))\n\t\t\t#cv2.imshow(self.CAL_NAME, self.warpImg)\n\t\t\tcv2.imshow(self.PROC_NAME, self.rgbImg)\n\t #if cv2.waitKey(20) & 0xFF == ord('q'):\n\t # break\n\t\n\t# Use current perspective transform to remap image\n\tdef remapImage(self):\n\t\tif(self.calstate == CalState.CALIBRATED):\n\t\t\tself.warpImg = cv2.warpPerspective(self.camImg, self.warp,(int(300*vu.IMG_SCALE),int(300*vu.IMG_SCALE)))\n\t\t\tself.warpImg = cv2.GaussianBlur(self.warpImg, (9,9), 1)\n\t\t\tself.warpImg = cv2.medianBlur(self.warpImg, 5)\n\t\telse:\n\t\t print 'Transform not calibrated'\n\n\t# Draws calibration markers on the camera image \n\tdef drawCalMarkers(self):\n\t\tmarkedImg = self.camImg.copy()\n\t\tfor pt in self.calpts:\n\t\t vu.drawSquareMarker(markedImg, pt[0], pt[1], 5, (255,0,255))\n\t\treturn markedImg\n\n\t# Finds a marker's central moment\n\tdef findMarker(self, image, hueCenter, hueWidth, satMin, valMin):\n\t\thsvImg = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\t\tmarkerImg = cv2.inRange(hsvImg, np.array([hueCenter-hueWidth/2, satMin, valMin]), np.array([hueCenter+hueWidth/2, 255, 255]))\n\t\tcleanElement = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))\n\t\tmarkerImg = cv2.erode(markerImg, cleanElement) # Clean up marker image w/ erode-dilate-median\n\t\tmarkerImg = cv2.dilate(markerImg, cleanElement)\n\t\tmarkerImg = cv2.medianBlur(markerImg, 3)\n\t\tmMoments = cv2.moments(markerImg) # Compute moments\n\t\tm00 = mMoments['m00']\n\t\tif(m00 > 0.1):\n\t\t\treturn (mMoments['m10']/m00, mMoments['m01']/m00), 
markerImg\n\t\treturn None, markerImg\n\n\t# FIR on centers and angles\n\tdef filterPoints(self, ctr, theta):\n\t\tif((ctr != None) and (theta != None)):\n\t\t\tif(len(self.xHistory) == len(self.FIR_KERNEL)):\n\t\t\t\tself.xHistory.popleft()\n\t\t\tif(len(self.yHistory) == len(self.FIR_KERNEL)):\n\t\t\t\tself.yHistory.popleft()\n\t\t\tif(len(self.thetaHistory) == len(self.FIR_KERNEL)):\n\t\t\t\tself.thetaHistory.popleft()\n\t\t\tself.xHistory.append(ctr[0])\n\t\t\tself.yHistory.append(ctr[1])\n\t\t\tself.thetaHistory.append(theta)\n\t\t\txFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.xHistory)),1)\n\t\t\tyFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.yHistory)),1)\n\t\t\tthetaFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.thetaHistory)),1)\n\t\t\t#print 'Filtered Phi:', phiFilter, ' Raw Theta:', theta\n\t\t\treturn (xFilter, yFilter), thetaFilter\n\n\t# Interface to get current state estimates\n\tdef getState(self):\n\t # Give estimated [x,y,theta]\n\t if(self.tagLoc != None):\n\t\ttx = self.tagLoc[0]\n\t\tty = self.tagLoc[1]\n\t else:\n\t\ttx = None\n\t\tty = None\n\t return [self.x_est, self.y_est, self.theta_est, tx, ty] \n\t \n\t### Event Handlers ###\n\t# Camera input mouseclick handler\n\tdef mouseClickHandler(self, event, x, y, flags, param):\n\t\tif event == cv2.EVENT_RBUTTONDOWN:\n\t\t print 'Recalibration requested'\n\t\t self.calstate = CalState.CAL_PROG\n\t\t self.calpts = [] # Reset calibration points\n\t\tif event == cv2.EVENT_LBUTTONDOWN:\n\t\t print 'Mouse left click event at ' + str(x) + ',' + str(y)\n\t\t if(self.calstate == CalState.UNCAL):\n\t\t\tself.calstate = CalState.CAL_PROG\n\t\t\tprint 'Adding calibration point at (' + str(x) + ',' + str(y) + ')'\n\t\t\tself.calpts.append([x,y])\n\t\t elif(self.calstate == CalState.CAL_PROG):\n\t\t\tif(len(self.calpts) < 4):\n\t\t\t print 'Adding calibration point at (' + str(x) + ',' + str(y) + ')'\n\t\t\t self.calpts.append([x,y])\n\t\t\t # Finish\n\t\t\t if(len(self.calpts) == 4):\n\t\t\t\tprint 'Calibrated'\n\t\t\t\tself.warp = cv2.getPerspectiveTransform(np.float32(self.calpts), self.worldpts)\n\t\t\t\tprint str(self.calpts)\n\t\t\t\tself.calstate = CalState.CALIBRATED\n\t\t elif(self.calstate == CalState.CALIBRATED):\n\t\t\tprint 'Already calibrated'\t \n\t\n\t# Color click handler for cal window\n\tdef colorClickHandler(self, event, x, y, flags, param):\n\t\tif event == cv2.EVENT_LBUTTONDOWN:\n\t\t\tprint 'Checking marker 1 color at ', str(x), ',', str(y)\n\t\t\tpass # Get color at point\n\t\tif event == cv2.EVENT_RBUTTONDOWN:\n\t\t\tprint 'Checking marker 2 color at ', str(x), ',', str(y)\n\t\t\tpass # Get color at point\n\n\t# Generic do-nothing slider handler (for )\n\tdef trackbarChangeHandler(self, x):\n\t\tpass\n\n\t# Gain slider handler\n\tdef gainChanged(self, gain):\n\t\tuvc.set(self.camera, uvc.GAIN, gain)\n\t\n\t# Saturation slider handler\n\tdef saturationChanged(self, sat):\n\t\tuvc.set(self.camera, uvc.SATURATION, sat)\n\n\t# Exposure slider handler\n\tdef exposureChanged(self, exp):\n\t\tuvc.set(self.camera, uvc.EXPOSURE_ABS, exp)\n\t\t\n\t# Sets the waypoint list for rendering on overlay\n\tdef setWaypoints(self, waypointEst):\n\t self.waypointEst = vu.toImageCoordinates(waypointEst)\n\t \n\t# Sets the estimated tag location for rendering on the overlay\n\tdef setTagLocation(self, tagEst):\n\t self.tagLoc = (int(tagEst[0]),int(tagEst[1]))\n\t \n\t# Stops the vision process\n\tdef stop(self):\n\t self.vidcap.release()\n\t cv2.release()\n\t 
cv2.destroyAllWindows()\n\n# Main function to run vision system as standalone\ndef main():\n\tprint 'Args:' , str(sys.argv)\n\tfor x in range(len(sys.argv)):\n\t\tif(sys.argv[x] == '-c'):\n\t\t\tncam = int(sys.argv[x+1])\n\tvs = VisionSystem(ncam)\n\tself.vidcap.release()\n\tcv2.release()\n\tcv2.destroyAllWindows()\n\n\t \nif __name__ == '__main__':\n main()\n\n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@es_bot.event
async def on_ready():
print('es started')
@nas_bot.event
async def on_ready():
print('nas started')
@dow_bot.event
async def on_ready():
print('dow started')
@silver_bot.event
async def on_ready():
print('silver started')
@us10y_bot.event
async def on_ready():
print('us10y started')
@dollar_bot.event
async def on_Ready():
print('dollar started')
@vix_bot.event
async def on_ready():
print('vix started')
@btc_bot.event
async def on_ready():
print('btc started')
@eth_bot.event
async def on_ready():
print('eth started')
@link_bot.event
async def on_ready():
print('link started')
<|reserved_special_token_0|>
@tasks.loop(seconds=5)
async def called_second():
data = wrangle_data()
print(data)
ticker_es = data['es']
ticker_nas = data['nas']
ticker_dow = data['dow']
ticker_vix = data['vix']
ticker_us10y = data['us10y']
ticker_dollar = data['dxy']
ticker_silver = data['silver']
ticker_btc = data['btc']
ticker_eth = data['eth']
ticker_link = data['link']
if ticker_es:
guild_ids = [guild.id for guild in es_bot.guilds]
name_es = '{:20,.2f}'.format(ticker_es['last'])
watching_es = ticker_es['change%']
guild_channels = [es_bot.get_guild(guild_id) for guild_id in guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_es:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'1) {name_es}')
await es_bot.change_presence(activity=discord.Activity(type
=discord.ActivityType.watching, name=f'ES {watching_es}'))
except:
print(f'broke in {guild_channel}')
else:
print('no es data')
if ticker_nas:
guild_ids = [guild.id for guild in nas_bot.guilds]
name_nas = '{:20,.2f}'.format(ticker_nas['last'])
watching_nas = ticker_nas['change%']
guild_channels = [nas_bot.get_guild(guild_id) for guild_id in guild_ids
]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_nas:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'2) {name_nas}')
await nas_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'NQ {watching_nas}'))
except:
print(f'broke in {guild_channel}')
else:
print('no nas data')
if ticker_dow:
guild_ids = [guild.id for guild in dow_bot.guilds]
name_dow = '{:20,.2f}'.format(ticker_dow['last'])
watching_dow = ticker_dow['change%']
guild_channels = [dow_bot.get_guild(guild_id) for guild_id in guild_ids
]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_dow:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'3) {name_dow}')
await dow_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'DJI {watching_dow}'))
except:
print(f'broke in {guild_channel}')
else:
print('no dow data')
    if ticker_vix:
guild_ids = [guild.id for guild in vix_bot.guilds]
name_vix = '{:20,.2f}'.format(ticker_vix['last'])
watching_vix = ticker_vix['change%']
guild_channels = [vix_bot.get_guild(guild_id) for guild_id in guild_ids
]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_vix:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'4) {name_vix}')
await vix_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'VIX {watching_vix}'))
except:
print(f'broke in {guild_channel}')
else:
        print('no vix data')
if ticker_dollar:
guild_ids = [guild.id for guild in dollar_bot.guilds]
name_dollar = '{:20,.2f}'.format(ticker_dollar['last'])
watching_dollar = ticker_dollar['change%']
guild_channels = [dollar_bot.get_guild(guild_id) for guild_id in
guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_dollar:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'5) {name_dollar}')
await dollar_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'DXY {watching_dollar}'))
except:
print(f'broke in {guild_channel}')
else:
print('no dollar data')
if ticker_us10y:
guild_ids = [guild.id for guild in us10y_bot.guilds]
name_us10y = '{:20,.2f}'.format(ticker_us10y['last'])
watching_us10y = ticker_us10y['change%']
guild_channels = [us10y_bot.get_guild(guild_id) for guild_id in
guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_us10y:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'4) {name_us10y}')
await us10y_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'US10Y {watching_us10y}'))
except:
print(f'broke in {guild_channel}')
else:
print('no us10y data')
if ticker_silver:
guild_ids = [guild.id for guild in silver_bot.guilds]
name_silver = '{:20,.2f}'.format(ticker_silver['last'])
watching_silver = ticker_silver['change%']
guild_channels = [silver_bot.get_guild(guild_id) for guild_id in
guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_silver:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'6) {name_silver}')
await silver_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f"{ticker_silver['name'].upper()} {watching_silver}"))
except:
print(f'broke in {guild_channel}')
else:
print('no silver data')
if ticker_btc:
guild_ids = [guild.id for guild in btc_bot.guilds]
name_btc = '{:20,.2f}'.format(ticker_btc['last'])
watching_btc = ticker_btc['change%']
guild_channels = [btc_bot.get_guild(guild_id) for guild_id in guild_ids
]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_btc:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'7) {name_btc}')
await btc_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'BTC {watching_btc}'))
except:
print(f'broke in {guild_channel}')
else:
print('no data for btc')
if ticker_eth:
guild_ids = [guild.id for guild in eth_bot.guilds]
name_eth = '{:20,.2f}'.format(ticker_eth['last'])
watching_eth = ticker_eth['change%']
guild_channels = [eth_bot.get_guild(guild_id) for guild_id in guild_ids
]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_eth:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'8) {name_eth}')
await eth_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'ETH {watching_eth}'))
except:
print(f'broke in {guild_channel}')
else:
        print('no data for eth')
if ticker_link:
guild_ids = [guild.id for guild in link_bot.guilds]
name_link = '{:20,.2f}'.format(ticker_link['last'])
watching_link = ticker_link['change%']
guild_channels = [link_bot.get_guild(guild_id) for guild_id in
guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_link:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'9) {name_link}')
await link_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'LINK {watching_link}'))
except:
print(f'broke in {guild_channel}')
else:
        print('no data for link')
    print('updated')
@called_second.before_loop
async def before():
await es_bot.wait_until_ready()
await nas_bot.wait_until_ready()
await dow_bot.wait_until_ready()
await vix_bot.wait_until_ready()
await us10y_bot.wait_until_ready()
await dollar_bot.wait_until_ready()
await silver_bot.wait_until_ready()
await btc_bot.wait_until_ready()
await eth_bot.wait_until_ready()
await link_bot.wait_until_ready()
print('Finished waiting')
called_second.start()
async def create_bots():
es_task = loop.create_task(es_bot.start(es))
nas_task = loop.create_task(nas_bot.start(nas))
dow_task = loop.create_task(dow_bot.start(dow))
vix_task = loop.create_task(vix_bot.start(vix))
us10y_task = loop.create_task(us10y_bot.start(us10y))
dollar_task = loop.create_task(dollar_bot.start(dollar))
silver_task = loop.create_task(silver_bot.start(silver))
btc_task = loop.create_task(btc_bot.start(btc))
eth_task = loop.create_task(eth_bot.start(eth))
link_task = loop.create_task(link_bot.start(link))
await es_task
await nas_task
await dow_task
await vix_task
await us10y_task
await dollar_task
await silver_task
await btc_task
await eth_task
await link_task
loop.run_until_complete(create_bots())
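
# Hedged refactoring sketch (added; not part of the original): the ten
# near-identical blocks inside called_second() differ only in the bot, the
# nickname prefix, and the presence label, so a helper like this could
# replace them. It assumes `get` is discord.utils.get, as used above.
async def update_bot(bot, prefix, label, ticker):
    if not ticker:
        print(f'no data for {label}')
        return
    nick = '{:20,.2f}'.format(ticker['last'])
    change = ticker['change%']
    for guild in bot.guilds:
        try:
            red = get(guild.roles, name='RED')
            green = get(guild.roles, name='GREEN')
            me = guild.me
            if '-' in change:
                await me.remove_roles(green)
                await me.add_roles(red)
            else:
                await me.remove_roles(red)
                await me.add_roles(green)
            await me.edit(nick=f'{prefix}) {nick}')
            await bot.change_presence(activity=discord.Activity(
                type=discord.ActivityType.watching,
                name=f'{label} {change}'))
        except Exception:
            print(f'broke in {guild}')

# e.g. inside called_second():  await update_bot(es_bot, 1, 'ES', ticker_es)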
<|reserved_special_token_1|>
<|reserved_special_token_0|>
es_bot = discord.Client()
nas_bot = discord.Client()
dow_bot = discord.Client()
us10y_bot = discord.Client()
vix_bot = discord.Client()
dollar_bot = discord.Client()
silver_bot = discord.Client()
btc_bot = discord.Client()
eth_bot = discord.Client()
link_bot = discord.Client()
loop = asyncio.get_event_loop()
@es_bot.event
async def on_ready():
print('es started')
@nas_bot.event
async def on_ready():
print('nas started')
@dow_bot.event
async def on_ready():
print('dow started')
@silver_bot.event
async def on_ready():
print('silver started')
@us10y_bot.event
async def on_ready():
print('us10y started')
@dollar_bot.event
async def on_Ready():
print('dollar started')
@vix_bot.event
async def on_ready():
print('vix started')
@btc_bot.event
async def on_ready():
print('btc started')
@eth_bot.event
async def on_ready():
print('eth started')
@link_bot.event
async def on_ready():
print('link started')
<|reserved_special_token_0|>
@tasks.loop(seconds=5)
async def called_second():
data = wrangle_data()
print(data)
ticker_es = data['es']
ticker_nas = data['nas']
ticker_dow = data['dow']
ticker_vix = data['vix']
ticker_us10y = data['us10y']
ticker_dollar = data['dxy']
ticker_silver = data['silver']
ticker_btc = data['btc']
ticker_eth = data['eth']
ticker_link = data['link']
if ticker_es:
guild_ids = [guild.id for guild in es_bot.guilds]
name_es = '{:20,.2f}'.format(ticker_es['last'])
watching_es = ticker_es['change%']
guild_channels = [es_bot.get_guild(guild_id) for guild_id in guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_es:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'1) {name_es}')
await es_bot.change_presence(activity=discord.Activity(type
=discord.ActivityType.watching, name=f'ES {watching_es}'))
except:
print(f'broke in {guild_channel}')
else:
print('no es data')
if ticker_nas:
guild_ids = [guild.id for guild in nas_bot.guilds]
name_nas = '{:20,.2f}'.format(ticker_nas['last'])
watching_nas = ticker_nas['change%']
guild_channels = [nas_bot.get_guild(guild_id) for guild_id in guild_ids
]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_nas:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'2) {name_nas}')
await nas_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'NQ {watching_nas}'))
except:
print(f'broke in {guild_channel}')
else:
print('no nas data')
if ticker_dow:
guild_ids = [guild.id for guild in dow_bot.guilds]
name_dow = '{:20,.2f}'.format(ticker_dow['last'])
watching_dow = ticker_dow['change%']
guild_channels = [dow_bot.get_guild(guild_id) for guild_id in guild_ids
]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_dow:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'3) {name_dow}')
await dow_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'DJI {watching_dow}'))
except:
print(f'broke in {guild_channel}')
else:
print('no dow data')
if vix:
guild_ids = [guild.id for guild in vix_bot.guilds]
name_vix = '{:20,.2f}'.format(ticker_vix['last'])
watching_vix = ticker_vix['change%']
guild_channels = [vix_bot.get_guild(guild_id) for guild_id in guild_ids
]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_vix:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'4) {name_vix}')
await vix_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'VIX {watching_vix}'))
except:
print(f'broke in {guild_channel}')
else:
print('no vix data ')
if ticker_dollar:
guild_ids = [guild.id for guild in dollar_bot.guilds]
name_dollar = '{:20,.2f}'.format(ticker_dollar['last'])
watching_dollar = ticker_dollar['change%']
guild_channels = [dollar_bot.get_guild(guild_id) for guild_id in
guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_dollar:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'5) {name_dollar}')
await dollar_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'DXY {watching_dollar}'))
except:
print(f'broke in {guild_channel}')
else:
print('no dollar data')
if ticker_us10y:
guild_ids = [guild.id for guild in us10y_bot.guilds]
name_us10y = '{:20,.2f}'.format(ticker_us10y['last'])
watching_us10y = ticker_us10y['change%']
guild_channels = [us10y_bot.get_guild(guild_id) for guild_id in
guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_us10y:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'4) {name_us10y}')
await us10y_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'US10Y {watching_us10y}'))
except:
print(f'broke in {guild_channel}')
else:
print('no us10y data')
if ticker_silver:
guild_ids = [guild.id for guild in silver_bot.guilds]
name_silver = '{:20,.2f}'.format(ticker_silver['last'])
watching_silver = ticker_silver['change%']
guild_channels = [silver_bot.get_guild(guild_id) for guild_id in
guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_silver:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'6) {name_silver}')
await silver_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f"{ticker_silver['name'].upper()} {watching_silver}"))
except:
print(f'broke in {guild_channel}')
else:
print('no silver data')
if ticker_btc:
guild_ids = [guild.id for guild in btc_bot.guilds]
name_btc = '{:20,.2f}'.format(ticker_btc['last'])
watching_btc = ticker_btc['change%']
guild_channels = [btc_bot.get_guild(guild_id) for guild_id in guild_ids
]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_btc:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'7) {name_btc}')
await btc_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'BTC {watching_btc}'))
except:
print(f'broke in {guild_channel}')
else:
print('no data for btc')
if ticker_eth:
guild_ids = [guild.id for guild in eth_bot.guilds]
name_eth = '{:20,.2f}'.format(ticker_eth['last'])
watching_eth = ticker_eth['change%']
guild_channels = [eth_bot.get_guild(guild_id) for guild_id in guild_ids
]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_eth:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'8) {name_eth}')
await eth_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'ETH {watching_eth}'))
except:
print(f'broke in {guild_channel}')
else:
print('nodata for eth')
if ticker_link:
guild_ids = [guild.id for guild in link_bot.guilds]
name_link = '{:20,.2f}'.format(ticker_link['last'])
watching_link = ticker_link['change%']
guild_channels = [link_bot.get_guild(guild_id) for guild_id in
guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_link:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'9) {name_link}')
await link_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'LINK {watching_link}'))
except:
print(f'broke in {guild_channel}')
else:
print('nodata for eth')
print(f'updated ')
@called_second.before_loop
async def before():
await es_bot.wait_until_ready()
await nas_bot.wait_until_ready()
await dow_bot.wait_until_ready()
await vix_bot.wait_until_ready()
await us10y_bot.wait_until_ready()
await dollar_bot.wait_until_ready()
await silver_bot.wait_until_ready()
await btc_bot.wait_until_ready()
await eth_bot.wait_until_ready()
await link_bot.wait_until_ready()
print('Finished waiting')
called_second.start()
async def create_bots():
es_task = loop.create_task(es_bot.start(es))
nas_task = loop.create_task(nas_bot.start(nas))
dow_task = loop.create_task(dow_bot.start(dow))
vix_task = loop.create_task(vix_bot.start(vix))
us10y_task = loop.create_task(us10y_bot.start(us10y))
dollar_task = loop.create_task(dollar_bot.start(dollar))
silver_task = loop.create_task(silver_bot.start(silver))
btc_task = loop.create_task(btc_bot.start(btc))
eth_task = loop.create_task(eth_bot.start(eth))
link_task = loop.create_task(link_bot.start(link))
await es_task
await nas_task
await dow_task
await vix_task
await us10y_task
await dollar_task
await silver_task
await btc_task
await eth_task
await link_task
loop.run_until_complete(create_bots())
<|reserved_special_token_1|>
from discord.ext import commands, tasks
from discord.utils import get
import discord
import re
import json
import time
import random
import asyncio
import os
import datetime
from live_ticker_scrape import wrangle_data
from tokens import dev, dev1, es, nas, dow, us10y, dollar, vix, btc, eth, silver, link
es_bot = discord.Client()
nas_bot = discord.Client()
dow_bot = discord.Client()
us10y_bot = discord.Client()
vix_bot = discord.Client()
ticker_vix = discord.Client()
dollar_bot = discord.Client()
silver_bot = discord.Client()
btc_bot = discord.Client()
eth_bot = discord.Client()
link_bot = discord.Client()
loop = asyncio.get_event_loop()
@es_bot.event
async def on_ready():
print('es started')
@nas_bot.event
async def on_ready():
print('nas started')
@dow_bot.event
async def on_ready():
print('dow started')
@silver_bot.event
async def on_ready():
print('silver started')
@us10y_bot.event
async def on_ready():
print('us10y started')
@dollar_bot.event
async def on_Ready():
print('dollar started')
@vix_bot.event
async def on_ready():
print('vix started')
@btc_bot.event
async def on_ready():
print('btc started')
@eth_bot.event
async def on_ready():
print('eth started')
@link_bot.event
async def on_ready():
print('link started')
<|reserved_special_token_0|>
@tasks.loop(seconds=5)
async def called_second():
data = wrangle_data()
print(data)
ticker_es = data['es']
ticker_nas = data['nas']
ticker_dow = data['dow']
ticker_vix = data['vix']
ticker_us10y = data['us10y']
ticker_dollar = data['dxy']
ticker_silver = data['silver']
ticker_btc = data['btc']
ticker_eth = data['eth']
ticker_link = data['link']
if ticker_es:
guild_ids = [guild.id for guild in es_bot.guilds]
name_es = '{:20,.2f}'.format(ticker_es['last'])
watching_es = ticker_es['change%']
guild_channels = [es_bot.get_guild(guild_id) for guild_id in guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_es:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'1) {name_es}')
await es_bot.change_presence(activity=discord.Activity(type
=discord.ActivityType.watching, name=f'ES {watching_es}'))
except:
print(f'broke in {guild_channel}')
else:
print('no es data')
if ticker_nas:
guild_ids = [guild.id for guild in nas_bot.guilds]
name_nas = '{:20,.2f}'.format(ticker_nas['last'])
watching_nas = ticker_nas['change%']
guild_channels = [nas_bot.get_guild(guild_id) for guild_id in guild_ids
]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_nas:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'2) {name_nas}')
await nas_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'NQ {watching_nas}'))
except:
print(f'broke in {guild_channel}')
else:
print('no nas data')
if ticker_dow:
guild_ids = [guild.id for guild in dow_bot.guilds]
name_dow = '{:20,.2f}'.format(ticker_dow['last'])
watching_dow = ticker_dow['change%']
guild_channels = [dow_bot.get_guild(guild_id) for guild_id in guild_ids
]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_dow:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'3) {name_dow}')
await dow_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'DJI {watching_dow}'))
except:
print(f'broke in {guild_channel}')
else:
print('no dow data')
if vix:
guild_ids = [guild.id for guild in vix_bot.guilds]
name_vix = '{:20,.2f}'.format(ticker_vix['last'])
watching_vix = ticker_vix['change%']
guild_channels = [vix_bot.get_guild(guild_id) for guild_id in guild_ids
]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_vix:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'4) {name_vix}')
await vix_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'VIX {watching_vix}'))
except:
print(f'broke in {guild_channel}')
else:
print('no vix data ')
if ticker_dollar:
guild_ids = [guild.id for guild in dollar_bot.guilds]
name_dollar = '{:20,.2f}'.format(ticker_dollar['last'])
watching_dollar = ticker_dollar['change%']
guild_channels = [dollar_bot.get_guild(guild_id) for guild_id in
guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_dollar:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'5) {name_dollar}')
await dollar_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'DXY {watching_dollar}'))
except:
print(f'broke in {guild_channel}')
else:
print('no dollar data')
if ticker_us10y:
guild_ids = [guild.id for guild in us10y_bot.guilds]
name_us10y = '{:20,.2f}'.format(ticker_us10y['last'])
watching_us10y = ticker_us10y['change%']
guild_channels = [us10y_bot.get_guild(guild_id) for guild_id in
guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_us10y:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'4) {name_us10y}')
await us10y_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'US10Y {watching_us10y}'))
except:
print(f'broke in {guild_channel}')
else:
print('no us10y data')
if ticker_silver:
guild_ids = [guild.id for guild in silver_bot.guilds]
name_silver = '{:20,.2f}'.format(ticker_silver['last'])
watching_silver = ticker_silver['change%']
guild_channels = [silver_bot.get_guild(guild_id) for guild_id in
guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_silver:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'6) {name_silver}')
await silver_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f"{ticker_silver['name'].upper()} {watching_silver}"))
except:
print(f'broke in {guild_channel}')
else:
print('no silver data')
if ticker_btc:
guild_ids = [guild.id for guild in btc_bot.guilds]
name_btc = '{:20,.2f}'.format(ticker_btc['last'])
watching_btc = ticker_btc['change%']
guild_channels = [btc_bot.get_guild(guild_id) for guild_id in guild_ids
]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_btc:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'7) {name_btc}')
await btc_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'BTC {watching_btc}'))
except:
print(f'broke in {guild_channel}')
else:
print('no data for btc')
if ticker_eth:
guild_ids = [guild.id for guild in eth_bot.guilds]
name_eth = '{:20,.2f}'.format(ticker_eth['last'])
watching_eth = ticker_eth['change%']
guild_channels = [eth_bot.get_guild(guild_id) for guild_id in guild_ids
]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_eth:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'8) {name_eth}')
await eth_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'ETH {watching_eth}'))
except:
print(f'broke in {guild_channel}')
else:
print('nodata for eth')
if ticker_link:
guild_ids = [guild.id for guild in link_bot.guilds]
name_link = '{:20,.2f}'.format(ticker_link['last'])
watching_link = ticker_link['change%']
guild_channels = [link_bot.get_guild(guild_id) for guild_id in
guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if '-' in watching_link:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f'9) {name_link}')
await link_bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name=
f'LINK {watching_link}'))
except:
print(f'broke in {guild_channel}')
else:
print('nodata for eth')
print(f'updated ')
@called_second.before_loop
async def before():
await es_bot.wait_until_ready()
await nas_bot.wait_until_ready()
await dow_bot.wait_until_ready()
await vix_bot.wait_until_ready()
await us10y_bot.wait_until_ready()
await dollar_bot.wait_until_ready()
await silver_bot.wait_until_ready()
await btc_bot.wait_until_ready()
await eth_bot.wait_until_ready()
await link_bot.wait_until_ready()
print('Finished waiting')
called_second.start()
async def create_bots():
es_task = loop.create_task(es_bot.start(es))
nas_task = loop.create_task(nas_bot.start(nas))
dow_task = loop.create_task(dow_bot.start(dow))
vix_task = loop.create_task(vix_bot.start(vix))
us10y_task = loop.create_task(us10y_bot.start(us10y))
dollar_task = loop.create_task(dollar_bot.start(dollar))
silver_task = loop.create_task(silver_bot.start(silver))
btc_task = loop.create_task(btc_bot.start(btc))
eth_task = loop.create_task(eth_bot.start(eth))
link_task = loop.create_task(link_bot.start(link))
await es_task
await nas_task
await dow_task
await vix_task
await us10y_task
await dollar_task
await silver_task
await btc_task
await eth_task
await link_task
loop.run_until_complete(create_bots())
<|reserved_special_token_1|>
from discord.ext import commands, tasks
from discord.utils import get
import discord
import re
import json
import time
import random
import asyncio
import os
import datetime
from live_ticker_scrape import wrangle_data
from tokens import dev, dev1, es, nas, dow, us10y, dollar, vix, btc, eth, silver, link
es_bot = discord.Client()
nas_bot = discord.Client()
dow_bot = discord.Client()
us10y_bot = discord.Client()
vix_bot = discord.Client()
dollar_bot = discord.Client()
silver_bot = discord.Client()
btc_bot = discord.Client()
eth_bot = discord.Client()
link_bot = discord.Client()
loop = asyncio.get_event_loop()
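# one Client per ticker: a bot account can show only a single nickname and
# presence at a time, so displaying ten prices side by side takes ten tokens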
@es_bot.event
async def on_ready():
print('es started')
@nas_bot.event
async def on_ready():
print('nas started')
@dow_bot.event
async def on_ready():
print('dow started')
@silver_bot.event
async def on_ready():
print('silver started')
@us10y_bot.event
async def on_ready():
print('us10y started')
@dollar_bot.event
async def on_ready():
print('dollar started')
@vix_bot.event
async def on_ready():
print('vix started')
@btc_bot.event
async def on_ready():
print('btc started')
@eth_bot.event
async def on_ready():
print('eth started')
@link_bot.event
async def on_ready():
print('link started')
'''
@tasks.loop() accepts seconds, minutes, or hours
https://discordpy.readthedocs.io/en/latest/ext/tasks/
'''
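# e.g. @tasks.loop(minutes=1) would slow the refresh cadence to once a minute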
@tasks.loop(seconds=5)
async def called_second():
    ## refresh the scraped data, then update every guild each bot has joined
data = wrangle_data()
print(data)
ticker_es = data['es']
ticker_nas = data['nas']
ticker_dow = data['dow']
ticker_vix = data['vix']
ticker_us10y = data['us10y']
ticker_dollar = data['dxy']
ticker_silver = data['silver']
ticker_btc = data['btc']
ticker_eth = data['eth']
ticker_link = data['link']
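    ## each value is expected to be falsy when the scrape failed, or a dict
    ## with at least 'last' (numeric) and 'change%' (string); silver's dict
    ## also carries a 'name' field used in its presence text below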
## es
if ticker_es:
guild_ids = [guild.id for guild in es_bot.guilds]
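        # '{:20,.2f}' right-aligns the price to width 20 with thousands
        # separators and two decimals, padding short prices with spaces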
name_es = '{:20,.2f}'.format(ticker_es['last'])
watching_es = ticker_es['change%']
guild_channels = [es_bot.get_guild(guild_id) for guild_id in guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
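                # RED/GREEN are assumed to be pre-created colored roles in the
                # guild; whichever one is applied tints the bot's nickname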
if "-" in watching_es:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f"1) {name_es}")
await es_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f"ES {watching_es}"))
            except Exception:
print(f'broke in {guild_channel}')
else:
print('no es data')
    ## nas
if ticker_nas:
guild_ids = [guild.id for guild in nas_bot.guilds]
name_nas = '{:20,.2f}'.format(ticker_nas['last'])
        watching_nas = ticker_nas['change%']
guild_channels = [nas_bot.get_guild(guild_id) for guild_id in guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if "-" in watching_nas:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f"2) {name_nas}")
await nas_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f"NQ {watching_nas}"))
            except Exception:
print(f'broke in {guild_channel}')
else:
print('no nas data')
## dow
if ticker_dow:
guild_ids = [guild.id for guild in dow_bot.guilds]
name_dow = '{:20,.2f}'.format(ticker_dow['last'])
watching_dow = ticker_dow['change%']
guild_channels = [dow_bot.get_guild(guild_id) for guild_id in guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if "-" in watching_dow:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f"3) {name_dow}")
await dow_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f"DJI {watching_dow}"))
            except Exception:
print(f'broke in {guild_channel}')
else:
print('no dow data')
## vix
    if ticker_vix:
guild_ids = [guild.id for guild in vix_bot.guilds]
name_vix = '{:20,.2f}'.format(ticker_vix['last'])
watching_vix = ticker_vix['change%']
guild_channels = [vix_bot.get_guild(guild_id) for guild_id in guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if "-" in watching_vix:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f"4) {name_vix}")
await vix_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f"VIX {watching_vix}"))
            except Exception:
print(f'broke in {guild_channel}')
else:
        print('no vix data')
# dollar
if ticker_dollar:
guild_ids = [guild.id for guild in dollar_bot.guilds]
name_dollar = '{:20,.2f}'.format(ticker_dollar['last'])
watching_dollar = ticker_dollar['change%']
guild_channels = [dollar_bot.get_guild(guild_id) for guild_id in guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if "-" in watching_dollar:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f"5) {name_dollar}")
await dollar_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f"DXY {watching_dollar}"))
            except Exception:
print(f'broke in {guild_channel}')
else:
print('no dollar data')
# us10y
if ticker_us10y:
guild_ids = [guild.id for guild in us10y_bot.guilds]
name_us10y = '{:20,.2f}'.format(ticker_us10y['last'])
watching_us10y = ticker_us10y['change%']
guild_channels = [us10y_bot.get_guild(guild_id) for guild_id in guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if "-" in watching_us10y:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f"4) {name_us10y}")
await us10y_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f"US10Y {watching_us10y}"))
            except Exception:
print(f'broke in {guild_channel}')
else:
print('no us10y data')
# silver
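    # unlike the hard-coded labels above, silver's watch text pulls its
    # label from the scraped 'name' field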
if ticker_silver:
guild_ids = [guild.id for guild in silver_bot.guilds]
name_silver = '{:20,.2f}'.format(ticker_silver['last'])
watching_silver = ticker_silver['change%']
guild_channels = [silver_bot.get_guild(guild_id) for guild_id in guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if "-" in watching_silver:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f"6) {name_silver}")
await silver_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f"{ticker_silver['name'].upper()} {watching_silver}"))
            except Exception:
print(f'broke in {guild_channel}')
else:
print('no silver data')
    # shitcoin stuff
# btc
if ticker_btc:
guild_ids = [guild.id for guild in btc_bot.guilds]
name_btc = '{:20,.2f}'.format(ticker_btc['last'])
watching_btc = ticker_btc['change%']
guild_channels = [btc_bot.get_guild(guild_id) for guild_id in guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if "-" in watching_btc:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f"7) {name_btc}")
await btc_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f"BTC {watching_btc}"))
            except Exception:
print(f'broke in {guild_channel}')
else:
print('no data for btc')
# eth
if ticker_eth:
guild_ids = [guild.id for guild in eth_bot.guilds]
        name_eth = '{:20,.2f}'.format(ticker_eth['last'])
watching_eth = ticker_eth['change%']
guild_channels = [eth_bot.get_guild(guild_id) for guild_id in guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if "-" in watching_eth:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f"8) {name_eth}")
await eth_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f"ETH {watching_eth}"))
            except Exception:
                print(f'broke in {guild_channel}')
    else:
        print('no data for eth')
# link
if ticker_link:
guild_ids = [guild.id for guild in link_bot.guilds]
name_link = '{:20,.2f}'.format(ticker_link['last'])
watching_link = ticker_link['change%']
guild_channels = [link_bot.get_guild(guild_id) for guild_id in guild_ids]
for guild_channel in guild_channels:
try:
red = get(guild_channel.roles, name='RED')
green = get(guild_channel.roles, name='GREEN')
if "-" in watching_link:
discord_bot = guild_channel.me
await discord_bot.remove_roles(green)
await discord_bot.add_roles(red)
else:
discord_bot = guild_channel.me
await discord_bot.remove_roles(red)
await discord_bot.add_roles(green)
await guild_channel.me.edit(nick=f"9) {name_link}")
await link_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f"LINK {watching_link}"))
            except Exception:
                print(f'broke in {guild_channel}')
    else:
        print('no data for link')
    print('updated')
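# The nine per-ticker blocks above differ only in which client, nick prefix,
# and label they use. A minimal sketch of a shared helper that could replace
# them (hypothetical name update_ticker; not wired into the script):
async def update_ticker(bot, ticker, prefix, label):
    # skip quietly when the scrape returned nothing for this asset
    if not ticker:
        print(f'no {label} data')
        return
    nick = '{:20,.2f}'.format(ticker['last'])
    change = ticker['change%']
    for guild in bot.guilds:
        try:
            red = get(guild.roles, name='RED')
            green = get(guild.roles, name='GREEN')
            # a leading '-' in the percent string means the asset is down
            if '-' in change:
                await guild.me.remove_roles(green)
                await guild.me.add_roles(red)
            else:
                await guild.me.remove_roles(red)
                await guild.me.add_roles(green)
            await guild.me.edit(nick=f'{prefix}) {nick}')
            await bot.change_presence(activity=discord.Activity(
                type=discord.ActivityType.watching, name=f'{label} {change}'))
        except Exception:
            print(f'broke in {guild}')
# usage inside called_second() would be e.g.: await update_ticker(es_bot, ticker_es, 1, 'ES')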
@called_second.before_loop
async def before():
await es_bot.wait_until_ready()
await nas_bot.wait_until_ready()
await dow_bot.wait_until_ready()
await vix_bot.wait_until_ready()
await us10y_bot.wait_until_ready()
await dollar_bot.wait_until_ready()
await silver_bot.wait_until_ready()
await btc_bot.wait_until_ready()
await eth_bot.wait_until_ready()
await link_bot.wait_until_ready()
print("Finished waiting")
called_second.start()
async def create_bots():
    es_task = loop.create_task(es_bot.start(es))
nas_task = loop.create_task(nas_bot.start(nas))
dow_task = loop.create_task(dow_bot.start(dow))
vix_task = loop.create_task(vix_bot.start(vix))
us10y_task = loop.create_task(us10y_bot.start(us10y))
dollar_task = loop.create_task(dollar_bot.start(dollar))
silver_task = loop.create_task(silver_bot.start(silver))
btc_task = loop.create_task(btc_bot.start(btc))
eth_task = loop.create_task(eth_bot.start(eth))
link_task = loop.create_task(link_bot.start(link))
await es_task
await nas_task
await dow_task
await vix_task
await us10y_task
await dollar_task
await silver_task
await btc_task
await eth_task
await link_task
loop.run_until_complete(create_bots())
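# note: module-level Clients plus get_event_loop()/run_until_complete is the
# discord.py 1.x startup pattern; 2.x requires an intents argument and is
# usually driven by asyncio.run instead. Client.start() runs until the client
# disconnects, so create_bots() keeps the process alive indefinitely.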
{watching_link}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n print(f'updated ')\n\n\n@called_second.before_loop\nasync def before():\n await es_bot.wait_until_ready()\n await nas_bot.wait_until_ready()\n await dow_bot.wait_until_ready()\n await vix_bot.wait_until_ready()\n await us10y_bot.wait_until_ready()\n await dollar_bot.wait_until_ready()\n await silver_bot.wait_until_ready()\n await btc_bot.wait_until_ready()\n await eth_bot.wait_until_ready()\n await link_bot.wait_until_ready()\n print('Finished waiting')\n\n\ncalled_second.start()\n\n\nasync def create_bots():\n es_task = loop.create_task(es_bot.start(es))\n nas_task = loop.create_task(nas_bot.start(nas))\n dow_task = loop.create_task(dow_bot.start(dow))\n vix_task = loop.create_task(vix_bot.start(vix))\n us10y_task = loop.create_task(us10y_bot.start(us10y))\n dollar_task = loop.create_task(dollar_bot.start(dollar))\n silver_task = loop.create_task(silver_bot.start(silver))\n btc_task = loop.create_task(btc_bot.start(btc))\n eth_task = loop.create_task(eth_bot.start(eth))\n link_task = loop.create_task(link_bot.start(link))\n await es_task\n await nas_task\n await dow_task\n await vix_task\n await us10y_task\n await dollar_task\n await silver_task\n await btc_task\n await eth_task\n await link_task\n\n\nloop.run_until_complete(create_bots())\n",
"step-5": "from discord.ext import commands, tasks\nfrom discord.utils import get\nimport discord\nimport re\nimport json \nimport time \nimport random\nimport asyncio\nimport os\nimport datetime\n\nfrom live_ticker_scrape import wrangle_data\nfrom tokens import dev, dev1, es, nas, dow, us10y, dollar, vix, btc, eth, silver , link\n\nes_bot = discord.Client()\nnas_bot = discord.Client()\ndow_bot = discord.Client()\n\n\nus10y_bot = discord.Client()\nvix_bot = discord.Client()\nticker_vix = discord.Client()\n\n\ndollar_bot = discord.Client()\nsilver_bot = discord.Client()\n\nbtc_bot = discord.Client()\neth_bot= discord.Client()\nlink_bot = discord.Client()\n\nloop = asyncio.get_event_loop()\n\n@es_bot.event\nasync def on_ready():\n print('es started') \n\n@nas_bot.event\nasync def on_ready():\n print('nas started')\n\n@dow_bot.event\nasync def on_ready():\n print('dow started')\n\n@silver_bot.event\nasync def on_ready():\n print('silver started')\n\n@us10y_bot.event\nasync def on_ready():\n print('us10y started')\n\n@dollar_bot.event\nasync def on_Ready():\n print('dollar started')\n\n@vix_bot.event\nasync def on_ready():\n print('vix started')\n\n@btc_bot.event\nasync def on_ready():\n print('btc started')\n\n@eth_bot.event\nasync def on_ready():\n print('eth started')\n@link_bot.event\nasync def on_ready():\n print('link started')\n \n'''\n@tasks.loop() can be changed to seconds, minutes, hours\nhttps://discordpy.readthedocs.io/en/latest/ext/tasks/\n'''\n\n@tasks.loop(seconds=5)\nasync def called_second():\n ## get all guild ids that the bot is joined in \n\n\n data = wrangle_data()\n print(data)\n\n ticker_es = data['es']\n ticker_nas = data['nas'] \n ticker_dow = data['dow'] \n ticker_vix = data['vix']\n ticker_us10y = data['us10y']\n ticker_dollar = data['dxy']\n ticker_silver = data['silver']\n ticker_btc = data['btc']\n ticker_eth = data['eth']\n ticker_link = data['link']\n ## es\n if ticker_es:\n guild_ids = [guild.id for guild in es_bot.guilds] \n name_es = '{:20,.2f}'.format(ticker_es['last'])\n watching_es = ticker_es['change%']\n guild_channels = [es_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_es:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"1) {name_es}\") \n await es_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"ES {watching_es}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no es data')\n ##nas\n if ticker_nas:\n guild_ids = [guild.id for guild in nas_bot.guilds] \n name_nas = '{:20,.2f}'.format(ticker_nas['last'])\n watching_nas= ticker_nas['change%']\n guild_channels = [nas_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_nas:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"2) {name_nas}\")\n await 
nas_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"NQ {watching_nas}\"))\n except:\n print(f'broke in {guild_channel}')\n else: \n print('no nas data')\n ## dow\n if ticker_dow: \n guild_ids = [guild.id for guild in dow_bot.guilds] \n name_dow = '{:20,.2f}'.format(ticker_dow['last'])\n watching_dow = ticker_dow['change%']\n\n guild_channels = [dow_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_dow:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"3) {name_dow}\")\n await dow_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"DJI {watching_dow}\"))\n\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dow data')\n\n ## vix \n if vix:\n guild_ids = [guild.id for guild in vix_bot.guilds] \n name_vix = '{:20,.2f}'.format(ticker_vix['last'])\n watching_vix = ticker_vix['change%']\n\n guild_channels = [vix_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_vix:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n\n await guild_channel.me.edit(nick=f\"4) {name_vix}\")\n await vix_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"VIX {watching_vix}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no vix data ')\n\n # dollar \n if ticker_dollar:\n guild_ids = [guild.id for guild in dollar_bot.guilds] \n name_dollar = '{:20,.2f}'.format(ticker_dollar['last'])\n watching_dollar = ticker_dollar['change%']\n guild_channels = [dollar_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_dollar:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n\n await guild_channel.me.edit(nick=f\"5) {name_dollar}\")\n await dollar_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"DXY {watching_dollar}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dollar data')\n # us10y \n if ticker_us10y:\n guild_ids = [guild.id for guild in us10y_bot.guilds] \n name_us10y = '{:20,.2f}'.format(ticker_us10y['last'])\n watching_us10y = ticker_us10y['change%']\n guild_channels = [us10y_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_us10y:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await 
discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n\n await guild_channel.me.edit(nick=f\"4) {name_us10y}\")\n await us10y_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"US10Y {watching_us10y}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no us10y data')\n\n # silver \n if ticker_silver:\n guild_ids = [guild.id for guild in silver_bot.guilds] \n name_silver = '{:20,.2f}'.format(ticker_silver['last'])\n watching_silver = ticker_silver['change%']\n \n guild_channels = [silver_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_silver:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"6) {name_silver}\")\n await silver_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"{ticker_silver['name'].upper()} {watching_silver}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no silver data')\n #shit coin stuff\n # btc\n if ticker_btc:\n guild_ids = [guild.id for guild in btc_bot.guilds] \n name_btc = '{:20,.2f}'.format(ticker_btc['last'])\n watching_btc = ticker_btc['change%']\n guild_channels = [btc_bot.get_guild(guild_id) for guild_id in guild_ids]\n\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_btc:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"7) {name_btc}\")\n await btc_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"BTC {watching_btc}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no data for btc')\n # eth \n if ticker_eth:\n guild_ids = [guild.id for guild in eth_bot.guilds] \n name_eth= '{:20,.2f}'.format(ticker_eth['last'])\n watching_eth = ticker_eth['change%']\n guild_channels = [eth_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_eth:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"8) {name_eth}\")\n await eth_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"ETH {watching_eth}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n # link\n if ticker_link:\n guild_ids = [guild.id for guild in link_bot.guilds] \n name_link = '{:20,.2f}'.format(ticker_link['last'])\n watching_link = ticker_link['change%']\n guild_channels = [link_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_link:\n 
discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"9) {name_link}\")\n await link_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"LINK {watching_link}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n print(f'updated ')\n\n@called_second.before_loop\nasync def before():\n await es_bot.wait_until_ready()\n await nas_bot.wait_until_ready()\n await dow_bot.wait_until_ready()\n await vix_bot.wait_until_ready()\n\n await us10y_bot.wait_until_ready()\n await dollar_bot.wait_until_ready()\n await silver_bot.wait_until_ready() \n\n await btc_bot.wait_until_ready()\n await eth_bot.wait_until_ready()\n await link_bot.wait_until_ready()\n\n print(\"Finished waiting\")\n\ncalled_second.start()\n\nasync def create_bots():\n es_task= loop.create_task(es_bot.start(es))\n nas_task = loop.create_task(nas_bot.start(nas))\n dow_task = loop.create_task(dow_bot.start(dow))\n vix_task = loop.create_task(vix_bot.start(vix))\n\n us10y_task = loop.create_task(us10y_bot.start(us10y)) \n dollar_task = loop.create_task(dollar_bot.start(dollar))\n silver_task = loop.create_task(silver_bot.start(silver))\n\n btc_task = loop.create_task(btc_bot.start(btc))\n eth_task = loop.create_task(eth_bot.start(eth))\n link_task = loop.create_task(link_bot.start(link))\n \n await es_task \n await nas_task\n await dow_task\n await vix_task\n\n await us10y_task\n await dollar_task \n await silver_task\n\n await btc_task \n await eth_task\n await link_task \n\nloop.run_until_complete(create_bots())",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import re
text = 'Macademia nuts, Honey tuile, Cocoa powder, Pistachio nuts'
search_pattern = re.compile('nuts')
search_match_object = search_pattern.search(text)
if search_match_object:
print(search_match_object.span())
print(search_match_object.start())
print(search_match_object.end())
print(search_match_object.group())
# Other methods of pattern
print(search_pattern.findall(text))
print(search_pattern.fullmatch('nuts')) # The entire string must match
print(search_pattern.match('nuts...')) # Start of the string must match
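# A quick sanity check of the three lookup modes above (a short sketch reusing
# the same `text` and `search_pattern`; the asserted values follow from how re
# anchors each method):
assert search_pattern.fullmatch('nuts...') is None        # extra chars break fullmatch
assert search_pattern.match('nuts...') is not None        # a matching prefix is enough for match
assert search_pattern.search(text).start() == text.index('nuts')  # search scans anywhere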
|
normal
|
{
"blob_id": "ef5d235f09eea827b240290218c397f880f1046d",
"index": 4433,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif search_match_object:\n print(search_match_object.span())\n print(search_match_object.start())\n print(search_match_object.end())\n print(search_match_object.group())\nprint(search_pattern.findall(text))\nprint(search_pattern.fullmatch('nuts'))\nprint(search_pattern.match('nuts...'))\n",
"step-3": "<mask token>\ntext = 'Macademia nuts, Honey tuile, Cocoa powder, Pistachio nuts'\nsearch_pattern = re.compile('nuts')\nsearch_match_object = search_pattern.search(text)\nif search_match_object:\n print(search_match_object.span())\n print(search_match_object.start())\n print(search_match_object.end())\n print(search_match_object.group())\nprint(search_pattern.findall(text))\nprint(search_pattern.fullmatch('nuts'))\nprint(search_pattern.match('nuts...'))\n",
"step-4": "import re\ntext = 'Macademia nuts, Honey tuile, Cocoa powder, Pistachio nuts'\nsearch_pattern = re.compile('nuts')\nsearch_match_object = search_pattern.search(text)\nif search_match_object:\n print(search_match_object.span())\n print(search_match_object.start())\n print(search_match_object.end())\n print(search_match_object.group())\nprint(search_pattern.findall(text))\nprint(search_pattern.fullmatch('nuts'))\nprint(search_pattern.match('nuts...'))\n",
"step-5": "import re\n\ntext = 'Macademia nuts, Honey tuile, Cocoa powder, Pistachio nuts'\nsearch_pattern = re.compile('nuts')\nsearch_match_object = search_pattern.search(text)\n\nif search_match_object:\n print(search_match_object.span())\n print(search_match_object.start())\n print(search_match_object.end())\n print(search_match_object.group())\n\n# Other methods of pattern\nprint(search_pattern.findall(text))\nprint(search_pattern.fullmatch('nuts')) # The entire string must match\nprint(search_pattern.match('nuts...')) # Start of the string must match\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from sys import getsizeof
# using parenthesis indicates that we are creating a generator
a = (b for b in range(10))
print(getsizeof(a))
c = [b for b in range(10)]
# c uses more memory than a
print(getsizeof(c))
for b in a:
print(b)
print(sum(a)) # the sequence has disappeared
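# Follow-up sketch (assumes it runs right after the loop above): a generator is
# single-pass, so sum(a) printed 0, while the list keeps its elements around.
print(sum(c))  # 45
print(sum(c))  # 45 again -- lists can be re-iterated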
|
flexible
|
{
"blob_id": "2ee4b31f880441e87c437d7cc4601f260f34ae24",
"index": 6574,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(getsizeof(a))\n<mask token>\nprint(getsizeof(c))\nfor b in a:\n print(b)\nprint(sum(a))\n",
"step-3": "<mask token>\na = (b for b in range(10))\nprint(getsizeof(a))\nc = [b for b in range(10)]\nprint(getsizeof(c))\nfor b in a:\n print(b)\nprint(sum(a))\n",
"step-4": "from sys import getsizeof\na = (b for b in range(10))\nprint(getsizeof(a))\nc = [b for b in range(10)]\nprint(getsizeof(c))\nfor b in a:\n print(b)\nprint(sum(a))\n",
"step-5": "from sys import getsizeof\n\n# using parenthesis indicates that we are creating a generator\na = (b for b in range(10))\n\nprint(getsizeof(a))\n\nc = [b for b in range(10)]\n\n# c uses more memory than a\nprint(getsizeof(c))\n\nfor b in a:\n print(b)\n\nprint(sum(a)) # the sequence has disappeared\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import json
import yaml
import argparse
import sys
def json2yaml(json_input, yaml_input):
json_data = json.load(open(json_input, 'r'))
yaml_file = open(yaml_input, 'w')
    yaml.safe_dump(json_data, yaml_file, allow_unicode=True, default_flow_style=False)
    yaml_file.close()  # flush the dump before reading the file back
    yaml_data = yaml.load_all(open(yaml_input, 'r'), Loader=yaml.FullLoader)
print("\n" + yaml.dump_all(yaml_data))
print("\n############################################################################")
print("\nOUTPUT: JSON FILE " + json_input.split('/')[-1] + " CONVERTED TO YAML FILE " + yaml_input.split('/')[-1] + "\n")
print("############################################################################\n")
def yaml2json(json_input, yaml_input):
yaml_data = yaml.safe_load(open(yaml_input, 'r'))
# print(yaml_data)
json_file = open(json_input, 'w')
json.dump(yaml_data, json_file, indent=2)
json_file.close()
json_data = open(json_input, 'r').read()
print("\n" + json_data)
print("\n############################################################################")
print("\nOUTPUT: YAML FILE " + yaml_input.split('/')[-1] + " CONVERTED TO JSON FILE " + json_input.split('/')[
-1] + "\n")
print("############################################################################\n")
def run():
argParse = argparse.ArgumentParser(description="CONVERT JSON TO YAML & YAML TO JSON")
    argParse.add_argument('-u', '--usage', action='store_true', help="COMMAND USAGE FORMAT")
req_args_grp = argParse.add_argument_group('REQUIRED ARGUMENTS')
req_args_grp.add_argument('-j', '--json', help="JSON FILE", required=True)
req_args_grp.add_argument('-y', '--yaml', help="YAML FILE", required=True)
req_args_grp.add_argument('-m', '--mode', help="CONVERSION MODE", choices=['j2y','json2yaml', 'y2j', 'yaml2json'], required=True)
if len(sys.argv) == 1:
argParse.print_help()
sys.exit(1)
elif '-h' in sys.argv or '--help' in sys.argv:
argParse.print_help()
sys.exit(1)
elif '-u' in sys.argv or '--usage' in sys.argv:
argParse.print_usage()
sys.exit(1)
    elif ('-j' in sys.argv or '--json' in sys.argv) and ('-y' in sys.argv or '--yaml' in sys.argv):
arguments = argParse.parse_args()
json_input = arguments.json
yaml_input = arguments.yaml
mode_input = arguments.mode
if 'j2y' in mode_input or 'json2yaml' in mode_input:
json2yaml(json_input, yaml_input)
elif 'y2j' in mode_input or 'yaml2json' in mode_input:
yaml2json(json_input, yaml_input)
if __name__ == '__main__':
run()
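# Example invocations (the script name 'json2yaml.py' is hypothetical):
#   python json2yaml.py -j data.json -y data.yaml -m j2y        # JSON -> YAML
#   python json2yaml.py -j data.json -y data.yaml -m yaml2json  # YAML -> JSON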
|
normal
|
{
"blob_id": "5c15252611bee9cd9fbb5d91a19850c242bb51f1",
"index": 4940,
"step-1": "<mask token>\n\n\ndef json2yaml(json_input, yaml_input):\n json_data = json.load(open(json_input, 'r'))\n yaml_file = open(yaml_input, 'w')\n yaml.safe_dump(json_data, yaml_file, allow_unicode=True,\n default_flow_style=False)\n yaml_data = yaml.load_all(open(yaml_input, 'r'), Loader=yaml.FullLoader)\n print('\\n' + yaml.dump_all(yaml_data))\n print(\n '\\n############################################################################'\n )\n print('\\nOUTPUT: JSON FILE ' + json_input.split('/')[-1] +\n ' CONVERTED TO YAML FILE ' + yaml_input.split('/')[-1] + '\\n')\n print(\n '############################################################################\\n'\n )\n\n\n<mask token>\n\n\ndef run():\n argParse = argparse.ArgumentParser(description=\n 'CONVERT JSON TO YAML & YAML TO JSON')\n argParse.add_argument('-u', '--usage', help='COMMAND USAGE FORMAT')\n req_args_grp = argParse.add_argument_group('REQUIRED ARGUMENTS')\n req_args_grp.add_argument('-j', '--json', help='JSON FILE', required=True)\n req_args_grp.add_argument('-y', '--yaml', help='YAML FILE', required=True)\n req_args_grp.add_argument('-m', '--mode', help='CONVERSION MODE',\n choices=['j2y', 'json2yaml', 'y2j', 'yaml2json'], required=True)\n if len(sys.argv) == 1:\n argParse.print_help()\n sys.exit(1)\n elif '-h' in sys.argv or '--help' in sys.argv:\n argParse.print_help()\n sys.exit(1)\n elif '-u' in sys.argv or '--usage' in sys.argv:\n argParse.print_usage()\n sys.exit(1)\n elif '-j' in sys.argv or '--json' in sys.argv and '-y' in sys.argv or '--yaml' in sys.argv:\n arguments = argParse.parse_args()\n json_input = arguments.json\n yaml_input = arguments.yaml\n mode_input = arguments.mode\n if 'j2y' in mode_input or 'json2yaml' in mode_input:\n json2yaml(json_input, yaml_input)\n elif 'y2j' in mode_input or 'yaml2json' in mode_input:\n yaml2json(json_input, yaml_input)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef json2yaml(json_input, yaml_input):\n json_data = json.load(open(json_input, 'r'))\n yaml_file = open(yaml_input, 'w')\n yaml.safe_dump(json_data, yaml_file, allow_unicode=True,\n default_flow_style=False)\n yaml_data = yaml.load_all(open(yaml_input, 'r'), Loader=yaml.FullLoader)\n print('\\n' + yaml.dump_all(yaml_data))\n print(\n '\\n############################################################################'\n )\n print('\\nOUTPUT: JSON FILE ' + json_input.split('/')[-1] +\n ' CONVERTED TO YAML FILE ' + yaml_input.split('/')[-1] + '\\n')\n print(\n '############################################################################\\n'\n )\n\n\ndef yaml2json(json_input, yaml_input):\n yaml_data = yaml.safe_load(open(yaml_input, 'r'))\n json_file = open(json_input, 'w')\n json.dump(yaml_data, json_file, indent=2)\n json_file.close()\n json_data = open(json_input, 'r').read()\n print('\\n' + json_data)\n print(\n '\\n############################################################################'\n )\n print('\\nOUTPUT: YAML FILE ' + yaml_input.split('/')[-1] +\n ' CONVERTED TO JSON FILE ' + json_input.split('/')[-1] + '\\n')\n print(\n '############################################################################\\n'\n )\n\n\ndef run():\n argParse = argparse.ArgumentParser(description=\n 'CONVERT JSON TO YAML & YAML TO JSON')\n argParse.add_argument('-u', '--usage', help='COMMAND USAGE FORMAT')\n req_args_grp = argParse.add_argument_group('REQUIRED ARGUMENTS')\n req_args_grp.add_argument('-j', '--json', help='JSON FILE', required=True)\n req_args_grp.add_argument('-y', '--yaml', help='YAML FILE', required=True)\n req_args_grp.add_argument('-m', '--mode', help='CONVERSION MODE',\n choices=['j2y', 'json2yaml', 'y2j', 'yaml2json'], required=True)\n if len(sys.argv) == 1:\n argParse.print_help()\n sys.exit(1)\n elif '-h' in sys.argv or '--help' in sys.argv:\n argParse.print_help()\n sys.exit(1)\n elif '-u' in sys.argv or '--usage' in sys.argv:\n argParse.print_usage()\n sys.exit(1)\n elif '-j' in sys.argv or '--json' in sys.argv and '-y' in sys.argv or '--yaml' in sys.argv:\n arguments = argParse.parse_args()\n json_input = arguments.json\n yaml_input = arguments.yaml\n mode_input = arguments.mode\n if 'j2y' in mode_input or 'json2yaml' in mode_input:\n json2yaml(json_input, yaml_input)\n elif 'y2j' in mode_input or 'yaml2json' in mode_input:\n yaml2json(json_input, yaml_input)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef json2yaml(json_input, yaml_input):\n json_data = json.load(open(json_input, 'r'))\n yaml_file = open(yaml_input, 'w')\n yaml.safe_dump(json_data, yaml_file, allow_unicode=True,\n default_flow_style=False)\n yaml_data = yaml.load_all(open(yaml_input, 'r'), Loader=yaml.FullLoader)\n print('\\n' + yaml.dump_all(yaml_data))\n print(\n '\\n############################################################################'\n )\n print('\\nOUTPUT: JSON FILE ' + json_input.split('/')[-1] +\n ' CONVERTED TO YAML FILE ' + yaml_input.split('/')[-1] + '\\n')\n print(\n '############################################################################\\n'\n )\n\n\ndef yaml2json(json_input, yaml_input):\n yaml_data = yaml.safe_load(open(yaml_input, 'r'))\n json_file = open(json_input, 'w')\n json.dump(yaml_data, json_file, indent=2)\n json_file.close()\n json_data = open(json_input, 'r').read()\n print('\\n' + json_data)\n print(\n '\\n############################################################################'\n )\n print('\\nOUTPUT: YAML FILE ' + yaml_input.split('/')[-1] +\n ' CONVERTED TO JSON FILE ' + json_input.split('/')[-1] + '\\n')\n print(\n '############################################################################\\n'\n )\n\n\ndef run():\n argParse = argparse.ArgumentParser(description=\n 'CONVERT JSON TO YAML & YAML TO JSON')\n argParse.add_argument('-u', '--usage', help='COMMAND USAGE FORMAT')\n req_args_grp = argParse.add_argument_group('REQUIRED ARGUMENTS')\n req_args_grp.add_argument('-j', '--json', help='JSON FILE', required=True)\n req_args_grp.add_argument('-y', '--yaml', help='YAML FILE', required=True)\n req_args_grp.add_argument('-m', '--mode', help='CONVERSION MODE',\n choices=['j2y', 'json2yaml', 'y2j', 'yaml2json'], required=True)\n if len(sys.argv) == 1:\n argParse.print_help()\n sys.exit(1)\n elif '-h' in sys.argv or '--help' in sys.argv:\n argParse.print_help()\n sys.exit(1)\n elif '-u' in sys.argv or '--usage' in sys.argv:\n argParse.print_usage()\n sys.exit(1)\n elif '-j' in sys.argv or '--json' in sys.argv and '-y' in sys.argv or '--yaml' in sys.argv:\n arguments = argParse.parse_args()\n json_input = arguments.json\n yaml_input = arguments.yaml\n mode_input = arguments.mode\n if 'j2y' in mode_input or 'json2yaml' in mode_input:\n json2yaml(json_input, yaml_input)\n elif 'y2j' in mode_input or 'yaml2json' in mode_input:\n yaml2json(json_input, yaml_input)\n\n\nif __name__ == '__main__':\n run()\n",
"step-4": "import json\nimport yaml\nimport argparse\nimport sys\n\n\ndef json2yaml(json_input, yaml_input):\n json_data = json.load(open(json_input, 'r'))\n yaml_file = open(yaml_input, 'w')\n yaml.safe_dump(json_data, yaml_file, allow_unicode=True,\n default_flow_style=False)\n yaml_data = yaml.load_all(open(yaml_input, 'r'), Loader=yaml.FullLoader)\n print('\\n' + yaml.dump_all(yaml_data))\n print(\n '\\n############################################################################'\n )\n print('\\nOUTPUT: JSON FILE ' + json_input.split('/')[-1] +\n ' CONVERTED TO YAML FILE ' + yaml_input.split('/')[-1] + '\\n')\n print(\n '############################################################################\\n'\n )\n\n\ndef yaml2json(json_input, yaml_input):\n yaml_data = yaml.safe_load(open(yaml_input, 'r'))\n json_file = open(json_input, 'w')\n json.dump(yaml_data, json_file, indent=2)\n json_file.close()\n json_data = open(json_input, 'r').read()\n print('\\n' + json_data)\n print(\n '\\n############################################################################'\n )\n print('\\nOUTPUT: YAML FILE ' + yaml_input.split('/')[-1] +\n ' CONVERTED TO JSON FILE ' + json_input.split('/')[-1] + '\\n')\n print(\n '############################################################################\\n'\n )\n\n\ndef run():\n argParse = argparse.ArgumentParser(description=\n 'CONVERT JSON TO YAML & YAML TO JSON')\n argParse.add_argument('-u', '--usage', help='COMMAND USAGE FORMAT')\n req_args_grp = argParse.add_argument_group('REQUIRED ARGUMENTS')\n req_args_grp.add_argument('-j', '--json', help='JSON FILE', required=True)\n req_args_grp.add_argument('-y', '--yaml', help='YAML FILE', required=True)\n req_args_grp.add_argument('-m', '--mode', help='CONVERSION MODE',\n choices=['j2y', 'json2yaml', 'y2j', 'yaml2json'], required=True)\n if len(sys.argv) == 1:\n argParse.print_help()\n sys.exit(1)\n elif '-h' in sys.argv or '--help' in sys.argv:\n argParse.print_help()\n sys.exit(1)\n elif '-u' in sys.argv or '--usage' in sys.argv:\n argParse.print_usage()\n sys.exit(1)\n elif '-j' in sys.argv or '--json' in sys.argv and '-y' in sys.argv or '--yaml' in sys.argv:\n arguments = argParse.parse_args()\n json_input = arguments.json\n yaml_input = arguments.yaml\n mode_input = arguments.mode\n if 'j2y' in mode_input or 'json2yaml' in mode_input:\n json2yaml(json_input, yaml_input)\n elif 'y2j' in mode_input or 'yaml2json' in mode_input:\n yaml2json(json_input, yaml_input)\n\n\nif __name__ == '__main__':\n run()\n",
"step-5": "import json\nimport yaml\nimport argparse\nimport sys\n\ndef json2yaml(json_input, yaml_input):\n json_data = json.load(open(json_input, 'r'))\n yaml_file = open(yaml_input, 'w')\n yaml.safe_dump(json_data, yaml_file, allow_unicode=True, default_flow_style=False)\n\n yaml_data = yaml.load_all(open(yaml_input, 'r'), Loader=yaml.FullLoader)\n print(\"\\n\" + yaml.dump_all(yaml_data))\n print(\"\\n############################################################################\")\n print(\"\\nOUTPUT: JSON FILE \" + json_input.split('/')[-1] + \" CONVERTED TO YAML FILE \" + yaml_input.split('/')[-1] + \"\\n\")\n print(\"############################################################################\\n\")\n\ndef yaml2json(json_input, yaml_input):\n yaml_data = yaml.safe_load(open(yaml_input, 'r'))\n # print(yaml_data)\n json_file = open(json_input, 'w')\n json.dump(yaml_data, json_file, indent=2)\n json_file.close()\n\n json_data = open(json_input, 'r').read()\n print(\"\\n\" + json_data)\n\n print(\"\\n############################################################################\")\n print(\"\\nOUTPUT: YAML FILE \" + yaml_input.split('/')[-1] + \" CONVERTED TO JSON FILE \" + json_input.split('/')[\n -1] + \"\\n\")\n print(\"############################################################################\\n\")\n\ndef run():\n argParse = argparse.ArgumentParser(description=\"CONVERT JSON TO YAML & YAML TO JSON\")\n argParse.add_argument('-u', '--usage', help=\"COMMAND USAGE FORMAT\")\n req_args_grp = argParse.add_argument_group('REQUIRED ARGUMENTS')\n req_args_grp.add_argument('-j', '--json', help=\"JSON FILE\", required=True)\n req_args_grp.add_argument('-y', '--yaml', help=\"YAML FILE\", required=True)\n req_args_grp.add_argument('-m', '--mode', help=\"CONVERSION MODE\", choices=['j2y','json2yaml', 'y2j', 'yaml2json'], required=True)\n\n if len(sys.argv) == 1:\n argParse.print_help()\n sys.exit(1)\n elif '-h' in sys.argv or '--help' in sys.argv:\n argParse.print_help()\n sys.exit(1)\n elif '-u' in sys.argv or '--usage' in sys.argv:\n argParse.print_usage()\n sys.exit(1)\n elif '-j' in sys.argv or '--json' in sys.argv and '-y' in sys.argv or '--yaml' in sys.argv:\n arguments = argParse.parse_args()\n json_input = arguments.json\n yaml_input = arguments.yaml\n mode_input = arguments.mode\n if 'j2y' in mode_input or 'json2yaml' in mode_input:\n json2yaml(json_input, yaml_input)\n elif 'y2j' in mode_input or 'yaml2json' in mode_input:\n yaml2json(json_input, yaml_input)\n\nif __name__ == '__main__':\n run()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import datetime
import os
# os.getcwd()
class LMS: # This class is used to keep records of the books in the Library
def __init__(self, list_of_books, library_name):
        self.list_of_books = list_of_books  # use the path passed in instead of a hard-coded filename
self.library_name = library_name
self.books_dict = {}
Id = 101
with open(self.list_of_books) as bk:
content = bk.readlines()
for line in content:
# print(line)
self.books_dict.update({str(Id):{'Books_title':line.replace("\n",""), 'Lender_name':"", 'Issue_date':"", 'Status':"Available"}})
Id = Id+1
def display_books(self): #Display Book ID and Title of the Books
print("------------List of Books-----------")
print("Book ID", "\t", "Title")
print("------------------------------------")
for key, value in self.books_dict.items():
print(key, "\t\t", value.get("Books_title"), "- [", value.get("Status"), "]")
    def issue_books(self): # Issues a book by its ID if available; if it is already issued, shows who issued it and when; unknown IDs are reported
books_id = input("Enter book's ID: ")
current_date = datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S")
if books_id in self.books_dict.keys():
if not self.books_dict[books_id]["Status"] == "Available":
print(f"This book is already issued to {self.books_dict[books_id]['Lender_name']} on {self.books_dict[books_id]['Issue_date']}")
return self.issue_books()
elif self.books_dict[books_id]["Status"] == "Available":
your_name = input("Enter your name: ")
self.books_dict[books_id]["Lender_name"] = your_name
self.books_dict[books_id]["Issue_date"] = current_date
self.books_dict[books_id]["Status"] = "Already Issued"
print("Books Issued Successfully !!!\n")
else:
print("Book ID not found !!!")
return self.issue_books()
def add_books(self): #To add New Books to the library
new_books = input("Enter Book's Title: ")
if new_books == "":
return self.add_books()
elif len(new_books)>30:
print("Length of Book's Title is too long !!! Title length must min 3 characters and max 25 characters...")
return self.add_books
else:
with open(self.list_of_books, "a") as bk:
bk.writelines(f"{new_books}\n")
self.books_dict.update({str(int(max(self.books_dict))+1):{'Books_title':new_books, 'Lender_name':"", 'Issue_date':"", 'Status':"Available"}})
print(f"This book '{new_books}' has been added successfully !!!")
def return_books(self): #To return a Book
books_id = input("Enter books ID: ")
if books_id in self.books_dict.keys():
if self.books_dict[books_id]["Status"] == "Available":
print("This book is already available in library. Please check your Book ID !!!")
return self.return_books()
elif not self.books_dict[books_id]["Status"] == "Available":
self.books_dict[books_id]["Lender_name"] = ""
self.books_dict[books_id]["Issued_date"] = ""
self.books_dict[books_id]["Status"] = "Available"
print("Successfully Updated !!!\n")
else:
print("Book ID is not found")
try:
myLMS = LMS("list_of_books.txt", "Satyam's")
press_key_list = {"D":"Display Books", "I":"Issue Books", "A":"Add Books", "R":"Return Books", "Q":"Quit"}
key_press = False
while not (key_press == "q"):
print(f"\n--------------- Welcome to {myLMS.library_name} Library Management System----------------\n")
for key, value in press_key_list.items():
print("Press", key, "To", value)
key_press = input("Press key: ").lower()
if key_press == "i":
print("\nCurrent Selection : Issuing of Book\n")
myLMS.issue_books()
elif key_press == "a":
print("\nCurrent Selection : Adding a Book\n")
myLMS.add_books()
elif key_press == "d":
print("\nCurrent Selection : Displaying the Book\n")
myLMS.display_books()
elif key_press == "r":
print("\nCurrent Selection : Returning the Book\n")
myLMS.return_books()
elif key_press == "q":
break
else:
continue
except Exception as e:
    print(f"Something went wrong: {e}. Please check your input !!!")
# l = LMS("List_of_books.txt", "Python's Library")
# print(l.display_books())
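# A sample 'list_of_books.txt' the class expects (illustrative content only,
# one title per line):
#   Clean Code
#   The Pragmatic Programmer
#   Fluent Python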
|
normal
|
{
"blob_id": "1a2616472c8d432c91e2b48260cbae61d3ecfd90",
"index": 1746,
"step-1": "<mask token>\n\n\nclass LMS:\n <mask token>\n <mask token>\n\n def issue_books(self):\n books_id = input(\"Enter book's ID: \")\n current_date = datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S')\n if books_id in self.books_dict.keys():\n if not self.books_dict[books_id]['Status'] == 'Available':\n print(\n f\"This book is already issued to {self.books_dict[books_id]['Lender_name']} on {self.books_dict[books_id]['Issue_date']}\"\n )\n return self.issue_books()\n elif self.books_dict[books_id]['Status'] == 'Available':\n your_name = input('Enter your name: ')\n self.books_dict[books_id]['Lender_name'] = your_name\n self.books_dict[books_id]['Issue_date'] = current_date\n self.books_dict[books_id]['Status'] = 'Already Issued'\n print('Books Issued Successfully !!!\\n')\n else:\n print('Book ID not found !!!')\n return self.issue_books()\n\n def add_books(self):\n new_books = input(\"Enter Book's Title: \")\n if new_books == '':\n return self.add_books()\n elif len(new_books) > 30:\n print(\n \"Length of Book's Title is too long !!! Title length must min 3 characters and max 25 characters...\"\n )\n return self.add_books\n else:\n with open(self.list_of_books, 'a') as bk:\n bk.writelines(f'{new_books}\\n')\n self.books_dict.update({str(int(max(self.books_dict)) + 1):\n {'Books_title': new_books, 'Lender_name': '',\n 'Issue_date': '', 'Status': 'Available'}})\n print(\n f\"This book '{new_books}' has been added successfully !!!\")\n\n def return_books(self):\n books_id = input('Enter books ID: ')\n if books_id in self.books_dict.keys():\n if self.books_dict[books_id]['Status'] == 'Available':\n print(\n 'This book is already available in library. Please check your Book ID !!!'\n )\n return self.return_books()\n elif not self.books_dict[books_id]['Status'] == 'Available':\n self.books_dict[books_id]['Lender_name'] = ''\n self.books_dict[books_id]['Issued_date'] = ''\n self.books_dict[books_id]['Status'] = 'Available'\n print('Successfully Updated !!!\\n')\n else:\n print('Book ID is not found')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LMS:\n\n def __init__(self, list_of_books, library_name):\n self.list_of_books = 'List_of_books.txt'\n self.library_name = library_name\n self.books_dict = {}\n Id = 101\n with open(self.list_of_books) as bk:\n content = bk.readlines()\n for line in content:\n self.books_dict.update({str(Id): {'Books_title': line.replace(\n '\\n', ''), 'Lender_name': '', 'Issue_date': '', 'Status':\n 'Available'}})\n Id = Id + 1\n <mask token>\n\n def issue_books(self):\n books_id = input(\"Enter book's ID: \")\n current_date = datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S')\n if books_id in self.books_dict.keys():\n if not self.books_dict[books_id]['Status'] == 'Available':\n print(\n f\"This book is already issued to {self.books_dict[books_id]['Lender_name']} on {self.books_dict[books_id]['Issue_date']}\"\n )\n return self.issue_books()\n elif self.books_dict[books_id]['Status'] == 'Available':\n your_name = input('Enter your name: ')\n self.books_dict[books_id]['Lender_name'] = your_name\n self.books_dict[books_id]['Issue_date'] = current_date\n self.books_dict[books_id]['Status'] = 'Already Issued'\n print('Books Issued Successfully !!!\\n')\n else:\n print('Book ID not found !!!')\n return self.issue_books()\n\n def add_books(self):\n new_books = input(\"Enter Book's Title: \")\n if new_books == '':\n return self.add_books()\n elif len(new_books) > 30:\n print(\n \"Length of Book's Title is too long !!! Title length must min 3 characters and max 25 characters...\"\n )\n return self.add_books\n else:\n with open(self.list_of_books, 'a') as bk:\n bk.writelines(f'{new_books}\\n')\n self.books_dict.update({str(int(max(self.books_dict)) + 1):\n {'Books_title': new_books, 'Lender_name': '',\n 'Issue_date': '', 'Status': 'Available'}})\n print(\n f\"This book '{new_books}' has been added successfully !!!\")\n\n def return_books(self):\n books_id = input('Enter books ID: ')\n if books_id in self.books_dict.keys():\n if self.books_dict[books_id]['Status'] == 'Available':\n print(\n 'This book is already available in library. Please check your Book ID !!!'\n )\n return self.return_books()\n elif not self.books_dict[books_id]['Status'] == 'Available':\n self.books_dict[books_id]['Lender_name'] = ''\n self.books_dict[books_id]['Issued_date'] = ''\n self.books_dict[books_id]['Status'] = 'Available'\n print('Successfully Updated !!!\\n')\n else:\n print('Book ID is not found')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LMS:\n\n def __init__(self, list_of_books, library_name):\n self.list_of_books = 'List_of_books.txt'\n self.library_name = library_name\n self.books_dict = {}\n Id = 101\n with open(self.list_of_books) as bk:\n content = bk.readlines()\n for line in content:\n self.books_dict.update({str(Id): {'Books_title': line.replace(\n '\\n', ''), 'Lender_name': '', 'Issue_date': '', 'Status':\n 'Available'}})\n Id = Id + 1\n\n def display_books(self):\n print('------------List of Books-----------')\n print('Book ID', '\\t', 'Title')\n print('------------------------------------')\n for key, value in self.books_dict.items():\n print(key, '\\t\\t', value.get('Books_title'), '- [', value.get(\n 'Status'), ']')\n\n def issue_books(self):\n books_id = input(\"Enter book's ID: \")\n current_date = datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S')\n if books_id in self.books_dict.keys():\n if not self.books_dict[books_id]['Status'] == 'Available':\n print(\n f\"This book is already issued to {self.books_dict[books_id]['Lender_name']} on {self.books_dict[books_id]['Issue_date']}\"\n )\n return self.issue_books()\n elif self.books_dict[books_id]['Status'] == 'Available':\n your_name = input('Enter your name: ')\n self.books_dict[books_id]['Lender_name'] = your_name\n self.books_dict[books_id]['Issue_date'] = current_date\n self.books_dict[books_id]['Status'] = 'Already Issued'\n print('Books Issued Successfully !!!\\n')\n else:\n print('Book ID not found !!!')\n return self.issue_books()\n\n def add_books(self):\n new_books = input(\"Enter Book's Title: \")\n if new_books == '':\n return self.add_books()\n elif len(new_books) > 30:\n print(\n \"Length of Book's Title is too long !!! Title length must min 3 characters and max 25 characters...\"\n )\n return self.add_books\n else:\n with open(self.list_of_books, 'a') as bk:\n bk.writelines(f'{new_books}\\n')\n self.books_dict.update({str(int(max(self.books_dict)) + 1):\n {'Books_title': new_books, 'Lender_name': '',\n 'Issue_date': '', 'Status': 'Available'}})\n print(\n f\"This book '{new_books}' has been added successfully !!!\")\n\n def return_books(self):\n books_id = input('Enter books ID: ')\n if books_id in self.books_dict.keys():\n if self.books_dict[books_id]['Status'] == 'Available':\n print(\n 'This book is already available in library. Please check your Book ID !!!'\n )\n return self.return_books()\n elif not self.books_dict[books_id]['Status'] == 'Available':\n self.books_dict[books_id]['Lender_name'] = ''\n self.books_dict[books_id]['Issued_date'] = ''\n self.books_dict[books_id]['Status'] = 'Available'\n print('Successfully Updated !!!\\n')\n else:\n print('Book ID is not found')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass LMS:\n\n def __init__(self, list_of_books, library_name):\n self.list_of_books = 'List_of_books.txt'\n self.library_name = library_name\n self.books_dict = {}\n Id = 101\n with open(self.list_of_books) as bk:\n content = bk.readlines()\n for line in content:\n self.books_dict.update({str(Id): {'Books_title': line.replace(\n '\\n', ''), 'Lender_name': '', 'Issue_date': '', 'Status':\n 'Available'}})\n Id = Id + 1\n\n def display_books(self):\n print('------------List of Books-----------')\n print('Book ID', '\\t', 'Title')\n print('------------------------------------')\n for key, value in self.books_dict.items():\n print(key, '\\t\\t', value.get('Books_title'), '- [', value.get(\n 'Status'), ']')\n\n def issue_books(self):\n books_id = input(\"Enter book's ID: \")\n current_date = datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S')\n if books_id in self.books_dict.keys():\n if not self.books_dict[books_id]['Status'] == 'Available':\n print(\n f\"This book is already issued to {self.books_dict[books_id]['Lender_name']} on {self.books_dict[books_id]['Issue_date']}\"\n )\n return self.issue_books()\n elif self.books_dict[books_id]['Status'] == 'Available':\n your_name = input('Enter your name: ')\n self.books_dict[books_id]['Lender_name'] = your_name\n self.books_dict[books_id]['Issue_date'] = current_date\n self.books_dict[books_id]['Status'] = 'Already Issued'\n print('Books Issued Successfully !!!\\n')\n else:\n print('Book ID not found !!!')\n return self.issue_books()\n\n def add_books(self):\n new_books = input(\"Enter Book's Title: \")\n if new_books == '':\n return self.add_books()\n elif len(new_books) > 30:\n print(\n \"Length of Book's Title is too long !!! Title length must min 3 characters and max 25 characters...\"\n )\n return self.add_books\n else:\n with open(self.list_of_books, 'a') as bk:\n bk.writelines(f'{new_books}\\n')\n self.books_dict.update({str(int(max(self.books_dict)) + 1):\n {'Books_title': new_books, 'Lender_name': '',\n 'Issue_date': '', 'Status': 'Available'}})\n print(\n f\"This book '{new_books}' has been added successfully !!!\")\n\n def return_books(self):\n books_id = input('Enter books ID: ')\n if books_id in self.books_dict.keys():\n if self.books_dict[books_id]['Status'] == 'Available':\n print(\n 'This book is already available in library. 
import datetime


class LMS:
    """Keeps records of the books in the library."""

    def __init__(self, list_of_books, library_name):
        # Use the file name passed by the caller instead of a hard-coded one
        # (the original assigned "List_of_books.txt" and ignored the argument).
        self.list_of_books = list_of_books
        self.library_name = library_name
        self.books_dict = {}
        book_id = 101
        with open(self.list_of_books) as bk:
            for line in bk.readlines():
                self.books_dict[str(book_id)] = {
                    'Books_title': line.replace('\n', ''),
                    'Lender_name': '',
                    'Issue_date': '',
                    'Status': 'Available',
                }
                book_id += 1

    def display_books(self):
        """Display the ID, title and status of every book."""
        print('------------List of Books-----------')
        print('Book ID', '\t', 'Title')
        print('------------------------------------')
        for key, value in self.books_dict.items():
            print(key, '\t\t', value.get('Books_title'), '- [', value.get('Status'), ']')

    def issue_books(self):
        """Issue a book by ID if available; otherwise report who holds it and since when."""
        books_id = input("Enter book's ID: ")
        current_date = datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S')
        if books_id in self.books_dict:
            if not self.books_dict[books_id]['Status'] == 'Available':
                print(f"This book is already issued to {self.books_dict[books_id]['Lender_name']} "
                      f"on {self.books_dict[books_id]['Issue_date']}")
                return self.issue_books()
            your_name = input('Enter your name: ')
            self.books_dict[books_id]['Lender_name'] = your_name
            self.books_dict[books_id]['Issue_date'] = current_date
            self.books_dict[books_id]['Status'] = 'Already Issued'
            print('Book Issued Successfully !!!\n')
        else:
            print('Book ID not found !!!')
            return self.issue_books()

    def add_books(self):
        """Add a new book; the title must be 3-25 characters long."""
        new_books = input("Enter Book's Title: ")
        if new_books == '':
            return self.add_books()
        elif not 3 <= len(new_books) <= 25:
            # The original message promised 3-25 characters but the code
            # checked > 30 and never re-prompted (it returned the method
            # object, not a call); the check now matches the message.
            print("Title length must be at least 3 and at most 25 characters...")
            return self.add_books()
        else:
            with open(self.list_of_books, 'a') as bk:
                bk.writelines(f'{new_books}\n')
            # max() on string keys compares lexicographically ('999' > '1000'),
            # so compare the IDs numerically instead.
            next_id = str(int(max(self.books_dict, key=int)) + 1)
            self.books_dict[next_id] = {
                'Books_title': new_books,
                'Lender_name': '',
                'Issue_date': '',
                'Status': 'Available',
            }
            print(f"This book '{new_books}' has been added successfully !!!")

    def return_books(self):
        """Return a book by ID, clearing the lender and issue date."""
        books_id = input('Enter books ID: ')
        if books_id in self.books_dict:
            if self.books_dict[books_id]['Status'] == 'Available':
                print('This book is already available in library. Please check your Book ID !!!')
                return self.return_books()
            # The original wrote a non-existent 'Issued_date' key here,
            # leaving the real 'Issue_date' field uncleared.
            self.books_dict[books_id]['Lender_name'] = ''
            self.books_dict[books_id]['Issue_date'] = ''
            self.books_dict[books_id]['Status'] = 'Available'
            print('Successfully Updated !!!\n')
        else:
            print('Book ID is not found')


try:
    myLMS = LMS('list_of_books.txt', "Satyam's")
    press_key_list = {'D': 'Display Books', 'I': 'Issue Books', 'A': 'Add Books',
                      'R': 'Return Books', 'Q': 'Quit'}
    key_press = ''
    while key_press != 'q':
        print(f"\n--------------- Welcome to {myLMS.library_name} Library Management System----------------\n")
        for key, value in press_key_list.items():
            print('Press', key, 'To', value)
        key_press = input('Press key: ').lower()
        if key_press == 'i':
            print('\nCurrent Selection : Issuing of Book\n')
            myLMS.issue_books()
        elif key_press == 'a':
            print('\nCurrent Selection : Adding a Book\n')
            myLMS.add_books()
        elif key_press == 'd':
            print('\nCurrent Selection : Displaying the Book\n')
            myLMS.display_books()
        elif key_press == 'r':
            print('\nCurrent Selection : Returning the Book\n')
            myLMS.return_books()
        elif key_press == 'q':
            break
except Exception as e:
    print('Something went wrong:', e)

# l = LMS("List_of_books.txt", "Python's Library")
# print(l.display_books())
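# Sample data sketch (added; the format is inferred from __init__ above, not
# part of the original record): the loader expects one title per line in
# list_of_books.txt, and IDs are assigned sequentially from 101. For example:
#
#     A Byte of Python
#     Fluent Python
#     Automate the Boring Stuff with Python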
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
# Source (Zhihu): https://zhuanlan.zhihu.com/p/51987247
# Blog: https://www.cnblogs.com/yangecnu/p/Introduce-Binary-Search-Tree.html
"""
Binary Search Tree (BST)
Property: left < root < right
    If a node's left subtree is non-empty, every value in it is smaller than the node's;
    if a node's right subtree is non-empty, every value in it is larger than the node's;
    the left and right subtrees are themselves binary search trees;
    there are no duplicate keys (no duplicate nodes).
Drawback: the tree can become unbalanced, which is why balanced BSTs exist
(common implementations: red-black tree, AVL, scapegoat tree, Treap, splay tree, ...).

This module implements a plain BST:
    find: any value / maximum / minimum (at most `height` comparisons)
    insert (recursive and iterative): the new value always lands in a leaf
    delete (recursive and iterative): node with no child / one child / two children
"""
import logging
import functools
import time
logging.basicConfig(
level=logging.ERROR,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class Node():
def __init__(self, data=None):
self._data = data
self._left, self._right = None, None
def __str__(self):
return 'Node:<data:%s>, <left:%s>, <right:%s>' % (
str(self._data), str(self._left), str(self._right))
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def left(self):
return self._left
@left.setter
def left(self, value):
self._left = value
@property
def right(self):
return self._right
@right.setter
def right(self, value):
self._right = value
def check_null(func):
@functools.wraps(func)
def wrapper(self, *args, **kw):
        if self.__bool__():  # proceed only when the tree is non-empty
return func(self, *args, **kw)
else:
if func.__name__ in ['_insert', '_insert2']:
self._root = Node(args[0])
else:
print('The tree is empty')
return wrapper
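# Note (added): on an empty tree the wrapper returns None for every method
# except the two inserts, so callers that unpack the result -- e.g. find()
# doing `result, *_ = self._metal_find(...)` -- raise a TypeError until the
# first value has been inserted.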
# check_null relies on the fact that func.__name__ inside the decorator is
# the plain method name (e.g. '_insert'), not a qualified one.
class BinarySearchTree():
"""
如果非空,那么左子树的所有节点都小于根节点,右子树的所有节点都大于根节点,数为二叉搜索树。
左右子树都为二叉搜索树。
"""
def __init__(self):
self._root = None
def __str__(self):
""" yield 迭代器 """
tree2list = [x.data for x in self._generate_node()]
return 'the BinarySearchTree is %s' % tree2list
def __bool__(self):
if self._root is not None:
return True
else:
return False
@staticmethod
    def _redirect(pre_node, is_left, target):  # staticmethod: no self, independent of the instance
        """
        Attach `target` as a child of `pre_node`.
        :param is_left: whether `target` becomes pre_node's left or right child
        """
if is_left:
pre_node.left = target
else:
pre_node.right = target
def _generate_node(self):
queue = [self._root]
while queue:
node = queue.pop(0)
yield node
            queue.extend([x for x in (node.left, node.right) if x is not None])
            # (node.left, node.right) is a tuple
@check_null
def _metal_find(self, value, node, alert=True):
"""
内部接口: 实现了基本的查找功能,并且实现了跟踪父节点和判断是否为左右子节点的功能
思 路: 比较简单
:param value:
:param node:
:param alert:
:return: node, _pre_node, is_left
找到的node, 该节点的父节点_pre_node, 该节点是_pre_node的左还是右节点bool(is_left)
"""
# if you want the pre_node and is_left get the specific value, let the node=root
is_left, _pre_node = None, None
while node and value != node.data:
# _pre_node 作用跟踪父节点
_pre_node = node
if value < node.data:
node = node.left
# is_left 作用跟踪是否为左子节点
is_left = True
elif value > node.data:
node = node.right
is_left = False
# while 循环完没找到,则node is None
# while 循环完找到的话,则node is not None 跳过if,return 找到的node
if alert and node is None: # alert and (node is None)
print('There is no node<%s>' % value)
return node, _pre_node, is_left
def find(self, value):
"""暴露给外面的接口,按值查找,返回节点"""
# *_ 除第一个外的其他返回值
result, *_ = self._metal_find(value, self._root)
return result
@check_null
    def _insert(self, value, node):  # node is usually the root
        """
        Recursive insert.
        :param node: an existing node of the tree
        :return: node -- the inserted node(value) always ends up as a leaf
        """
        # _insert terminates in one of two ways:
        # 1. a node with value == node.data already exists: print() and return it
        # 2. node is None: create the new node via node = Node(value)
        if node is None:
            node = Node(value)
        else:
            if value < node.data:
                # the recursive call returns the subtree root; re-attach it on the left
                node.left = self._insert(value, node.left)
            elif value > node.data:
                # likewise on the right
                node.right = self._insert(value, node.right)
            else:
                print('have the same value')

        return node  # remember to return the node
@check_null
def _insert2(self, value):
"""
Iterative insert method
先 _metal_find() 迭代找到 value, 找到 value说明已存在,没找到 _redirect() 新建节点
"""
result, pre_node, is_left = self._metal_find(value, self._root, False) # 查找
if result is None: # 没找到通过self._redirect() 赋值
self._redirect(pre_node, is_left, Node(value))
else: # 找到说明已经存在
print('already have the value')
# 默认走循环的实现, 递归的程序栈很容易爆掉,并且test_insert()测试了下循环比递归快很多
def insert(self, value, isrecursion=False):
if isrecursion:
self._insert(value, self._root)
else:
self._insert2(value)
@check_null
def _find_extremum(self, node, by='max'):
"""
找 max min 节点
:return node:
"""
if by == 'max':
while node.right:
node = node.right
elif by == 'min':
while node.left:
node = node.left
return node
def findmax(self):
return self._find_extremum(self._root)
def findmin(self):
return self._find_extremum(self._root, by='min')
@check_null
def _delete(self, value, node):
""" recursion delete
step1: 通过value 与 node.data比较来找到要删除的节点
step2: 要删除的节点又有三种situations
situation1: 要删除的节点 是叶节点,没有子节点。
situation2: 要删除的节点 只有一个子节点。
situation3: 要删除的节点 有两个子节点。
:return: 删除完value以后的新的node
"""
if not node:
print('can\'t find')
else: # step1
# If the key to be deleted is smaller than the root's
# key then it lies in left subtree
if value < node.data:
node.left = self._delete(value, node.left)
# If the kye to be delete is greater than the root's key
# then it lies in right subtree
elif value > node.data:
node.right = self._delete(value, node.right)
# If key is same as root's key, then this is the node
# to be deleted
else: # step2
# Node with two children: Get the inorder successor 中序继承者
# 最后node.left = self._delete(tmp.data, node.left)其实转化成了
# 后边 Node with only one child or no child 的情形
### 可以找左子树的最大值或者右子树的最小值作为successor
### 而左子树的最大值或者右子树的最小值必然只有一个或零个节点
### 所以转化成了前边 Node with only one child or no child 的情形
if node.left and node.right:
# find the largest in the left subtree as successor
tmp = self._find_extremum(node.left) # default by max
# Copy the inorder successor's content to this node
node.data = tmp.data
# Delete the inorder successor
node.left = self._delete(tmp.data, node.left)
# Node with only one child or no child
else:
if node.left is None:
node = node.right
else:
node = node.left
return node # 最后层层返回
@check_null
def _delete2(self, value, node):
"""非递归删除
首先: 找到要删除的节点result
再次: 找到并删除result的successor,再将successor的data赋给要删除的节点result
讨论复杂的2个节点的情况:
1 找到value所在的节点result,该节点有两个子节点
2 找到result的左子节点的max记为tmp,tmp只有0或1个节点
3 从result中删除tmp,tmp只有0或1个节点,
4 ...
"""
# 首先: 找到要删除的节点result
result, pre_node, is_left = self._metal_find(value, node)
if result is None:
return
# 有2个节点的情况
if result.left and result.right:
tmp = self._find_extremum(result.left) # 再次: 找到result的successor
self._delete2(tmp.data, result) # 再次: 删除result的successor 这步会走后边else里 "# 有1个或者没有" 的情形
result.data = tmp.data # 再将successor的data赋给要删除的节点result
# 有1个或者没有
else:
if result.left is None:
# print('---')
# print(id(result),id(result.right)) # 46446408 1352705168
result = result.right
# print(id(result)) # 1352705168
else:
result = result.left
# 将 result 赋成 pre_node 的 is_left节点 维护
self._redirect(pre_node, is_left, result) # 对节点pre_node的子节点进行赋值
def delete(self, value, isrecursion=False):
if isrecursion:
return self._delete(value, self._root)
else:
return self._delete2(value, self._root)
def test_insert(value):
def _test(value, control=False):
tree = BinarySearchTree()
start = time.time()
for i in range(value):
tree.insert(i, isrecursion=control)
end = time.time()
print('the isrecursion control=%s, the time is: %s' % (control, end - start))
_test(value)
_test(value, control=True)
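# Note (added): inserting 0..value-1 in ascending order degenerates the tree
# into a right chain, so the recursive variant's depth equals the number of
# inserts and hits Python's default recursion limit (~1000) for large values;
# keep the argument modest, e.g. test_insert(500).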
def main():
# test_insert(100)
tree = BinarySearchTree()
nums = [7, 2, 9, 1, 4, 8, 10]
for i in nums:
tree.insert(i)
print(tree)
print(tree.find(4))
tree.insert(3)
print(tree)
tree.delete(2)
print(tree)
if __name__ == '__main__':
main()
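# A quick sanity sketch (added; the trace below is worked out by hand from
# main() and the BFS-order __str__, so it is worth confirming by running it):
#
#   after inserting [7, 2, 9, 1, 4, 8, 10]:
#       the BinarySearchTree is [7, 2, 9, 1, 4, 8, 10]
#   tree.find(4) prints the leaf:
#       Node:<data:4>, <left:None>, <right:None>
#   after tree.insert(3) (3 becomes the left child of 4):
#       the BinarySearchTree is [7, 2, 9, 1, 4, 8, 10, 3]
#   after tree.delete(2) (2 is overwritten by its in-order predecessor 1):
#       the BinarySearchTree is [7, 1, 9, 4, 8, 10, 3]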
from django.shortcuts import render, redirect
from datetime import datetime
from fichefrais.models import FicheFrais, Etat, LigneFraisForfait, LigneFraisHorsForfait, Forfait
def home_admin(request):
"""
    :view home_admin: Main menu for the administrators
:template home_admin.html:
"""
if not request.user.is_authenticated():
return redirect("login")
title = "Accueil"
today = datetime.now()
etat = Etat.objects
fiche_frais = FicheFrais.objects
frais_forfait = Forfait.objects
lignes_frais_forfait = LigneFraisForfait.objects
lignes_frais_hors_forfait = LigneFraisHorsForfait.objects
context = {
"title": title,
"user": request.user,
"fiche_frais": fiche_frais,
"lignes_frais_forfait": lignes_frais_forfait,
"lignes_frais_hors_forfait": lignes_frais_hors_forfait,
"etat": etat,
"today": today,
"frais_forfait": frais_forfait,
}
return render(request, "fichefrais/administrateur/home_admin.html", context)
|
flexible
|
{
"blob_id": "b453c8e9cc50066d1b5811493a89de384a000f37",
"index": 4929,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef home_admin(request):\n \"\"\"\n :view home_admin: Menu principale des Administrateurs\n :template home_admin.html:\n \"\"\"\n if not request.user.is_authenticated():\n return redirect('login')\n title = 'Accueil'\n today = datetime.now()\n etat = Etat.objects\n fiche_frais = FicheFrais.objects\n frais_forfait = Forfait.objects\n lignes_frais_forfait = LigneFraisForfait.objects\n lignes_frais_hors_forfait = LigneFraisHorsForfait.objects\n context = {'title': title, 'user': request.user, 'fiche_frais':\n fiche_frais, 'lignes_frais_forfait': lignes_frais_forfait,\n 'lignes_frais_hors_forfait': lignes_frais_hors_forfait, 'etat':\n etat, 'today': today, 'frais_forfait': frais_forfait}\n return render(request, 'fichefrais/administrateur/home_admin.html', context\n )\n",
"step-3": "from django.shortcuts import render, redirect\nfrom datetime import datetime\nfrom fichefrais.models import FicheFrais, Etat, LigneFraisForfait, LigneFraisHorsForfait, Forfait\n\n\ndef home_admin(request):\n \"\"\"\n :view home_admin: Menu principale des Administrateurs\n :template home_admin.html:\n \"\"\"\n if not request.user.is_authenticated():\n return redirect('login')\n title = 'Accueil'\n today = datetime.now()\n etat = Etat.objects\n fiche_frais = FicheFrais.objects\n frais_forfait = Forfait.objects\n lignes_frais_forfait = LigneFraisForfait.objects\n lignes_frais_hors_forfait = LigneFraisHorsForfait.objects\n context = {'title': title, 'user': request.user, 'fiche_frais':\n fiche_frais, 'lignes_frais_forfait': lignes_frais_forfait,\n 'lignes_frais_hors_forfait': lignes_frais_hors_forfait, 'etat':\n etat, 'today': today, 'frais_forfait': frais_forfait}\n return render(request, 'fichefrais/administrateur/home_admin.html', context\n )\n",
"step-4": "from django.shortcuts import render, redirect\nfrom datetime import datetime\nfrom fichefrais.models import FicheFrais, Etat, LigneFraisForfait, LigneFraisHorsForfait, Forfait\n\n\ndef home_admin(request):\n \"\"\"\n :view home_admin: Menu principale des Administrateurs\n :template home_admin.html:\n \"\"\"\n if not request.user.is_authenticated():\n return redirect(\"login\")\n\n title = \"Accueil\"\n today = datetime.now()\n\n etat = Etat.objects\n fiche_frais = FicheFrais.objects\n frais_forfait = Forfait.objects\n lignes_frais_forfait = LigneFraisForfait.objects\n lignes_frais_hors_forfait = LigneFraisHorsForfait.objects\n\n context = {\n \"title\": title,\n \"user\": request.user,\n \"fiche_frais\": fiche_frais,\n \"lignes_frais_forfait\": lignes_frais_forfait,\n \"lignes_frais_hors_forfait\": lignes_frais_hors_forfait,\n \"etat\": etat,\n \"today\": today,\n \"frais_forfait\": frais_forfait,\n }\n\n return render(request, \"fichefrais/administrateur/home_admin.html\", context)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
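All three step versions of this record call request.user.is_authenticated() as a method, which only works on Django versions before 1.10; in modern Django it is a property, and the idiomatic guard is the login_required decorator. A sketch of the equivalent view under that assumption (model imports as in the record):

from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from fichefrais.models import Etat, FicheFrais, Forfait, LigneFraisForfait, LigneFraisHorsForfait

@login_required(login_url='login')
def home_admin(request):
    """Main menu for Administrators (modern-Django variant of the record)."""
    context = {
        'title': 'Accueil',
        'user': request.user,
        'today': datetime.now(),
        'etat': Etat.objects,
        'fiche_frais': FicheFrais.objects,
        'frais_forfait': Forfait.objects,
        'lignes_frais_forfait': LigneFraisForfait.objects,
        'lignes_frais_hors_forfait': LigneFraisHorsForfait.objects,
    }
    return render(request, 'fichefrais/administrateur/home_admin.html', context)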
#
# cuneiform_python.py
#
# Example showing how to create a custom Unicode set for parsing
#
# Copyright Paul McGuire, 2021
#
from typing import List, Tuple
import pyparsing as pp
class Cuneiform(pp.unicode_set):
"""Unicode set for Cuneiform Character Range"""
_ranges: List[Tuple[int, ...]] = [
(0x10380, 0x103d5),
(0x12000, 0x123FF),
(0x12400, 0x1247F),
]
# list out all valid identifier characters
# print(Cuneiform.identchars)
"""
Simple Cuneiform Python language transformer
Define Cuneiform "words"
print: 𒄑𒉿𒅔𒋫
hello: 𒀄𒂖𒆷𒁎
world: 𒍟𒁎𒉿𒆷𒀳
def: 𒁴𒈫
"""
# uncomment to show parse-time debugging
# pp.enable_diag(pp.Diagnostics.enable_debug_on_named_expressions)
# define a MINIMAL Python parser
LPAR, RPAR, COLON, EQ = map(pp.Suppress, "():=")
def_ = pp.Keyword("𒁴𒈫", ident_chars=Cuneiform.identbodychars).set_name("def")
any_keyword = def_
ident = (~any_keyword) + pp.Word(
Cuneiform.identchars, Cuneiform.identbodychars, asKeyword=True
)
str_expr = pp.infix_notation(
pp.QuotedString('"') | pp.common.integer,
[
("*", 2, pp.OpAssoc.LEFT),
("+", 2, pp.OpAssoc.LEFT),
],
)
rvalue = pp.Forward()
fn_call = (ident + pp.Group(LPAR + pp.Optional(rvalue) + RPAR)).set_name("fn_call")
rvalue <<= fn_call | ident | str_expr | pp.common.number
assignment_stmt = ident + EQ + rvalue
stmt = pp.Group(fn_call | assignment_stmt).set_name("stmt")
fn_def = pp.Group(
def_ + ident + pp.Group(LPAR + pp.Optional(rvalue) + RPAR) + COLON
).set_name("fn_def")
fn_body = pp.IndentedBlock(stmt).set_name("fn_body")
fn_expr = pp.Group(fn_def + pp.Group(fn_body))
script = fn_expr[...] + stmt[...]
# parse some Python written in Cuneiform
cuneiform_hello_world = r"""
𒁴𒈫 𒀄𒂖𒆷𒁎():
𒀁 = "𒀄𒂖𒆷𒁎, 𒍟𒁎𒉿𒆷𒀳!\n" * 3
𒄑𒉿𒅔𒋫(𒀁)
𒀄𒂖𒆷𒁎()"""
script.parseString(cuneiform_hello_world).pprint(width=40)
# use transform_string to convert keywords and builtins to runnable Python
names_map = {
"𒄑𒉿𒅔𒋫": "print",
}
ident.add_parse_action(lambda t: names_map.get(t[0], t[0]))
def_.add_parse_action(lambda: "def")
print("\nconvert Cuneiform Python to executable Python")
transformed = (
# always put ident last
(def_ | ident)
.ignore(pp.quoted_string)
.transform_string(cuneiform_hello_world)
.strip()
)
print(
"=================\n"
+ cuneiform_hello_world.strip()
+ "\n=================\n"
+ transformed
+ "\n=================\n"
)
print("# run transformed Python")
exec(transformed)
|
normal
|
{
"blob_id": "bc1aefd0b0a87b80a10cecf00407b4608a6902b5",
"index": 3897,
"step-1": "<mask token>\n\n\nclass Cuneiform(pp.unicode_set):\n <mask token>\n _ranges: List[Tuple[int, ...]] = [(66432, 66517), (73728, 74751), (\n 74752, 74879)]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Cuneiform(pp.unicode_set):\n \"\"\"Unicode set for Cuneiform Character Range\"\"\"\n _ranges: List[Tuple[int, ...]] = [(66432, 66517), (73728, 74751), (\n 74752, 74879)]\n\n\n<mask token>\nrvalue <<= fn_call | ident | str_expr | pp.common.number\n<mask token>\nscript.parseString(cuneiform_hello_world).pprint(width=40)\n<mask token>\nident.add_parse_action(lambda t: names_map.get(t[0], t[0]))\ndef_.add_parse_action(lambda : 'def')\nprint(\"\"\"\nconvert Cuneiform Python to executable Python\"\"\")\n<mask token>\nprint('=================\\n' + cuneiform_hello_world.strip() +\n \"\"\"\n=================\n\"\"\" + transformed + \"\"\"\n=================\n\"\"\")\nprint('# run transformed Python')\nexec(transformed)\n",
"step-3": "<mask token>\n\n\nclass Cuneiform(pp.unicode_set):\n \"\"\"Unicode set for Cuneiform Character Range\"\"\"\n _ranges: List[Tuple[int, ...]] = [(66432, 66517), (73728, 74751), (\n 74752, 74879)]\n\n\n<mask token>\nLPAR, RPAR, COLON, EQ = map(pp.Suppress, '():=')\ndef_ = pp.Keyword('𒁴𒈫', ident_chars=Cuneiform.identbodychars).set_name('def')\nany_keyword = def_\nident = ~any_keyword + pp.Word(Cuneiform.identchars, Cuneiform.\n identbodychars, asKeyword=True)\nstr_expr = pp.infix_notation(pp.QuotedString('\"') | pp.common.integer, [(\n '*', 2, pp.OpAssoc.LEFT), ('+', 2, pp.OpAssoc.LEFT)])\nrvalue = pp.Forward()\nfn_call = (ident + pp.Group(LPAR + pp.Optional(rvalue) + RPAR)).set_name(\n 'fn_call')\nrvalue <<= fn_call | ident | str_expr | pp.common.number\nassignment_stmt = ident + EQ + rvalue\nstmt = pp.Group(fn_call | assignment_stmt).set_name('stmt')\nfn_def = pp.Group(def_ + ident + pp.Group(LPAR + pp.Optional(rvalue) + RPAR\n ) + COLON).set_name('fn_def')\nfn_body = pp.IndentedBlock(stmt).set_name('fn_body')\nfn_expr = pp.Group(fn_def + pp.Group(fn_body))\nscript = fn_expr[...] + stmt[...]\ncuneiform_hello_world = \"\"\"\n𒁴𒈫 𒀄𒂖𒆷𒁎():\n 𒀁 = \"𒀄𒂖𒆷𒁎, 𒍟𒁎𒉿𒆷𒀳!\\\\n\" * 3\n 𒄑𒉿𒅔𒋫(𒀁)\n\n𒀄𒂖𒆷𒁎()\"\"\"\nscript.parseString(cuneiform_hello_world).pprint(width=40)\nnames_map = {'𒄑𒉿𒅔𒋫': 'print'}\nident.add_parse_action(lambda t: names_map.get(t[0], t[0]))\ndef_.add_parse_action(lambda : 'def')\nprint(\"\"\"\nconvert Cuneiform Python to executable Python\"\"\")\ntransformed = (def_ | ident).ignore(pp.quoted_string).transform_string(\n cuneiform_hello_world).strip()\nprint('=================\\n' + cuneiform_hello_world.strip() +\n \"\"\"\n=================\n\"\"\" + transformed + \"\"\"\n=================\n\"\"\")\nprint('# run transformed Python')\nexec(transformed)\n",
"step-4": "from typing import List, Tuple\nimport pyparsing as pp\n\n\nclass Cuneiform(pp.unicode_set):\n \"\"\"Unicode set for Cuneiform Character Range\"\"\"\n _ranges: List[Tuple[int, ...]] = [(66432, 66517), (73728, 74751), (\n 74752, 74879)]\n\n\n<mask token>\nLPAR, RPAR, COLON, EQ = map(pp.Suppress, '():=')\ndef_ = pp.Keyword('𒁴𒈫', ident_chars=Cuneiform.identbodychars).set_name('def')\nany_keyword = def_\nident = ~any_keyword + pp.Word(Cuneiform.identchars, Cuneiform.\n identbodychars, asKeyword=True)\nstr_expr = pp.infix_notation(pp.QuotedString('\"') | pp.common.integer, [(\n '*', 2, pp.OpAssoc.LEFT), ('+', 2, pp.OpAssoc.LEFT)])\nrvalue = pp.Forward()\nfn_call = (ident + pp.Group(LPAR + pp.Optional(rvalue) + RPAR)).set_name(\n 'fn_call')\nrvalue <<= fn_call | ident | str_expr | pp.common.number\nassignment_stmt = ident + EQ + rvalue\nstmt = pp.Group(fn_call | assignment_stmt).set_name('stmt')\nfn_def = pp.Group(def_ + ident + pp.Group(LPAR + pp.Optional(rvalue) + RPAR\n ) + COLON).set_name('fn_def')\nfn_body = pp.IndentedBlock(stmt).set_name('fn_body')\nfn_expr = pp.Group(fn_def + pp.Group(fn_body))\nscript = fn_expr[...] + stmt[...]\ncuneiform_hello_world = \"\"\"\n𒁴𒈫 𒀄𒂖𒆷𒁎():\n 𒀁 = \"𒀄𒂖𒆷𒁎, 𒍟𒁎𒉿𒆷𒀳!\\\\n\" * 3\n 𒄑𒉿𒅔𒋫(𒀁)\n\n𒀄𒂖𒆷𒁎()\"\"\"\nscript.parseString(cuneiform_hello_world).pprint(width=40)\nnames_map = {'𒄑𒉿𒅔𒋫': 'print'}\nident.add_parse_action(lambda t: names_map.get(t[0], t[0]))\ndef_.add_parse_action(lambda : 'def')\nprint(\"\"\"\nconvert Cuneiform Python to executable Python\"\"\")\ntransformed = (def_ | ident).ignore(pp.quoted_string).transform_string(\n cuneiform_hello_world).strip()\nprint('=================\\n' + cuneiform_hello_world.strip() +\n \"\"\"\n=================\n\"\"\" + transformed + \"\"\"\n=================\n\"\"\")\nprint('# run transformed Python')\nexec(transformed)\n",
"step-5": "#\n# cuneiform_python.py\n#\n# Example showing how to create a custom Unicode set for parsing\n#\n# Copyright Paul McGuire, 2021\n#\nfrom typing import List, Tuple\nimport pyparsing as pp\n\n\nclass Cuneiform(pp.unicode_set):\n \"\"\"Unicode set for Cuneiform Character Range\"\"\"\n\n _ranges: List[Tuple[int, ...]] = [\n (0x10380, 0x103d5),\n (0x12000, 0x123FF),\n (0x12400, 0x1247F),\n ]\n\n\n# list out all valid identifier characters\n# print(Cuneiform.identchars)\n\n\n\"\"\"\nSimple Cuneiform Python language transformer\n\nDefine Cuneiform \"words\"\n print: 𒄑𒉿𒅔𒋫\n hello: 𒀄𒂖𒆷𒁎\n world: 𒍟𒁎𒉿𒆷𒀳\n def: 𒁴𒈫\n\"\"\"\n\n# uncomment to show parse-time debugging\n# pp.enable_diag(pp.Diagnostics.enable_debug_on_named_expressions)\n\n# define a MINIMAL Python parser\nLPAR, RPAR, COLON, EQ = map(pp.Suppress, \"():=\")\ndef_ = pp.Keyword(\"𒁴𒈫\", ident_chars=Cuneiform.identbodychars).set_name(\"def\")\nany_keyword = def_\nident = (~any_keyword) + pp.Word(\n Cuneiform.identchars, Cuneiform.identbodychars, asKeyword=True\n)\nstr_expr = pp.infix_notation(\n pp.QuotedString('\"') | pp.common.integer,\n [\n (\"*\", 2, pp.OpAssoc.LEFT),\n (\"+\", 2, pp.OpAssoc.LEFT),\n ],\n)\n\nrvalue = pp.Forward()\nfn_call = (ident + pp.Group(LPAR + pp.Optional(rvalue) + RPAR)).set_name(\"fn_call\")\n\nrvalue <<= fn_call | ident | str_expr | pp.common.number\nassignment_stmt = ident + EQ + rvalue\n\nstmt = pp.Group(fn_call | assignment_stmt).set_name(\"stmt\")\n\nfn_def = pp.Group(\n def_ + ident + pp.Group(LPAR + pp.Optional(rvalue) + RPAR) + COLON\n).set_name(\"fn_def\")\nfn_body = pp.IndentedBlock(stmt).set_name(\"fn_body\")\nfn_expr = pp.Group(fn_def + pp.Group(fn_body))\n\nscript = fn_expr[...] + stmt[...]\n\n\n# parse some Python written in Cuneiform\ncuneiform_hello_world = r\"\"\"\n𒁴𒈫 𒀄𒂖𒆷𒁎():\n 𒀁 = \"𒀄𒂖𒆷𒁎, 𒍟𒁎𒉿𒆷𒀳!\\n\" * 3\n 𒄑𒉿𒅔𒋫(𒀁)\n\n𒀄𒂖𒆷𒁎()\"\"\"\nscript.parseString(cuneiform_hello_world).pprint(width=40)\n\n\n# use transform_string to convert keywords and builtins to runnable Python\nnames_map = {\n \"𒄑𒉿𒅔𒋫\": \"print\",\n}\nident.add_parse_action(lambda t: names_map.get(t[0], t[0]))\ndef_.add_parse_action(lambda: \"def\")\n\nprint(\"\\nconvert Cuneiform Python to executable Python\")\ntransformed = (\n # always put ident last\n (def_ | ident)\n .ignore(pp.quoted_string)\n .transform_string(cuneiform_hello_world)\n .strip()\n)\nprint(\n \"=================\\n\"\n + cuneiform_hello_world.strip()\n + \"\\n=================\\n\"\n + transformed\n + \"\\n=================\\n\"\n)\nprint(\"# run transformed Python\")\nexec(transformed)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
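The Cuneiform class above is an instance of a general pyparsing technique: subclass pp.unicode_set, list the code-point ranges, and the set derives its alphas and identchars for you. A minimal sketch of the same pattern with a different block, assuming pyparsing >= 3.0 as in the record (the Greek range is only an example):

import pyparsing as pp

class Greek(pp.unicode_set):
    """Unicode set for the Greek and Coptic block."""
    _ranges = [(0x0370, 0x03FF)]

# identchars/identbodychars are derived from _ranges by unicode_set
greek_ident = pp.Word(Greek.identchars, Greek.identbodychars)
print(greek_ident.parse_string("αλφα βητα")[0])  # -> αλφα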
import numpy as np
import cv2
from camera import load_K, load_camera_dist, load_camera_ret
def undistort_img(img):
'''
Return an undistorted image given previous calibrated parameters
References from OpenCV docs
'''
ret = load_camera_ret()
K = load_K()
dist = load_camera_dist()
h,w = img.shape[:2]
new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(K,dist,(w,h),1,(w,h))
img_undistorted = cv2.undistort(img, K, dist, None, new_camera_matrix)
return img_undistorted
|
normal
|
{
"blob_id": "844c630d3fe2dda833064556228b524608cfece9",
"index": 4671,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef undistort_img(img):\n \"\"\"\n Return an undistorted image given previous calibrated parameters \n References from OpenCV docs\n \"\"\"\n ret = load_camera_ret()\n K = load_K()\n dist = load_camera_dist()\n h, w = img.shape[:2]\n new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(K, dist, (w, h),\n 1, (w, h))\n img_undistorted = cv2.undistort(img, K, dist, None, new_camera_matrix)\n return img_undistorted\n",
"step-3": "import numpy as np\nimport cv2\nfrom camera import load_K, load_camera_dist, load_camera_ret\n\n\ndef undistort_img(img):\n \"\"\"\n Return an undistorted image given previous calibrated parameters \n References from OpenCV docs\n \"\"\"\n ret = load_camera_ret()\n K = load_K()\n dist = load_camera_dist()\n h, w = img.shape[:2]\n new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(K, dist, (w, h),\n 1, (w, h))\n img_undistorted = cv2.undistort(img, K, dist, None, new_camera_matrix)\n return img_undistorted\n",
"step-4": "import numpy as np\nimport cv2\n\nfrom camera import load_K, load_camera_dist, load_camera_ret\n\ndef undistort_img(img):\n '''\n Return an undistorted image given previous calibrated parameters \n References from OpenCV docs\n '''\n ret = load_camera_ret()\n K = load_K()\n dist = load_camera_dist()\n h,w = img.shape[:2]\n\n new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(K,dist,(w,h),1,(w,h))\n img_undistorted = cv2.undistort(img, K, dist, None, new_camera_matrix)\n\n return img_undistorted\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
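cv2.getOptimalNewCameraMatrix also returns a region of interest, which this record computes but never uses; cropping to it trims the black border that undistortion leaves when alpha=1. A sketch of that extra step, assuming the same camera loader helpers:

import cv2
from camera import load_K, load_camera_dist

def undistort_and_crop(img):
    K = load_K()
    dist = load_camera_dist()
    h, w = img.shape[:2]
    new_K, roi = cv2.getOptimalNewCameraMatrix(K, dist, (w, h), 1, (w, h))
    undistorted = cv2.undistort(img, K, dist, None, new_K)
    x, y, rw, rh = roi                      # valid-pixel rectangle
    return undistorted[y:y + rh, x:x + rw]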
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def main():
num = int(input('dia: '))
dia(num)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def main():
num = int(input('dia: '))
dia(num)
def dia(a):
if a == 1:
print('Domingo !')
elif a == 2:
print('Segunda !')
else:
print('valor invalido !')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def main():
num = int(input('dia: '))
dia(num)
def dia(a):
if a == 1:
print('Domingo !')
elif a == 2:
print('Segunda !')
else:
print('valor invalido !')
main()
|
flexible
|
{
"blob_id": "07332e2da5458fda2112de2507037a759d3c62db",
"index": 3382,
"step-1": "<mask token>\n",
"step-2": "def main():\n num = int(input('dia: '))\n dia(num)\n\n\n<mask token>\n",
"step-3": "def main():\n num = int(input('dia: '))\n dia(num)\n\n\ndef dia(a):\n if a == 1:\n print('Domingo !')\n elif a == 2:\n print('Segunda !')\n else:\n print('valor invalido !')\n\n\n<mask token>\n",
"step-4": "def main():\n num = int(input('dia: '))\n dia(num)\n\n\ndef dia(a):\n if a == 1:\n print('Domingo !')\n elif a == 2:\n print('Segunda !')\n else:\n print('valor invalido !')\n\n\nmain()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
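The dia function in this record covers only days 1 and 2 before falling through to the invalid-value branch; a dict lookup extends it to the whole week without growing the if/elif chain. A sketch, keeping the record's Portuguese day names:

DIAS = {1: 'Domingo', 2: 'Segunda', 3: 'Terça', 4: 'Quarta',
        5: 'Quinta', 6: 'Sexta', 7: 'Sábado'}

def dia(a):
    # Fall back to the record's invalid-value message for unknown keys
    print(DIAS.get(a, 'valor invalido') + ' !')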
<|reserved_special_token_0|>
def asymmetric_extend(q1, q2, extend_fn, backward=False):
if backward and ASYMETRIC:
return reversed(list(extend_fn(q2, q1)))
return extend_fn(q1, q2)
<|reserved_special_token_0|>
def calculate_radius(d=2):
interval = 1 - 0
vol_free = interval ** d
radius = 1.0 / 2
vol_ball = np.pi * radius ** d
gamma = 2 * ((1 + 1.0 / d) * (vol_free / vol_ball)) ** (1.0 / d)
return gamma
def default_weights(conf, weights=None, scale=1.0):
if weights is not None:
return weights
d = len(conf)
weights = scale * np.ones(d)
return weights
<|reserved_special_token_0|>
def get_distance_fn(weights, p_norm=2):
embed_fn = get_embed_fn(weights)
return lambda q1, q2: np.linalg.norm(embed_fn(q2) - embed_fn(q1), ord=
p_norm)
def distance_fn_from_extend_fn(extend_fn):
def distance_fn(q1, q2):
path = list(extend_fn(q1, q2))
return len(path)
return distance_fn
<|reserved_special_token_0|>
def get_cost_fn(distance_fn=get_distance, constant=0.0, coefficient=1.0):
def fn(q1, q2):
return constant + coefficient * distance_fn(q1, q2)
return fn
def get_duration_fn(difference_fn=get_delta, t_constant=0.0, t_min=0.0, **
kwargs):
v_max, a_max = get_default_limits(d=None, **kwargs)
def fn(q1, q2):
difference = difference_fn(q1, q2)
t_transit = 0.0
if not np.allclose(np.zeros(len(difference)), difference, atol=
1e-06, rtol=0):
t_transit = solve_linear(difference, v_max, a_max,
only_duration=True)
assert t_transit is not None
t = t_constant + t_transit
return max(t_min, t)
return fn
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def asymmetric_extend(q1, q2, extend_fn, backward=False):
if backward and ASYMETRIC:
return reversed(list(extend_fn(q2, q1)))
return extend_fn(q1, q2)
<|reserved_special_token_0|>
def calculate_radius(d=2):
interval = 1 - 0
vol_free = interval ** d
radius = 1.0 / 2
vol_ball = np.pi * radius ** d
gamma = 2 * ((1 + 1.0 / d) * (vol_free / vol_ball)) ** (1.0 / d)
return gamma
def default_weights(conf, weights=None, scale=1.0):
if weights is not None:
return weights
d = len(conf)
weights = scale * np.ones(d)
return weights
<|reserved_special_token_0|>
def get_distance_fn(weights, p_norm=2):
embed_fn = get_embed_fn(weights)
return lambda q1, q2: np.linalg.norm(embed_fn(q2) - embed_fn(q1), ord=
p_norm)
def distance_fn_from_extend_fn(extend_fn):
def distance_fn(q1, q2):
path = list(extend_fn(q1, q2))
return len(path)
return distance_fn
def get_difference_fn(circular={}):
def fn(q2, q1):
return tuple(circular_difference(v2, v1, interval=circular.get(i,
UNBOUNDED_LIMITS)) for i, (v2, v1) in enumerate(zip(q2, q1)))
return fn
def get_cost_fn(distance_fn=get_distance, constant=0.0, coefficient=1.0):
def fn(q1, q2):
return constant + coefficient * distance_fn(q1, q2)
return fn
def get_duration_fn(difference_fn=get_delta, t_constant=0.0, t_min=0.0, **
kwargs):
v_max, a_max = get_default_limits(d=None, **kwargs)
def fn(q1, q2):
difference = difference_fn(q1, q2)
t_transit = 0.0
if not np.allclose(np.zeros(len(difference)), difference, atol=
1e-06, rtol=0):
t_transit = solve_linear(difference, v_max, a_max,
only_duration=True)
assert t_transit is not None
t = t_constant + t_transit
return max(t_min, t)
return fn
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def asymmetric_extend(q1, q2, extend_fn, backward=False):
if backward and ASYMETRIC:
return reversed(list(extend_fn(q2, q1)))
return extend_fn(q1, q2)
def extend_towards(tree, target, distance_fn, extend_fn, collision_fn, swap
=False, tree_frequency=1, **kwargs):
assert tree_frequency >= 1
last = argmin(lambda n: distance_fn(n.config, target), tree)
extend = list(asymmetric_extend(last.config, target, extend_fn,
backward=swap))
safe = list(takewhile(negate(collision_fn), extend))
for i, q in enumerate(safe):
if i % tree_frequency == 0 or i == len(safe) - 1:
last = TreeNode(q, parent=last)
tree.append(last)
success = len(extend) == len(safe)
return last, success
def calculate_radius(d=2):
interval = 1 - 0
vol_free = interval ** d
radius = 1.0 / 2
vol_ball = np.pi * radius ** d
gamma = 2 * ((1 + 1.0 / d) * (vol_free / vol_ball)) ** (1.0 / d)
return gamma
def default_weights(conf, weights=None, scale=1.0):
if weights is not None:
return weights
d = len(conf)
weights = scale * np.ones(d)
return weights
def get_embed_fn(weights):
weights = np.array(weights)
return lambda q: weights * q
def get_distance_fn(weights, p_norm=2):
embed_fn = get_embed_fn(weights)
return lambda q1, q2: np.linalg.norm(embed_fn(q2) - embed_fn(q1), ord=
p_norm)
def distance_fn_from_extend_fn(extend_fn):
def distance_fn(q1, q2):
path = list(extend_fn(q1, q2))
return len(path)
return distance_fn
def get_difference_fn(circular={}):
def fn(q2, q1):
return tuple(circular_difference(v2, v1, interval=circular.get(i,
UNBOUNDED_LIMITS)) for i, (v2, v1) in enumerate(zip(q2, q1)))
return fn
def get_cost_fn(distance_fn=get_distance, constant=0.0, coefficient=1.0):
def fn(q1, q2):
return constant + coefficient * distance_fn(q1, q2)
return fn
def get_duration_fn(difference_fn=get_delta, t_constant=0.0, t_min=0.0, **
kwargs):
v_max, a_max = get_default_limits(d=None, **kwargs)
def fn(q1, q2):
difference = difference_fn(q1, q2)
t_transit = 0.0
if not np.allclose(np.zeros(len(difference)), difference, atol=
1e-06, rtol=0):
t_transit = solve_linear(difference, v_max, a_max,
only_duration=True)
assert t_transit is not None
t = t_constant + t_transit
return max(t_min, t)
return fn
<|reserved_special_token_1|>
from itertools import takewhile
import numpy as np
from .rrt import TreeNode
from .trajectory.linear import get_default_limits, solve_linear
from .trajectory.retime import spline_duration
from .utils import argmin, negate, circular_difference, UNBOUNDED_LIMITS, get_distance, get_delta
ASYMETRIC = True
def asymmetric_extend(q1, q2, extend_fn, backward=False):
if backward and ASYMETRIC:
return reversed(list(extend_fn(q2, q1)))
return extend_fn(q1, q2)
def extend_towards(tree, target, distance_fn, extend_fn, collision_fn, swap
=False, tree_frequency=1, **kwargs):
assert tree_frequency >= 1
last = argmin(lambda n: distance_fn(n.config, target), tree)
extend = list(asymmetric_extend(last.config, target, extend_fn,
backward=swap))
safe = list(takewhile(negate(collision_fn), extend))
for i, q in enumerate(safe):
if i % tree_frequency == 0 or i == len(safe) - 1:
last = TreeNode(q, parent=last)
tree.append(last)
success = len(extend) == len(safe)
return last, success
def calculate_radius(d=2):
interval = 1 - 0
vol_free = interval ** d
radius = 1.0 / 2
vol_ball = np.pi * radius ** d
gamma = 2 * ((1 + 1.0 / d) * (vol_free / vol_ball)) ** (1.0 / d)
return gamma
def default_weights(conf, weights=None, scale=1.0):
if weights is not None:
return weights
d = len(conf)
weights = scale * np.ones(d)
return weights
def get_embed_fn(weights):
weights = np.array(weights)
return lambda q: weights * q
def get_distance_fn(weights, p_norm=2):
embed_fn = get_embed_fn(weights)
return lambda q1, q2: np.linalg.norm(embed_fn(q2) - embed_fn(q1), ord=
p_norm)
def distance_fn_from_extend_fn(extend_fn):
def distance_fn(q1, q2):
path = list(extend_fn(q1, q2))
return len(path)
return distance_fn
def get_difference_fn(circular={}):
def fn(q2, q1):
return tuple(circular_difference(v2, v1, interval=circular.get(i,
UNBOUNDED_LIMITS)) for i, (v2, v1) in enumerate(zip(q2, q1)))
return fn
def get_cost_fn(distance_fn=get_distance, constant=0.0, coefficient=1.0):
def fn(q1, q2):
return constant + coefficient * distance_fn(q1, q2)
return fn
def get_duration_fn(difference_fn=get_delta, t_constant=0.0, t_min=0.0, **
kwargs):
v_max, a_max = get_default_limits(d=None, **kwargs)
def fn(q1, q2):
difference = difference_fn(q1, q2)
t_transit = 0.0
if not np.allclose(np.zeros(len(difference)), difference, atol=
1e-06, rtol=0):
t_transit = solve_linear(difference, v_max, a_max,
only_duration=True)
assert t_transit is not None
t = t_constant + t_transit
return max(t_min, t)
return fn
<|reserved_special_token_1|>
from itertools import takewhile
import numpy as np
from .rrt import TreeNode
from .trajectory.linear import get_default_limits, solve_linear
from .trajectory.retime import spline_duration
from .utils import argmin, negate, circular_difference, UNBOUNDED_LIMITS, get_distance, get_delta
ASYMETRIC = True
def asymmetric_extend(q1, q2, extend_fn, backward=False):
if backward and ASYMETRIC:
return reversed(list(extend_fn(q2, q1))) # Forward model
return extend_fn(q1, q2)
def extend_towards(tree, target, distance_fn, extend_fn, collision_fn, swap=False, tree_frequency=1, **kwargs):
assert tree_frequency >= 1
last = argmin(lambda n: distance_fn(n.config, target), tree)
extend = list(asymmetric_extend(last.config, target, extend_fn, backward=swap))
safe = list(takewhile(negate(collision_fn), extend))
for i, q in enumerate(safe):
if (i % tree_frequency == 0) or (i == len(safe) - 1):
last = TreeNode(q, parent=last)
tree.append(last)
success = len(extend) == len(safe)
return last, success
##################################################
def calculate_radius(d=2):
# TODO: unify with get_threshold_fn
# Sampling-based Algorithms for Optimal Motion Planning
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.419.5503&rep=rep1&type=pdf
# https://en.wikipedia.org/wiki/Volume_of_an_n-ball
interval = (1 - 0)
vol_free = interval ** d
radius = 1./2
vol_ball = np.pi * (radius ** d)
gamma = 2 * ((1 + 1. / d) * (vol_free / vol_ball)) ** (1. / d)
# threshold = gamma * (math.log(n) / n) ** (1. / d)
return gamma
def default_weights(conf, weights=None, scale=1.):
if weights is not None:
return weights
d = len(conf)
weights = scale*np.ones(d)
return weights
def get_embed_fn(weights):
weights = np.array(weights)
return lambda q: weights * q
def get_distance_fn(weights, p_norm=2):
embed_fn = get_embed_fn(weights)
return lambda q1, q2: np.linalg.norm(embed_fn(q2) - embed_fn(q1), ord=p_norm)
def distance_fn_from_extend_fn(extend_fn):
# TODO: can compute cost between waypoints from extend_fn
def distance_fn(q1, q2):
path = list(extend_fn(q1, q2)) # TODO: cache
return len(path) # TODO: subtract endpoints?
return distance_fn
##################################################
def get_difference_fn(circular={}):
def fn(q2, q1):
return tuple(circular_difference(v2, v1, interval=circular.get(i, UNBOUNDED_LIMITS))
for i, (v2, v1) in enumerate(zip(q2, q1)))
return fn
def get_cost_fn(distance_fn=get_distance, constant=0., coefficient=1.):
def fn(q1, q2):
return constant + coefficient*distance_fn(q1, q2)
return fn
def get_duration_fn(difference_fn=get_delta, t_constant=0., t_min=0., **kwargs):
v_max, a_max = get_default_limits(d=None, **kwargs)
def fn(q1, q2):
# TODO: be careful that not colinear with other waypoints
difference = difference_fn(q1, q2)
t_transit = 0.
if not np.allclose(np.zeros(len(difference)), difference, atol=1e-6, rtol=0):
t_transit = solve_linear(difference, v_max, a_max, only_duration=True)
assert t_transit is not None
#curve = solve_linear(difference, v_max, a_max)
#t_transit = spline_duration(curve)
t = t_constant + t_transit
return max(t_min, t) # TODO: clip function
return fn
|
flexible
|
{
"blob_id": "84febcc599aa97858ded3b6f803b6b76960878d4",
"index": 7188,
"step-1": "<mask token>\n\n\ndef asymmetric_extend(q1, q2, extend_fn, backward=False):\n if backward and ASYMETRIC:\n return reversed(list(extend_fn(q2, q1)))\n return extend_fn(q1, q2)\n\n\n<mask token>\n\n\ndef calculate_radius(d=2):\n interval = 1 - 0\n vol_free = interval ** d\n radius = 1.0 / 2\n vol_ball = np.pi * radius ** d\n gamma = 2 * ((1 + 1.0 / d) * (vol_free / vol_ball)) ** (1.0 / d)\n return gamma\n\n\ndef default_weights(conf, weights=None, scale=1.0):\n if weights is not None:\n return weights\n d = len(conf)\n weights = scale * np.ones(d)\n return weights\n\n\n<mask token>\n\n\ndef get_distance_fn(weights, p_norm=2):\n embed_fn = get_embed_fn(weights)\n return lambda q1, q2: np.linalg.norm(embed_fn(q2) - embed_fn(q1), ord=\n p_norm)\n\n\ndef distance_fn_from_extend_fn(extend_fn):\n\n def distance_fn(q1, q2):\n path = list(extend_fn(q1, q2))\n return len(path)\n return distance_fn\n\n\n<mask token>\n\n\ndef get_cost_fn(distance_fn=get_distance, constant=0.0, coefficient=1.0):\n\n def fn(q1, q2):\n return constant + coefficient * distance_fn(q1, q2)\n return fn\n\n\ndef get_duration_fn(difference_fn=get_delta, t_constant=0.0, t_min=0.0, **\n kwargs):\n v_max, a_max = get_default_limits(d=None, **kwargs)\n\n def fn(q1, q2):\n difference = difference_fn(q1, q2)\n t_transit = 0.0\n if not np.allclose(np.zeros(len(difference)), difference, atol=\n 1e-06, rtol=0):\n t_transit = solve_linear(difference, v_max, a_max,\n only_duration=True)\n assert t_transit is not None\n t = t_constant + t_transit\n return max(t_min, t)\n return fn\n",
"step-2": "<mask token>\n\n\ndef asymmetric_extend(q1, q2, extend_fn, backward=False):\n if backward and ASYMETRIC:\n return reversed(list(extend_fn(q2, q1)))\n return extend_fn(q1, q2)\n\n\n<mask token>\n\n\ndef calculate_radius(d=2):\n interval = 1 - 0\n vol_free = interval ** d\n radius = 1.0 / 2\n vol_ball = np.pi * radius ** d\n gamma = 2 * ((1 + 1.0 / d) * (vol_free / vol_ball)) ** (1.0 / d)\n return gamma\n\n\ndef default_weights(conf, weights=None, scale=1.0):\n if weights is not None:\n return weights\n d = len(conf)\n weights = scale * np.ones(d)\n return weights\n\n\n<mask token>\n\n\ndef get_distance_fn(weights, p_norm=2):\n embed_fn = get_embed_fn(weights)\n return lambda q1, q2: np.linalg.norm(embed_fn(q2) - embed_fn(q1), ord=\n p_norm)\n\n\ndef distance_fn_from_extend_fn(extend_fn):\n\n def distance_fn(q1, q2):\n path = list(extend_fn(q1, q2))\n return len(path)\n return distance_fn\n\n\ndef get_difference_fn(circular={}):\n\n def fn(q2, q1):\n return tuple(circular_difference(v2, v1, interval=circular.get(i,\n UNBOUNDED_LIMITS)) for i, (v2, v1) in enumerate(zip(q2, q1)))\n return fn\n\n\ndef get_cost_fn(distance_fn=get_distance, constant=0.0, coefficient=1.0):\n\n def fn(q1, q2):\n return constant + coefficient * distance_fn(q1, q2)\n return fn\n\n\ndef get_duration_fn(difference_fn=get_delta, t_constant=0.0, t_min=0.0, **\n kwargs):\n v_max, a_max = get_default_limits(d=None, **kwargs)\n\n def fn(q1, q2):\n difference = difference_fn(q1, q2)\n t_transit = 0.0\n if not np.allclose(np.zeros(len(difference)), difference, atol=\n 1e-06, rtol=0):\n t_transit = solve_linear(difference, v_max, a_max,\n only_duration=True)\n assert t_transit is not None\n t = t_constant + t_transit\n return max(t_min, t)\n return fn\n",
"step-3": "<mask token>\n\n\ndef asymmetric_extend(q1, q2, extend_fn, backward=False):\n if backward and ASYMETRIC:\n return reversed(list(extend_fn(q2, q1)))\n return extend_fn(q1, q2)\n\n\ndef extend_towards(tree, target, distance_fn, extend_fn, collision_fn, swap\n =False, tree_frequency=1, **kwargs):\n assert tree_frequency >= 1\n last = argmin(lambda n: distance_fn(n.config, target), tree)\n extend = list(asymmetric_extend(last.config, target, extend_fn,\n backward=swap))\n safe = list(takewhile(negate(collision_fn), extend))\n for i, q in enumerate(safe):\n if i % tree_frequency == 0 or i == len(safe) - 1:\n last = TreeNode(q, parent=last)\n tree.append(last)\n success = len(extend) == len(safe)\n return last, success\n\n\ndef calculate_radius(d=2):\n interval = 1 - 0\n vol_free = interval ** d\n radius = 1.0 / 2\n vol_ball = np.pi * radius ** d\n gamma = 2 * ((1 + 1.0 / d) * (vol_free / vol_ball)) ** (1.0 / d)\n return gamma\n\n\ndef default_weights(conf, weights=None, scale=1.0):\n if weights is not None:\n return weights\n d = len(conf)\n weights = scale * np.ones(d)\n return weights\n\n\ndef get_embed_fn(weights):\n weights = np.array(weights)\n return lambda q: weights * q\n\n\ndef get_distance_fn(weights, p_norm=2):\n embed_fn = get_embed_fn(weights)\n return lambda q1, q2: np.linalg.norm(embed_fn(q2) - embed_fn(q1), ord=\n p_norm)\n\n\ndef distance_fn_from_extend_fn(extend_fn):\n\n def distance_fn(q1, q2):\n path = list(extend_fn(q1, q2))\n return len(path)\n return distance_fn\n\n\ndef get_difference_fn(circular={}):\n\n def fn(q2, q1):\n return tuple(circular_difference(v2, v1, interval=circular.get(i,\n UNBOUNDED_LIMITS)) for i, (v2, v1) in enumerate(zip(q2, q1)))\n return fn\n\n\ndef get_cost_fn(distance_fn=get_distance, constant=0.0, coefficient=1.0):\n\n def fn(q1, q2):\n return constant + coefficient * distance_fn(q1, q2)\n return fn\n\n\ndef get_duration_fn(difference_fn=get_delta, t_constant=0.0, t_min=0.0, **\n kwargs):\n v_max, a_max = get_default_limits(d=None, **kwargs)\n\n def fn(q1, q2):\n difference = difference_fn(q1, q2)\n t_transit = 0.0\n if not np.allclose(np.zeros(len(difference)), difference, atol=\n 1e-06, rtol=0):\n t_transit = solve_linear(difference, v_max, a_max,\n only_duration=True)\n assert t_transit is not None\n t = t_constant + t_transit\n return max(t_min, t)\n return fn\n",
"step-4": "from itertools import takewhile\nimport numpy as np\nfrom .rrt import TreeNode\nfrom .trajectory.linear import get_default_limits, solve_linear\nfrom .trajectory.retime import spline_duration\nfrom .utils import argmin, negate, circular_difference, UNBOUNDED_LIMITS, get_distance, get_delta\nASYMETRIC = True\n\n\ndef asymmetric_extend(q1, q2, extend_fn, backward=False):\n if backward and ASYMETRIC:\n return reversed(list(extend_fn(q2, q1)))\n return extend_fn(q1, q2)\n\n\ndef extend_towards(tree, target, distance_fn, extend_fn, collision_fn, swap\n =False, tree_frequency=1, **kwargs):\n assert tree_frequency >= 1\n last = argmin(lambda n: distance_fn(n.config, target), tree)\n extend = list(asymmetric_extend(last.config, target, extend_fn,\n backward=swap))\n safe = list(takewhile(negate(collision_fn), extend))\n for i, q in enumerate(safe):\n if i % tree_frequency == 0 or i == len(safe) - 1:\n last = TreeNode(q, parent=last)\n tree.append(last)\n success = len(extend) == len(safe)\n return last, success\n\n\ndef calculate_radius(d=2):\n interval = 1 - 0\n vol_free = interval ** d\n radius = 1.0 / 2\n vol_ball = np.pi * radius ** d\n gamma = 2 * ((1 + 1.0 / d) * (vol_free / vol_ball)) ** (1.0 / d)\n return gamma\n\n\ndef default_weights(conf, weights=None, scale=1.0):\n if weights is not None:\n return weights\n d = len(conf)\n weights = scale * np.ones(d)\n return weights\n\n\ndef get_embed_fn(weights):\n weights = np.array(weights)\n return lambda q: weights * q\n\n\ndef get_distance_fn(weights, p_norm=2):\n embed_fn = get_embed_fn(weights)\n return lambda q1, q2: np.linalg.norm(embed_fn(q2) - embed_fn(q1), ord=\n p_norm)\n\n\ndef distance_fn_from_extend_fn(extend_fn):\n\n def distance_fn(q1, q2):\n path = list(extend_fn(q1, q2))\n return len(path)\n return distance_fn\n\n\ndef get_difference_fn(circular={}):\n\n def fn(q2, q1):\n return tuple(circular_difference(v2, v1, interval=circular.get(i,\n UNBOUNDED_LIMITS)) for i, (v2, v1) in enumerate(zip(q2, q1)))\n return fn\n\n\ndef get_cost_fn(distance_fn=get_distance, constant=0.0, coefficient=1.0):\n\n def fn(q1, q2):\n return constant + coefficient * distance_fn(q1, q2)\n return fn\n\n\ndef get_duration_fn(difference_fn=get_delta, t_constant=0.0, t_min=0.0, **\n kwargs):\n v_max, a_max = get_default_limits(d=None, **kwargs)\n\n def fn(q1, q2):\n difference = difference_fn(q1, q2)\n t_transit = 0.0\n if not np.allclose(np.zeros(len(difference)), difference, atol=\n 1e-06, rtol=0):\n t_transit = solve_linear(difference, v_max, a_max,\n only_duration=True)\n assert t_transit is not None\n t = t_constant + t_transit\n return max(t_min, t)\n return fn\n",
"step-5": "from itertools import takewhile\n\nimport numpy as np\n\nfrom .rrt import TreeNode\nfrom .trajectory.linear import get_default_limits, solve_linear\nfrom .trajectory.retime import spline_duration\nfrom .utils import argmin, negate, circular_difference, UNBOUNDED_LIMITS, get_distance, get_delta\n\nASYMETRIC = True\n\n\ndef asymmetric_extend(q1, q2, extend_fn, backward=False):\n if backward and ASYMETRIC:\n return reversed(list(extend_fn(q2, q1))) # Forward model\n return extend_fn(q1, q2)\n\n\ndef extend_towards(tree, target, distance_fn, extend_fn, collision_fn, swap=False, tree_frequency=1, **kwargs):\n assert tree_frequency >= 1\n last = argmin(lambda n: distance_fn(n.config, target), tree)\n extend = list(asymmetric_extend(last.config, target, extend_fn, backward=swap))\n safe = list(takewhile(negate(collision_fn), extend))\n for i, q in enumerate(safe):\n if (i % tree_frequency == 0) or (i == len(safe) - 1):\n last = TreeNode(q, parent=last)\n tree.append(last)\n success = len(extend) == len(safe)\n return last, success\n\n##################################################\n\ndef calculate_radius(d=2):\n # TODO: unify with get_threshold_fn\n # Sampling-based Algorithms for Optimal Motion Planning\n # http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.419.5503&rep=rep1&type=pdf\n # https://en.wikipedia.org/wiki/Volume_of_an_n-ball\n interval = (1 - 0)\n vol_free = interval ** d\n radius = 1./2\n vol_ball = np.pi * (radius ** d)\n gamma = 2 * ((1 + 1. / d) * (vol_free / vol_ball)) ** (1. / d)\n # threshold = gamma * (math.log(n) / n) ** (1. / d)\n return gamma\n\n\ndef default_weights(conf, weights=None, scale=1.):\n if weights is not None:\n return weights\n d = len(conf)\n weights = scale*np.ones(d)\n return weights\n\n\ndef get_embed_fn(weights):\n weights = np.array(weights)\n return lambda q: weights * q\n\n\ndef get_distance_fn(weights, p_norm=2):\n embed_fn = get_embed_fn(weights)\n return lambda q1, q2: np.linalg.norm(embed_fn(q2) - embed_fn(q1), ord=p_norm)\n\n\ndef distance_fn_from_extend_fn(extend_fn):\n # TODO: can compute cost between waypoints from extend_fn\n def distance_fn(q1, q2):\n path = list(extend_fn(q1, q2)) # TODO: cache\n return len(path) # TODO: subtract endpoints?\n return distance_fn\n\n##################################################\n\ndef get_difference_fn(circular={}):\n def fn(q2, q1):\n return tuple(circular_difference(v2, v1, interval=circular.get(i, UNBOUNDED_LIMITS))\n for i, (v2, v1) in enumerate(zip(q2, q1)))\n return fn\n\n\ndef get_cost_fn(distance_fn=get_distance, constant=0., coefficient=1.):\n def fn(q1, q2):\n return constant + coefficient*distance_fn(q1, q2)\n return fn\n\n\ndef get_duration_fn(difference_fn=get_delta, t_constant=0., t_min=0., **kwargs):\n v_max, a_max = get_default_limits(d=None, **kwargs)\n def fn(q1, q2):\n # TODO: be careful that not colinear with other waypoints\n difference = difference_fn(q1, q2)\n t_transit = 0.\n if not np.allclose(np.zeros(len(difference)), difference, atol=1e-6, rtol=0):\n t_transit = solve_linear(difference, v_max, a_max, only_duration=True)\n assert t_transit is not None\n #curve = solve_linear(difference, v_max, a_max)\n #t_transit = spline_duration(curve)\n t = t_constant + t_transit\n return max(t_min, t) # TODO: clip function\n return fn",
"step-ids": [
7,
8,
10,
12,
13
]
}
|
[
7,
8,
10,
12,
13
] |
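The distance helpers in this record compose into a weighted p-norm: default_weights picks per-dimension scales, get_embed_fn multiplies them in, and get_distance_fn takes the norm of the embedded difference. A standalone usage sketch (the two helpers are restated inline because the record's other imports are project-local):

import numpy as np

def get_embed_fn(weights):
    weights = np.array(weights)
    return lambda q: weights * q

def get_distance_fn(weights, p_norm=2):
    embed_fn = get_embed_fn(weights)
    return lambda q1, q2: np.linalg.norm(embed_fn(q2) - embed_fn(q1), ord=p_norm)

conf = (0.0, 0.0, 0.0)
weights = np.ones(len(conf))               # default_weights(conf) with scale=1
distance_fn = get_distance_fn(weights)
print(distance_fn((0, 0, 0), (3, 4, 0)))   # -> 5.0 for the unweighted L2 case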
#!/usr/bin/env python
"""Server that accepts and executes control-type commands on the bot."""
import sys
import os
from inspect import getmembers, ismethod
from simplejson.decoder import JSONDecodeError
import zmq
import signal
# This is required to make imports work
sys.path = [os.getcwd()] + sys.path
import bot.lib.lib as lib
import pub_server as pub_server_mod
import bot.lib.messages as msgs
from bot.driver.mec_driver import MecDriver
def is_api_method(obj, name):
"""Tests whether named method exists in obj and is flagged for API export.
:param obj: API-exported object to search for the given method on.
    :type obj: string
:param name: Name of method to check for.
:type name: string
:returns: True if given method is on given obj and is exported, else False.
"""
try:
method = getattr(obj, name)
except AttributeError:
return False
return (ismethod(method) and hasattr(method, "__api_call"))
class CtrlServer(object):
"""Exports bot control via ZMQ.
    Most functionality exported by CtrlServer is in the form of methods
exposed by the API. @lib.api_call decorators can be added to bot
systems, which tags them for export. They can then be called
remotely via CtrlClient, which is typically owned by an interface
like the CLI, which typically accepts commands from an agent like
a human.
Some control is exported directly by CtrlServer, not through the
API. For example, CtrlServer responds directly to ping messages,
list messages (which give the objects/methods exposed by the API),
and exit messages.
CtrlServer is the primary owner of bot resources, which we call
systems. For example, it's CtrlServer that instantiates gunner
and follower. Through those two, CtrlServer owns the gun, the
IR hub, the turret and basically every other bot system.
The messages that CtrlServer accepts and responds with are fully
specified in lib.messages. Make any changes to messages there.
CtrlServer can be instructed (via the API) to spawn a new thread
for a PubServer. When that happens, CtrlServer passes its systems
to PubServer, which can read their state and publish it over a
ZMQ PUB socket.
"""
def __init__(self, testing=None, config_file="bot/config.yaml"):
"""Build ZMQ REP socket and instantiate bot systems.
:param testing: True if running on simulated HW, False if on bot.
:type testing: boolean
:param config_file: Name of file to read configuration from.
:type config_file: string
"""
# Register signal handler, shut down cleanly (think motors)
signal.signal(signal.SIGINT, self.signal_handler)
# Load configuration and logger
self.config = lib.get_config(config_file)
self.logger = lib.get_logger()
# Testing flag will cause objects to run on simulated hardware
if testing is True or testing == "True":
self.logger.info("CtrlServer running in test mode")
lib.set_testing(True)
elif testing is None:
self.logger.info(
"Defaulting to config testing flag: {}".format(
self.config["testing"]))
lib.set_testing(self.config["testing"])
else:
self.logger.info("CtrlServer running in non-test mode")
lib.set_testing(False)
# Build socket to listen for requests
self.context = zmq.Context()
self.ctrl_sock = self.context.socket(zmq.REP)
self.server_bind_addr = "{protocol}://{host}:{port}".format(
protocol=self.config["server_protocol"],
host=self.config["server_bind_host"],
port=self.config["ctrl_server_port"])
try:
self.ctrl_sock.bind(self.server_bind_addr)
except zmq.ZMQError:
self.logger.error("ZMQ error. Is a server already running?")
self.logger.warning("May be connected to an old server instance.")
sys.exit(1)
self.systems = self.assign_subsystems()
self.logger.info("Control server initialized")
# Don't spawn pub_server until told to
self.pub_server = None
def signal_handler(self, signal, frame):
self.logger.info("Caught SIGINT (Ctrl+C), closing cleanly")
self.clean_up()
self.logger.info("Cleaned up bot, exiting...")
sys.exit(0)
def assign_subsystems(self):
"""Instantiates and stores references to bot subsystems.
:returns: Dict of subsystems, maps system name to instantiated object.
"""
self.driver = MecDriver()
systems = {}
systems["ctrl"] = self
systems["driver"] = self.driver
self.logger.debug("Systems: {}".format(systems))
return systems
def listen(self):
"""Perpetually listen for messages, pass them to generic handler."""
self.logger.info("Control server: {}".format(self.server_bind_addr))
while True:
try:
msg = self.ctrl_sock.recv_json()
reply = self.handle_msg(msg)
self.logger.debug("Sending: {}".format(reply))
self.ctrl_sock.send_json(reply)
except JSONDecodeError:
err_msg = "Not a JSON message!"
self.logger.warning(err_msg)
self.ctrl_sock.send_json(msgs.error(err_msg))
except KeyboardInterrupt:
self.logger.info("Exiting control server. Bye!")
self.clean_up()
sys.exit(0)
def handle_msg(self, msg):
"""Generic message handler. Hands-off based on type of message.
:param msg: Message, received via ZMQ from client, to handle.
:type msg: dict
:returns: An appropriate message reply dict, from lib.messages.
"""
self.logger.debug("Received: {}".format(msg))
try:
msg_type = msg["type"]
except KeyError as e:
return msgs.error(e)
if msg_type == "ping_req":
reply = msgs.ping_reply()
elif msg_type == "list_req":
reply = self.list_callables()
elif msg_type == "call_req":
try:
obj_name = msg["obj_name"]
method = msg["method"]
params = msg["params"]
reply = self.call_method(obj_name, method, params)
except KeyError as e:
return msgs.error(e)
elif msg_type == "exit_req":
self.logger.info("Received message to die. Bye!")
reply = msgs.exit_reply()
# Need to actually send reply here as we're about to exit
self.logger.debug("Sending: {}".format(reply))
self.ctrl_sock.send_json(reply)
self.clean_up()
sys.exit(0)
else:
err_msg = "Unrecognized message: {}".format(msg)
self.logger.warning(err_msg)
reply = msgs.error(err_msg)
return reply
def list_callables(self):
"""Build list of callable methods on each exported subsystem object.
Uses introspection to create a list of callable methods for each
registered subsystem object. Only methods which are flagged using the
@lib.api_call decorator will be included.
:returns: list_reply message with callable objects and their methods.
"""
self.logger.debug("List of callable API objects requested")
# Dict of subsystem object names to their callable methods.
callables = {}
for name, obj in self.systems.items():
methods = []
# Filter out methods which are not explicitly flagged for export
for member in getmembers(obj):
if is_api_method(obj, member[0]):
methods.append(member[0])
callables[name] = methods
return msgs.list_reply(callables)
def call_method(self, name, method, params):
"""Call a previously registered subsystem method by name. Only
methods tagged with the @api_call decorator can be called.
:param name: Assigned name of the registered subsystem.
:type name: string
:param method: Subsystem method to be called.
:type method: string
:param params: Additional parameters for the called method.
:type params: dict
:returns: call_reply or error message dict to be sent to caller.
"""
self.logger.debug("API call: {}.{}({})".format(name, method, params))
if name in self.systems:
obj = self.systems[name]
if is_api_method(obj, method):
try:
# Calls given obj.method, unpacking and passing params dict
call_return = getattr(obj, method)(**params)
msg = "Called {}.{}".format(name, method)
self.logger.debug(msg + ",returned:{}".format(call_return))
return msgs.call_reply(msg, call_return)
except TypeError:
# Raised when we have a mismatch of the method's kwargs
# TODO: Return argspec here?
err_msg = "Invalid params for {}.{}".format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
except Exception as e:
# Catch exception raised by called method, notify client
err_msg = "Exception: '{}'".format(str(e))
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid method: '{}.{}'".format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid object: '{}'".format(name)
self.logger.warning(err_msg)
return msgs.error(err_msg)
@lib.api_call
def echo(self, msg=None):
"""Echo a message back to the caller.
:param msg: Message to be echoed back to caller, default is None.
:returns: Message given by param, defaults to None.
"""
return msg
@lib.api_call
def exception(self):
"""Raise a test exception which will be returned to the caller."""
raise Exception("Exception test")
@lib.api_call
def spawn_pub_server(self):
"""Spawn publisher thread."""
if self.pub_server is None:
self.pub_server = pub_server_mod.PubServer(self.systems)
# Prevent pub_server thread from blocking the process from closing
self.pub_server.setDaemon(True)
self.pub_server.start()
msg = "Spawned pub server"
self.logger.info(msg)
return msg
else:
err_msg = "PubServer is already running"
self.logger.warning(err_msg)
return err_msg
@lib.api_call
def stop_full(self):
"""Stop all drive and gun motors, set turret to safe state."""
self.systems["driver"].move(0, 0)
def clean_up(self):
"""Tear down ZMQ socket."""
self.stop_full()
self.ctrl_sock.close()
self.context.term()
if __name__ == "__main__":
if len(sys.argv) == 2:
server = CtrlServer(sys.argv[1])
else:
server = CtrlServer()
server.listen()
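# Illustrative only, not part of the original module: a minimal REQ client for
# the REP socket served above. The message shapes mirror what handle_msg()
# accepts; the address is an assumption, since host/port live in bot/config.yaml.
#
#   import zmq
#   ctx = zmq.Context()
#   sock = ctx.socket(zmq.REQ)
#   sock.connect("tcp://127.0.0.1:8000")  # substitute the configured endpoint
#   sock.send_json({"type": "ping_req"})
#   print(sock.recv_json())               # ping_reply
#   sock.send_json({"type": "call_req", "obj_name": "ctrl",
#                   "method": "echo", "params": {"msg": "hi"}})
#   print(sock.recv_json())               # call_reply echoing "hi"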
|
normal
|
{
"blob_id": "ddb81e3ce0df44ee503c558b68b41c35935358a0",
"index": 8663,
"step-1": "<mask token>\n\n\nclass CtrlServer(object):\n <mask token>\n <mask token>\n <mask token>\n\n def assign_subsystems(self):\n \"\"\"Instantiates and stores references to bot subsystems.\n\n :returns: Dict of subsystems, maps system name to instantiated object.\n\n \"\"\"\n self.driver = MecDriver()\n systems = {}\n systems['ctrl'] = self\n systems['driver'] = self.driver\n self.logger.debug('Systems: {}'.format(systems))\n return systems\n\n def listen(self):\n \"\"\"Perpetually listen for messages, pass them to generic handler.\"\"\"\n self.logger.info('Control server: {}'.format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = 'Not a JSON message!'\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info('Exiting control server. Bye!')\n self.clean_up()\n sys.exit(0)\n\n def handle_msg(self, msg):\n \"\"\"Generic message handler. Hands-off based on type of message.\n\n :param msg: Message, received via ZMQ from client, to handle.\n :type msg: dict\n :returns: An appropriate message reply dict, from lib.messages.\n\n \"\"\"\n self.logger.debug('Received: {}'.format(msg))\n try:\n msg_type = msg['type']\n except KeyError as e:\n return msgs.error(e)\n if msg_type == 'ping_req':\n reply = msgs.ping_reply()\n elif msg_type == 'list_req':\n reply = self.list_callables()\n elif msg_type == 'call_req':\n try:\n obj_name = msg['obj_name']\n method = msg['method']\n params = msg['params']\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == 'exit_req':\n self.logger.info('Received message to die. Bye!')\n reply = msgs.exit_reply()\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = 'Unrecognized message: {}'.format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply\n\n def list_callables(self):\n \"\"\"Build list of callable methods on each exported subsystem object.\n\n Uses introspection to create a list of callable methods for each\n registered subsystem object. 
Only methods which are flagged using the\n @lib.api_call decorator will be included.\n\n :returns: list_reply message with callable objects and their methods.\n\n \"\"\"\n self.logger.debug('List of callable API objects requested')\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)\n <mask token>\n\n @lib.api_call\n def echo(self, msg=None):\n \"\"\"Echo a message back to the caller.\n\n :param msg: Message to be echoed back to caller, default is None.\n :returns: Message given by param, defaults to None.\n\n \"\"\"\n return msg\n\n @lib.api_call\n def exception(self):\n \"\"\"Raise a test exception which will be returned to the caller.\"\"\"\n raise Exception('Exception test')\n\n @lib.api_call\n def spawn_pub_server(self):\n \"\"\"Spawn publisher thread.\"\"\"\n if self.pub_server is None:\n self.pub_server = pub_server_mod.PubServer(self.systems)\n self.pub_server.setDaemon(True)\n self.pub_server.start()\n msg = 'Spawned pub server'\n self.logger.info(msg)\n return msg\n else:\n err_msg = 'PubServer is already running'\n self.logger.warning(err_msg)\n return err_msg\n\n @lib.api_call\n def stop_full(self):\n \"\"\"Stop all drive and gun motors, set turret to safe state.\"\"\"\n self.systems['driver'].move(0, 0)\n\n def clean_up(self):\n \"\"\"Tear down ZMQ socket.\"\"\"\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CtrlServer(object):\n <mask token>\n\n def __init__(self, testing=None, config_file='bot/config.yaml'):\n \"\"\"Build ZMQ REP socket and instantiate bot systems.\n\n :param testing: True if running on simulated HW, False if on bot.\n :type testing: boolean\n :param config_file: Name of file to read configuration from.\n :type config_file: string\n\n \"\"\"\n signal.signal(signal.SIGINT, self.signal_handler)\n self.config = lib.get_config(config_file)\n self.logger = lib.get_logger()\n if testing is True or testing == 'True':\n self.logger.info('CtrlServer running in test mode')\n lib.set_testing(True)\n elif testing is None:\n self.logger.info('Defaulting to config testing flag: {}'.format\n (self.config['testing']))\n lib.set_testing(self.config['testing'])\n else:\n self.logger.info('CtrlServer running in non-test mode')\n lib.set_testing(False)\n self.context = zmq.Context()\n self.ctrl_sock = self.context.socket(zmq.REP)\n self.server_bind_addr = '{protocol}://{host}:{port}'.format(protocol\n =self.config['server_protocol'], host=self.config[\n 'server_bind_host'], port=self.config['ctrl_server_port'])\n try:\n self.ctrl_sock.bind(self.server_bind_addr)\n except zmq.ZMQError:\n self.logger.error('ZMQ error. Is a server already running?')\n self.logger.warning('May be connected to an old server instance.')\n sys.exit(1)\n self.systems = self.assign_subsystems()\n self.logger.info('Control server initialized')\n self.pub_server = None\n\n def signal_handler(self, signal, frame):\n self.logger.info('Caught SIGINT (Ctrl+C), closing cleanly')\n self.clean_up()\n self.logger.info('Cleaned up bot, exiting...')\n sys.exit(0)\n\n def assign_subsystems(self):\n \"\"\"Instantiates and stores references to bot subsystems.\n\n :returns: Dict of subsystems, maps system name to instantiated object.\n\n \"\"\"\n self.driver = MecDriver()\n systems = {}\n systems['ctrl'] = self\n systems['driver'] = self.driver\n self.logger.debug('Systems: {}'.format(systems))\n return systems\n\n def listen(self):\n \"\"\"Perpetually listen for messages, pass them to generic handler.\"\"\"\n self.logger.info('Control server: {}'.format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = 'Not a JSON message!'\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info('Exiting control server. Bye!')\n self.clean_up()\n sys.exit(0)\n\n def handle_msg(self, msg):\n \"\"\"Generic message handler. Hands-off based on type of message.\n\n :param msg: Message, received via ZMQ from client, to handle.\n :type msg: dict\n :returns: An appropriate message reply dict, from lib.messages.\n\n \"\"\"\n self.logger.debug('Received: {}'.format(msg))\n try:\n msg_type = msg['type']\n except KeyError as e:\n return msgs.error(e)\n if msg_type == 'ping_req':\n reply = msgs.ping_reply()\n elif msg_type == 'list_req':\n reply = self.list_callables()\n elif msg_type == 'call_req':\n try:\n obj_name = msg['obj_name']\n method = msg['method']\n params = msg['params']\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == 'exit_req':\n self.logger.info('Received message to die. 
Bye!')\n reply = msgs.exit_reply()\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = 'Unrecognized message: {}'.format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply\n\n def list_callables(self):\n \"\"\"Build list of callable methods on each exported subsystem object.\n\n Uses introspection to create a list of callable methods for each\n registered subsystem object. Only methods which are flagged using the\n @lib.api_call decorator will be included.\n\n :returns: list_reply message with callable objects and their methods.\n\n \"\"\"\n self.logger.debug('List of callable API objects requested')\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)\n\n def call_method(self, name, method, params):\n \"\"\"Call a previously registered subsystem method by name. Only\n methods tagged with the @api_call decorator can be called.\n\n :param name: Assigned name of the registered subsystem.\n :type name: string\n :param method: Subsystem method to be called.\n :type method: string\n :param params: Additional parameters for the called method.\n :type params: dict\n :returns: call_reply or error message dict to be sent to caller.\n\n \"\"\"\n self.logger.debug('API call: {}.{}({})'.format(name, method, params))\n if name in self.systems:\n obj = self.systems[name]\n if is_api_method(obj, method):\n try:\n call_return = getattr(obj, method)(**params)\n msg = 'Called {}.{}'.format(name, method)\n self.logger.debug(msg + ',returned:{}'.format(call_return))\n return msgs.call_reply(msg, call_return)\n except TypeError:\n err_msg = 'Invalid params for {}.{}'.format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n except Exception as e:\n err_msg = \"Exception: '{}'\".format(str(e))\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid method: '{}.{}'\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid object: '{}'\".format(name)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n\n @lib.api_call\n def echo(self, msg=None):\n \"\"\"Echo a message back to the caller.\n\n :param msg: Message to be echoed back to caller, default is None.\n :returns: Message given by param, defaults to None.\n\n \"\"\"\n return msg\n\n @lib.api_call\n def exception(self):\n \"\"\"Raise a test exception which will be returned to the caller.\"\"\"\n raise Exception('Exception test')\n\n @lib.api_call\n def spawn_pub_server(self):\n \"\"\"Spawn publisher thread.\"\"\"\n if self.pub_server is None:\n self.pub_server = pub_server_mod.PubServer(self.systems)\n self.pub_server.setDaemon(True)\n self.pub_server.start()\n msg = 'Spawned pub server'\n self.logger.info(msg)\n return msg\n else:\n err_msg = 'PubServer is already running'\n self.logger.warning(err_msg)\n return err_msg\n\n @lib.api_call\n def stop_full(self):\n \"\"\"Stop all drive and gun motors, set turret to safe state.\"\"\"\n self.systems['driver'].move(0, 0)\n\n def clean_up(self):\n \"\"\"Tear down ZMQ socket.\"\"\"\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef is_api_method(obj, name):\n \"\"\"Tests whether named method exists in obj and is flagged for API export.\n\n :param obj: API-exported object to search for the given method on.\n :type ojb: string\n :param name: Name of method to check for.\n :type name: string\n :returns: True if given method is on given obj and is exported, else False.\n\n \"\"\"\n try:\n method = getattr(obj, name)\n except AttributeError:\n return False\n return ismethod(method) and hasattr(method, '__api_call')\n\n\nclass CtrlServer(object):\n \"\"\"Exports bot control via ZMQ.\n\n Most functionally exported by CtrlServer is in the form of methods\n exposed by the API. @lib.api_call decorators can be added to bot\n systems, which tags them for export. They can then be called\n remotely via CtrlClient, which is typically owned by an interface\n like the CLI, which typically accepts commands from an agent like\n a human.\n\n Some control is exported directly by CtrlServer, not through the\n API. For example, CtrlServer responds directly to ping messages,\n list messages (which give the objects/methods exposed by the API),\n and exit messages.\n\n CtrlServer is the primary owner of bot resources, which we call\n systems. For example, it's CtrlServer that instantiates gunner\n and follower. Through those two, CtrlServer owns the gun, the\n IR hub, the turret and basically every other bot system.\n\n The messages that CtrlServer accepts and responds with are fully\n specified in lib.messages. Make any changes to messages there.\n\n CtrlServer can be instructed (via the API) to spawn a new thread\n for a PubServer. When that happens, CtrlServer passes its systems\n to PubServer, which can read their state and publish it over a\n ZMQ PUB socket.\n\n \"\"\"\n\n def __init__(self, testing=None, config_file='bot/config.yaml'):\n \"\"\"Build ZMQ REP socket and instantiate bot systems.\n\n :param testing: True if running on simulated HW, False if on bot.\n :type testing: boolean\n :param config_file: Name of file to read configuration from.\n :type config_file: string\n\n \"\"\"\n signal.signal(signal.SIGINT, self.signal_handler)\n self.config = lib.get_config(config_file)\n self.logger = lib.get_logger()\n if testing is True or testing == 'True':\n self.logger.info('CtrlServer running in test mode')\n lib.set_testing(True)\n elif testing is None:\n self.logger.info('Defaulting to config testing flag: {}'.format\n (self.config['testing']))\n lib.set_testing(self.config['testing'])\n else:\n self.logger.info('CtrlServer running in non-test mode')\n lib.set_testing(False)\n self.context = zmq.Context()\n self.ctrl_sock = self.context.socket(zmq.REP)\n self.server_bind_addr = '{protocol}://{host}:{port}'.format(protocol\n =self.config['server_protocol'], host=self.config[\n 'server_bind_host'], port=self.config['ctrl_server_port'])\n try:\n self.ctrl_sock.bind(self.server_bind_addr)\n except zmq.ZMQError:\n self.logger.error('ZMQ error. 
Is a server already running?')\n self.logger.warning('May be connected to an old server instance.')\n sys.exit(1)\n self.systems = self.assign_subsystems()\n self.logger.info('Control server initialized')\n self.pub_server = None\n\n def signal_handler(self, signal, frame):\n self.logger.info('Caught SIGINT (Ctrl+C), closing cleanly')\n self.clean_up()\n self.logger.info('Cleaned up bot, exiting...')\n sys.exit(0)\n\n def assign_subsystems(self):\n \"\"\"Instantiates and stores references to bot subsystems.\n\n :returns: Dict of subsystems, maps system name to instantiated object.\n\n \"\"\"\n self.driver = MecDriver()\n systems = {}\n systems['ctrl'] = self\n systems['driver'] = self.driver\n self.logger.debug('Systems: {}'.format(systems))\n return systems\n\n def listen(self):\n \"\"\"Perpetually listen for messages, pass them to generic handler.\"\"\"\n self.logger.info('Control server: {}'.format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = 'Not a JSON message!'\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info('Exiting control server. Bye!')\n self.clean_up()\n sys.exit(0)\n\n def handle_msg(self, msg):\n \"\"\"Generic message handler. Hands-off based on type of message.\n\n :param msg: Message, received via ZMQ from client, to handle.\n :type msg: dict\n :returns: An appropriate message reply dict, from lib.messages.\n\n \"\"\"\n self.logger.debug('Received: {}'.format(msg))\n try:\n msg_type = msg['type']\n except KeyError as e:\n return msgs.error(e)\n if msg_type == 'ping_req':\n reply = msgs.ping_reply()\n elif msg_type == 'list_req':\n reply = self.list_callables()\n elif msg_type == 'call_req':\n try:\n obj_name = msg['obj_name']\n method = msg['method']\n params = msg['params']\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == 'exit_req':\n self.logger.info('Received message to die. Bye!')\n reply = msgs.exit_reply()\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = 'Unrecognized message: {}'.format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply\n\n def list_callables(self):\n \"\"\"Build list of callable methods on each exported subsystem object.\n\n Uses introspection to create a list of callable methods for each\n registered subsystem object. Only methods which are flagged using the\n @lib.api_call decorator will be included.\n\n :returns: list_reply message with callable objects and their methods.\n\n \"\"\"\n self.logger.debug('List of callable API objects requested')\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)\n\n def call_method(self, name, method, params):\n \"\"\"Call a previously registered subsystem method by name. 
Only\n methods tagged with the @api_call decorator can be called.\n\n :param name: Assigned name of the registered subsystem.\n :type name: string\n :param method: Subsystem method to be called.\n :type method: string\n :param params: Additional parameters for the called method.\n :type params: dict\n :returns: call_reply or error message dict to be sent to caller.\n\n \"\"\"\n self.logger.debug('API call: {}.{}({})'.format(name, method, params))\n if name in self.systems:\n obj = self.systems[name]\n if is_api_method(obj, method):\n try:\n call_return = getattr(obj, method)(**params)\n msg = 'Called {}.{}'.format(name, method)\n self.logger.debug(msg + ',returned:{}'.format(call_return))\n return msgs.call_reply(msg, call_return)\n except TypeError:\n err_msg = 'Invalid params for {}.{}'.format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n except Exception as e:\n err_msg = \"Exception: '{}'\".format(str(e))\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid method: '{}.{}'\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid object: '{}'\".format(name)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n\n @lib.api_call\n def echo(self, msg=None):\n \"\"\"Echo a message back to the caller.\n\n :param msg: Message to be echoed back to caller, default is None.\n :returns: Message given by param, defaults to None.\n\n \"\"\"\n return msg\n\n @lib.api_call\n def exception(self):\n \"\"\"Raise a test exception which will be returned to the caller.\"\"\"\n raise Exception('Exception test')\n\n @lib.api_call\n def spawn_pub_server(self):\n \"\"\"Spawn publisher thread.\"\"\"\n if self.pub_server is None:\n self.pub_server = pub_server_mod.PubServer(self.systems)\n self.pub_server.setDaemon(True)\n self.pub_server.start()\n msg = 'Spawned pub server'\n self.logger.info(msg)\n return msg\n else:\n err_msg = 'PubServer is already running'\n self.logger.warning(err_msg)\n return err_msg\n\n @lib.api_call\n def stop_full(self):\n \"\"\"Stop all drive and gun motors, set turret to safe state.\"\"\"\n self.systems['driver'].move(0, 0)\n\n def clean_up(self):\n \"\"\"Tear down ZMQ socket.\"\"\"\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 2:\n server = CtrlServer(sys.argv[1])\n else:\n server = CtrlServer()\n server.listen()\n",
"step-4": "<mask token>\nimport sys\nimport os\nfrom inspect import getmembers, ismethod\nfrom simplejson.decoder import JSONDecodeError\nimport zmq\nimport signal\nsys.path = [os.getcwd()] + sys.path\nimport bot.lib.lib as lib\nimport pub_server as pub_server_mod\nimport bot.lib.messages as msgs\nfrom bot.driver.mec_driver import MecDriver\n\n\ndef is_api_method(obj, name):\n \"\"\"Tests whether named method exists in obj and is flagged for API export.\n\n :param obj: API-exported object to search for the given method on.\n :type ojb: string\n :param name: Name of method to check for.\n :type name: string\n :returns: True if given method is on given obj and is exported, else False.\n\n \"\"\"\n try:\n method = getattr(obj, name)\n except AttributeError:\n return False\n return ismethod(method) and hasattr(method, '__api_call')\n\n\nclass CtrlServer(object):\n \"\"\"Exports bot control via ZMQ.\n\n Most functionally exported by CtrlServer is in the form of methods\n exposed by the API. @lib.api_call decorators can be added to bot\n systems, which tags them for export. They can then be called\n remotely via CtrlClient, which is typically owned by an interface\n like the CLI, which typically accepts commands from an agent like\n a human.\n\n Some control is exported directly by CtrlServer, not through the\n API. For example, CtrlServer responds directly to ping messages,\n list messages (which give the objects/methods exposed by the API),\n and exit messages.\n\n CtrlServer is the primary owner of bot resources, which we call\n systems. For example, it's CtrlServer that instantiates gunner\n and follower. Through those two, CtrlServer owns the gun, the\n IR hub, the turret and basically every other bot system.\n\n The messages that CtrlServer accepts and responds with are fully\n specified in lib.messages. Make any changes to messages there.\n\n CtrlServer can be instructed (via the API) to spawn a new thread\n for a PubServer. When that happens, CtrlServer passes its systems\n to PubServer, which can read their state and publish it over a\n ZMQ PUB socket.\n\n \"\"\"\n\n def __init__(self, testing=None, config_file='bot/config.yaml'):\n \"\"\"Build ZMQ REP socket and instantiate bot systems.\n\n :param testing: True if running on simulated HW, False if on bot.\n :type testing: boolean\n :param config_file: Name of file to read configuration from.\n :type config_file: string\n\n \"\"\"\n signal.signal(signal.SIGINT, self.signal_handler)\n self.config = lib.get_config(config_file)\n self.logger = lib.get_logger()\n if testing is True or testing == 'True':\n self.logger.info('CtrlServer running in test mode')\n lib.set_testing(True)\n elif testing is None:\n self.logger.info('Defaulting to config testing flag: {}'.format\n (self.config['testing']))\n lib.set_testing(self.config['testing'])\n else:\n self.logger.info('CtrlServer running in non-test mode')\n lib.set_testing(False)\n self.context = zmq.Context()\n self.ctrl_sock = self.context.socket(zmq.REP)\n self.server_bind_addr = '{protocol}://{host}:{port}'.format(protocol\n =self.config['server_protocol'], host=self.config[\n 'server_bind_host'], port=self.config['ctrl_server_port'])\n try:\n self.ctrl_sock.bind(self.server_bind_addr)\n except zmq.ZMQError:\n self.logger.error('ZMQ error. 
Is a server already running?')\n self.logger.warning('May be connected to an old server instance.')\n sys.exit(1)\n self.systems = self.assign_subsystems()\n self.logger.info('Control server initialized')\n self.pub_server = None\n\n def signal_handler(self, signal, frame):\n self.logger.info('Caught SIGINT (Ctrl+C), closing cleanly')\n self.clean_up()\n self.logger.info('Cleaned up bot, exiting...')\n sys.exit(0)\n\n def assign_subsystems(self):\n \"\"\"Instantiates and stores references to bot subsystems.\n\n :returns: Dict of subsystems, maps system name to instantiated object.\n\n \"\"\"\n self.driver = MecDriver()\n systems = {}\n systems['ctrl'] = self\n systems['driver'] = self.driver\n self.logger.debug('Systems: {}'.format(systems))\n return systems\n\n def listen(self):\n \"\"\"Perpetually listen for messages, pass them to generic handler.\"\"\"\n self.logger.info('Control server: {}'.format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = 'Not a JSON message!'\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info('Exiting control server. Bye!')\n self.clean_up()\n sys.exit(0)\n\n def handle_msg(self, msg):\n \"\"\"Generic message handler. Hands-off based on type of message.\n\n :param msg: Message, received via ZMQ from client, to handle.\n :type msg: dict\n :returns: An appropriate message reply dict, from lib.messages.\n\n \"\"\"\n self.logger.debug('Received: {}'.format(msg))\n try:\n msg_type = msg['type']\n except KeyError as e:\n return msgs.error(e)\n if msg_type == 'ping_req':\n reply = msgs.ping_reply()\n elif msg_type == 'list_req':\n reply = self.list_callables()\n elif msg_type == 'call_req':\n try:\n obj_name = msg['obj_name']\n method = msg['method']\n params = msg['params']\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == 'exit_req':\n self.logger.info('Received message to die. Bye!')\n reply = msgs.exit_reply()\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = 'Unrecognized message: {}'.format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply\n\n def list_callables(self):\n \"\"\"Build list of callable methods on each exported subsystem object.\n\n Uses introspection to create a list of callable methods for each\n registered subsystem object. Only methods which are flagged using the\n @lib.api_call decorator will be included.\n\n :returns: list_reply message with callable objects and their methods.\n\n \"\"\"\n self.logger.debug('List of callable API objects requested')\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)\n\n def call_method(self, name, method, params):\n \"\"\"Call a previously registered subsystem method by name. 
Only\n methods tagged with the @api_call decorator can be called.\n\n :param name: Assigned name of the registered subsystem.\n :type name: string\n :param method: Subsystem method to be called.\n :type method: string\n :param params: Additional parameters for the called method.\n :type params: dict\n :returns: call_reply or error message dict to be sent to caller.\n\n \"\"\"\n self.logger.debug('API call: {}.{}({})'.format(name, method, params))\n if name in self.systems:\n obj = self.systems[name]\n if is_api_method(obj, method):\n try:\n call_return = getattr(obj, method)(**params)\n msg = 'Called {}.{}'.format(name, method)\n self.logger.debug(msg + ',returned:{}'.format(call_return))\n return msgs.call_reply(msg, call_return)\n except TypeError:\n err_msg = 'Invalid params for {}.{}'.format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n except Exception as e:\n err_msg = \"Exception: '{}'\".format(str(e))\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid method: '{}.{}'\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid object: '{}'\".format(name)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n\n @lib.api_call\n def echo(self, msg=None):\n \"\"\"Echo a message back to the caller.\n\n :param msg: Message to be echoed back to caller, default is None.\n :returns: Message given by param, defaults to None.\n\n \"\"\"\n return msg\n\n @lib.api_call\n def exception(self):\n \"\"\"Raise a test exception which will be returned to the caller.\"\"\"\n raise Exception('Exception test')\n\n @lib.api_call\n def spawn_pub_server(self):\n \"\"\"Spawn publisher thread.\"\"\"\n if self.pub_server is None:\n self.pub_server = pub_server_mod.PubServer(self.systems)\n self.pub_server.setDaemon(True)\n self.pub_server.start()\n msg = 'Spawned pub server'\n self.logger.info(msg)\n return msg\n else:\n err_msg = 'PubServer is already running'\n self.logger.warning(err_msg)\n return err_msg\n\n @lib.api_call\n def stop_full(self):\n \"\"\"Stop all drive and gun motors, set turret to safe state.\"\"\"\n self.systems['driver'].move(0, 0)\n\n def clean_up(self):\n \"\"\"Tear down ZMQ socket.\"\"\"\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 2:\n server = CtrlServer(sys.argv[1])\n else:\n server = CtrlServer()\n server.listen()\n",
"step-5": "#!/usr/bin/env python\n\"\"\"Server that accepts and executes control-type commands on the bot.\"\"\"\n\nimport sys\nimport os\nfrom inspect import getmembers, ismethod\nfrom simplejson.decoder import JSONDecodeError\nimport zmq\nimport signal\n\n# This is required to make imports work\nsys.path = [os.getcwd()] + sys.path\n\nimport bot.lib.lib as lib\nimport pub_server as pub_server_mod\nimport bot.lib.messages as msgs\n\nfrom bot.driver.mec_driver import MecDriver\n\n\ndef is_api_method(obj, name):\n \"\"\"Tests whether named method exists in obj and is flagged for API export.\n\n :param obj: API-exported object to search for the given method on.\n :type ojb: string\n :param name: Name of method to check for.\n :type name: string\n :returns: True if given method is on given obj and is exported, else False.\n\n \"\"\"\n try:\n method = getattr(obj, name)\n except AttributeError:\n return False\n return (ismethod(method) and hasattr(method, \"__api_call\"))\n\n\nclass CtrlServer(object):\n\n \"\"\"Exports bot control via ZMQ.\n\n Most functionally exported by CtrlServer is in the form of methods\n exposed by the API. @lib.api_call decorators can be added to bot\n systems, which tags them for export. They can then be called\n remotely via CtrlClient, which is typically owned by an interface\n like the CLI, which typically accepts commands from an agent like\n a human.\n\n Some control is exported directly by CtrlServer, not through the\n API. For example, CtrlServer responds directly to ping messages,\n list messages (which give the objects/methods exposed by the API),\n and exit messages.\n\n CtrlServer is the primary owner of bot resources, which we call\n systems. For example, it's CtrlServer that instantiates gunner\n and follower. Through those two, CtrlServer owns the gun, the\n IR hub, the turret and basically every other bot system.\n\n The messages that CtrlServer accepts and responds with are fully\n specified in lib.messages. Make any changes to messages there.\n\n CtrlServer can be instructed (via the API) to spawn a new thread\n for a PubServer. 
When that happens, CtrlServer passes its systems\n to PubServer, which can read their state and publish it over a\n ZMQ PUB socket.\n\n \"\"\"\n\n def __init__(self, testing=None, config_file=\"bot/config.yaml\"):\n \"\"\"Build ZMQ REP socket and instantiate bot systems.\n\n :param testing: True if running on simulated HW, False if on bot.\n :type testing: boolean\n :param config_file: Name of file to read configuration from.\n :type config_file: string\n\n \"\"\"\n # Register signal handler, shut down cleanly (think motors)\n signal.signal(signal.SIGINT, self.signal_handler)\n\n # Load configuration and logger\n self.config = lib.get_config(config_file)\n self.logger = lib.get_logger()\n\n # Testing flag will cause objects to run on simulated hardware\n if testing is True or testing == \"True\":\n self.logger.info(\"CtrlServer running in test mode\")\n lib.set_testing(True)\n elif testing is None:\n self.logger.info(\n \"Defaulting to config testing flag: {}\".format(\n self.config[\"testing\"]))\n lib.set_testing(self.config[\"testing\"])\n else:\n self.logger.info(\"CtrlServer running in non-test mode\")\n lib.set_testing(False)\n\n # Build socket to listen for requests\n self.context = zmq.Context()\n self.ctrl_sock = self.context.socket(zmq.REP)\n self.server_bind_addr = \"{protocol}://{host}:{port}\".format(\n protocol=self.config[\"server_protocol\"],\n host=self.config[\"server_bind_host\"],\n port=self.config[\"ctrl_server_port\"])\n try:\n self.ctrl_sock.bind(self.server_bind_addr)\n except zmq.ZMQError:\n self.logger.error(\"ZMQ error. Is a server already running?\")\n self.logger.warning(\"May be connected to an old server instance.\")\n sys.exit(1)\n\n self.systems = self.assign_subsystems()\n self.logger.info(\"Control server initialized\")\n\n # Don't spawn pub_server until told to\n self.pub_server = None\n\n def signal_handler(self, signal, frame):\n self.logger.info(\"Caught SIGINT (Ctrl+C), closing cleanly\")\n self.clean_up()\n self.logger.info(\"Cleaned up bot, exiting...\")\n sys.exit(0)\n\n def assign_subsystems(self):\n \"\"\"Instantiates and stores references to bot subsystems.\n\n :returns: Dict of subsystems, maps system name to instantiated object.\n\n \"\"\"\n\n self.driver = MecDriver()\n\n systems = {}\n systems[\"ctrl\"] = self\n systems[\"driver\"] = self.driver\n\n self.logger.debug(\"Systems: {}\".format(systems))\n return systems\n\n def listen(self):\n \"\"\"Perpetually listen for messages, pass them to generic handler.\"\"\"\n self.logger.info(\"Control server: {}\".format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = \"Not a JSON message!\"\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info(\"Exiting control server. Bye!\")\n self.clean_up()\n sys.exit(0)\n\n def handle_msg(self, msg):\n \"\"\"Generic message handler. 
Hands-off based on type of message.\n\n :param msg: Message, received via ZMQ from client, to handle.\n :type msg: dict\n :returns: An appropriate message reply dict, from lib.messages.\n\n \"\"\"\n self.logger.debug(\"Received: {}\".format(msg))\n\n try:\n msg_type = msg[\"type\"]\n except KeyError as e:\n return msgs.error(e)\n\n if msg_type == \"ping_req\":\n reply = msgs.ping_reply()\n elif msg_type == \"list_req\":\n reply = self.list_callables()\n elif msg_type == \"call_req\":\n try:\n obj_name = msg[\"obj_name\"]\n method = msg[\"method\"]\n params = msg[\"params\"]\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == \"exit_req\":\n self.logger.info(\"Received message to die. Bye!\")\n reply = msgs.exit_reply()\n # Need to actually send reply here as we're about to exit\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = \"Unrecognized message: {}\".format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply\n\n def list_callables(self):\n \"\"\"Build list of callable methods on each exported subsystem object.\n\n Uses introspection to create a list of callable methods for each\n registered subsystem object. Only methods which are flagged using the\n @lib.api_call decorator will be included.\n\n :returns: list_reply message with callable objects and their methods.\n\n \"\"\"\n self.logger.debug(\"List of callable API objects requested\")\n # Dict of subsystem object names to their callable methods.\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n # Filter out methods which are not explicitly flagged for export\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)\n\n def call_method(self, name, method, params):\n \"\"\"Call a previously registered subsystem method by name. 
Only\n methods tagged with the @api_call decorator can be called.\n\n :param name: Assigned name of the registered subsystem.\n :type name: string\n :param method: Subsystem method to be called.\n :type method: string\n :param params: Additional parameters for the called method.\n :type params: dict\n :returns: call_reply or error message dict to be sent to caller.\n\n \"\"\"\n self.logger.debug(\"API call: {}.{}({})\".format(name, method, params))\n if name in self.systems:\n obj = self.systems[name]\n if is_api_method(obj, method):\n try:\n # Calls given obj.method, unpacking and passing params dict\n call_return = getattr(obj, method)(**params)\n msg = \"Called {}.{}\".format(name, method)\n self.logger.debug(msg + \",returned:{}\".format(call_return))\n return msgs.call_reply(msg, call_return)\n except TypeError:\n # Raised when we have a mismatch of the method's kwargs\n # TODO: Return argspec here?\n err_msg = \"Invalid params for {}.{}\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n except Exception as e:\n # Catch exception raised by called method, notify client\n err_msg = \"Exception: '{}'\".format(str(e))\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid method: '{}.{}'\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid object: '{}'\".format(name)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n\n @lib.api_call\n def echo(self, msg=None):\n \"\"\"Echo a message back to the caller.\n\n :param msg: Message to be echoed back to caller, default is None.\n :returns: Message given by param, defaults to None.\n\n \"\"\"\n return msg\n\n @lib.api_call\n def exception(self):\n \"\"\"Raise a test exception which will be returned to the caller.\"\"\"\n raise Exception(\"Exception test\")\n\n @lib.api_call\n def spawn_pub_server(self):\n \"\"\"Spawn publisher thread.\"\"\"\n if self.pub_server is None:\n self.pub_server = pub_server_mod.PubServer(self.systems)\n # Prevent pub_server thread from blocking the process from closing\n self.pub_server.setDaemon(True)\n self.pub_server.start()\n msg = \"Spawned pub server\"\n self.logger.info(msg)\n return msg\n else:\n err_msg = \"PubServer is already running\"\n self.logger.warning(err_msg)\n return err_msg\n\n @lib.api_call\n def stop_full(self):\n \"\"\"Stop all drive and gun motors, set turret to safe state.\"\"\"\n self.systems[\"driver\"].move(0, 0)\n\n def clean_up(self):\n \"\"\"Tear down ZMQ socket.\"\"\"\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 2:\n server = CtrlServer(sys.argv[1])\n else:\n server = CtrlServer()\n server.listen()\n",
"step-ids": [
10,
13,
16,
18,
19
]
}
|
[
10,
13,
16,
18,
19
] |
from dataclasses import dataclass, field
from typing import List
@dataclass
class Root:
a: List[object] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 2,
"max_occurs": 4,
"sequence": 1,
}
)
b: List[object] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"max_occurs": 2,
"sequence": 1,
}
)
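# Illustrative usage sketch: dataclasses treat `metadata` as opaque, so keys
# like "min_occurs"/"max_occurs" (xsdata-style XML binding hints, an
# assumption here) are not enforced on plain instantiation.
root = Root(a=[1, 2], b=["x"])
print(root)  # Root(a=[1, 2], b=['x'])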
|
normal
|
{
"blob_id": "7e318ae7317eac90d6ce9a6b1d0dcc8ff65abef0",
"index": 9430,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@dataclass\nclass Root:\n a: List[object] = field(default_factory=list, metadata={'type':\n 'Element', 'namespace': '', 'min_occurs': 2, 'max_occurs': 4,\n 'sequence': 1})\n b: List[object] = field(default_factory=list, metadata={'type':\n 'Element', 'namespace': '', 'max_occurs': 2, 'sequence': 1})\n",
"step-3": "from dataclasses import dataclass, field\nfrom typing import List\n\n\n@dataclass\nclass Root:\n a: List[object] = field(default_factory=list, metadata={'type':\n 'Element', 'namespace': '', 'min_occurs': 2, 'max_occurs': 4,\n 'sequence': 1})\n b: List[object] = field(default_factory=list, metadata={'type':\n 'Element', 'namespace': '', 'max_occurs': 2, 'sequence': 1})\n",
"step-4": "from dataclasses import dataclass, field\nfrom typing import List\n\n\n@dataclass\nclass Root:\n a: List[object] = field(\n default_factory=list,\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"min_occurs\": 2,\n \"max_occurs\": 4,\n \"sequence\": 1,\n }\n )\n b: List[object] = field(\n default_factory=list,\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"max_occurs\": 2,\n \"sequence\": 1,\n }\n )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
torch.manual_seed(1)
if use_cuda:
torch.cuda.manual_seed(1)
np.random.seed(1)
<|reserved_special_token_0|>
print('DCCNet training script')
<|reserved_special_token_0|>
parser.add_argument('--checkpoint', type=str, default='')
parser.add_argument('--image_size', type=int, default=400)
parser.add_argument('--dataset_image_path', type=str, default=
'datasets/pf-pascal/', help='path to PF Pascal dataset')
parser.add_argument('--dataset_csv_path', type=str, default=
'datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')
parser.add_argument('--num_epochs', type=int, default=5, help=
'number of training epochs')
parser.add_argument('--batch_size', type=int, default=16, help=
'training batch size')
parser.add_argument('--lr', type=float, default=0.0005, help='learning rate')
parser.add_argument('--result_model_fn', type=str, default=
'checkpoint_adam', help='trained model filename')
parser.add_argument('--result-model-dir', type=str, default=
'../model/checkpoints', help='path to trained models folder')
parser.add_argument('--fe_finetune_params', type=int, default=0, help=
'number of layers to finetune')
parser.add_argument('--exp_name', type=str, default='exp_delete', help=
'experiment name')
parser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,
5, 5], help='kernels sizes in neigh. cons.')
parser.add_argument('--ncons_channels', nargs='+', type=int, default=[16,
16, 1], help='channels in neigh. cons')
parser.add_argument('--sce_kernel_size', type=int, default=25, help=
'kernel size in sce.')
parser.add_argument('--sce_hidden_dim', type=int, default=1024, help=
'hidden dim in sce')
parser.add_argument('--scaleloss_weight', type=float, default=1.0, help=
    'whether to use the scale loss and, if so, its weight')
parser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int,
default=[5, 5, 5], help='kernels sizes in dynamic fusion net.')
parser.add_argument('--att_scale_ncons_channels', nargs='+', type=int,
default=[16, 16, 1], help='channels in dynamic fusion net')
<|reserved_special_token_0|>
print(args)
print('Creating CNN model...')
<|reserved_special_token_0|>
if args.fe_finetune_params > 0:
for i in range(args.fe_finetune_params):
for p in model.module.FeatureExtraction.model[-1][-(i + 1)].parameters(
):
p.requires_grad = True
print('Trainable parameters:')
<|reserved_special_token_0|>
for i, param in enumerate(model.named_parameters()):
name, p = param
if p.requires_grad:
count += 1
print(str(count) + ': ' + name + '\t' + str(p.shape) + '\t')
print(model)
print('using Adam optimizer')
<|reserved_special_token_0|>
if not exists(dirname(log_name)):
makedirs(dirname(log_name))
print('Checkpoint name: ' + checkpoint_name)
<|reserved_special_token_0|>
def process_epoch(mode, epoch, model, loss_fn, optimizer, dataloader,
batch_preprocessing_fn, use_cuda=True, log_interval=50):
epoch_loss = 0
for batch_idx, batch in enumerate(dataloader):
st = time.time()
if mode == 'train':
optimizer.zero_grad()
tnf_batch = batch_preprocessing_fn(batch)
loss = loss_fn(model, tnf_batch)
loss_np = loss.data.cpu().numpy()[0]
epoch_loss += loss_np
if mode == 'train':
loss.backward()
optimizer.step()
else:
loss = None
if batch_idx % log_interval == 0:
print(mode.capitalize() +
' Epoch: {} [{}/{} ({:.0f}%)]\t\tLoss: {:.12f}\t\tcost time: {:.1f}'
.format(epoch, batch_idx, len(dataloader), 100.0 *
batch_idx / len(dataloader), loss_np, time.time() - st))
epoch_loss /= len(dataloader)
print(mode.capitalize() + ' set: Average loss: {:.12f}'.format(epoch_loss))
return epoch_loss
<|reserved_special_token_0|>
model.module.FeatureExtraction.eval()
print('Starting training...')
for epoch in range(1, args.num_epochs + 1):
st = time.time()
train_loss_curepoch = process_epoch('train', epoch, model, loss_fn,
optimizer, dataloader, batch_preprocessing_fn, log_interval=1)
time_train = time.time() - st
st = time.time()
val_loss_curepoch = process_epoch('val', epoch, model, loss_fn,
optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)
time_valloss = time.time() - st
st = time.time()
val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck, model=
model, verbose=False)
time_valpck = time.time() - st
train_loss[epoch - 1] = train_loss_curepoch
val_loss[epoch - 1] = val_loss_curepoch
val_pcks[epoch - 1] = val_pck_curepoch
is_best = val_pcks[epoch - 1] > best_val_pck
best_val_pck = max(val_pcks[epoch - 1], best_val_pck)
save_checkpoint({'epoch': epoch, 'args': args, 'state_dict': model.
state_dict(), 'optimizer': optimizer.state_dict(), 'train_loss':
train_loss, 'val_loss': val_loss, 'val_pck': val_pcks,
'best_val_pck': best_val_pck}, is_best, checkpoint_name,
save_all_epochs=False)
message = (
"""Epoch{} Train_loss{:.6f} cost time{:.1f} Val_loss{:.6f} cost time{:.1f} Val_pck{:.6f} cost time{:.1f}
"""
.format(epoch, train_loss_curepoch, time_train, val_loss_curepoch,
time_valloss, val_pck_curepoch, time_valpck))
print(message)
with open(log_name, 'a') as log_file:
log_file.write('%s\n' % message)
print('Done!')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
use_cuda = torch.cuda.is_available()
torch.manual_seed(1)
if use_cuda:
torch.cuda.manual_seed(1)
np.random.seed(1)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
print('DCCNet training script')
parser = argparse.ArgumentParser(description='Compute PF Pascal matches')
parser.add_argument('--checkpoint', type=str, default='')
parser.add_argument('--image_size', type=int, default=400)
parser.add_argument('--dataset_image_path', type=str, default=
'datasets/pf-pascal/', help='path to PF Pascal dataset')
parser.add_argument('--dataset_csv_path', type=str, default=
'datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')
parser.add_argument('--num_epochs', type=int, default=5, help=
'number of training epochs')
parser.add_argument('--batch_size', type=int, default=16, help=
'training batch size')
parser.add_argument('--lr', type=float, default=0.0005, help='learning rate')
parser.add_argument('--result_model_fn', type=str, default=
'checkpoint_adam', help='trained model filename')
parser.add_argument('--result-model-dir', type=str, default=
'../model/checkpoints', help='path to trained models folder')
parser.add_argument('--fe_finetune_params', type=int, default=0, help=
'number of layers to finetune')
parser.add_argument('--exp_name', type=str, default='exp_delete', help=
'experiment name')
parser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,
5, 5], help='kernels sizes in neigh. cons.')
parser.add_argument('--ncons_channels', nargs='+', type=int, default=[16,
16, 1], help='channels in neigh. cons')
parser.add_argument('--sce_kernel_size', type=int, default=25, help=
'kernel size in sce.')
parser.add_argument('--sce_hidden_dim', type=int, default=1024, help=
'hidden dim in sce')
parser.add_argument('--scaleloss_weight', type=float, default=1.0, help=
    'whether to use the scale loss and, if so, its weight')
parser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int,
default=[5, 5, 5], help='kernels sizes in dynamic fusion net.')
parser.add_argument('--att_scale_ncons_channels', nargs='+', type=int,
default=[16, 16, 1], help='channels in dynamic fusion net')
args = parser.parse_args()
print(args)
print('Creating CNN model...')
model = DCCNet(use_cuda=use_cuda, checkpoint=args.checkpoint,
ncons_kernel_sizes=args.ncons_kernel_sizes, ncons_channels=args.
ncons_channels, sce_kernel_size=args.sce_kernel_size, sce_hidden_dim=
args.sce_hidden_dim, att_scale_ncons_kernel_sizes=args.
att_scale_ncons_kernel_sizes, att_scale_ncons_channels=args.
att_scale_ncons_channels)
model = nn.DataParallel(model)
if args.fe_finetune_params > 0:
for i in range(args.fe_finetune_params):
for p in model.module.FeatureExtraction.model[-1][-(i + 1)].parameters(
):
p.requires_grad = True
print('Trainable parameters:')
count = 0
for i, param in enumerate(model.named_parameters()):
name, p = param
if p.requires_grad:
count += 1
print(str(count) + ': ' + name + '\t' + str(p.shape) + '\t')
print(model)
print('using Adam optimizer')
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()
), lr=args.lr)
cnn_image_size = args.image_size, args.image_size
Dataset = ImagePairDataset
train_csv = 'train_pairs.csv'
val_nocoordinates_csv = 'val_pairs_nocoords.csv'
val_csv = 'image_pairs/val_pairs.csv'
normalization_tnf = NormalizeImageDict(['source_image', 'target_image'])
batch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda)
dataset = Dataset(transform=normalization_tnf, dataset_image_path=args.
dataset_image_path, dataset_csv_path=args.dataset_csv_path,
dataset_csv_file=train_csv, output_size=cnn_image_size)
dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True,
num_workers=0)
dataset_val = Dataset(transform=normalization_tnf, dataset_image_path=args.
dataset_image_path, dataset_csv_path=args.dataset_csv_path,
dataset_csv_file=val_nocoordinates_csv, output_size=cnn_image_size)
dataloader_val = DataLoader(dataset_val, batch_size=args.batch_size,
shuffle=True, num_workers=4)
dataloader_val_pck = pfpascal_val_dataloader(image_size=args.image_size,
eval_dataset_path=args.dataset_image_path, csv_file=val_csv)
checkpoint_dir = os.path.join(args.result_model_dir, args.exp_name)
checkpoint_name = os.path.join(args.result_model_dir, args.exp_name,
datetime.datetime.now().strftime('%Y-%m-%d_%H:%M') + '_' + args.
result_model_fn + '.pth.tar')
log_name = os.path.join(args.result_model_dir, args.exp_name, 'logmain_' +
args.exp_name + '.txt')
if not exists(dirname(log_name)):
makedirs(dirname(log_name))
print('Checkpoint name: ' + checkpoint_name)
best_val_pck = float('-inf')
loss_fn = lambda model, batch: weak_loss(model, batch, normalization=
'softmax', scaleloss_weight=args.scaleloss_weight)
def process_epoch(mode, epoch, model, loss_fn, optimizer, dataloader,
batch_preprocessing_fn, use_cuda=True, log_interval=50):
epoch_loss = 0
for batch_idx, batch in enumerate(dataloader):
st = time.time()
if mode == 'train':
optimizer.zero_grad()
tnf_batch = batch_preprocessing_fn(batch)
loss = loss_fn(model, tnf_batch)
loss_np = loss.data.cpu().numpy()[0]
epoch_loss += loss_np
if mode == 'train':
loss.backward()
optimizer.step()
else:
loss = None
if batch_idx % log_interval == 0:
print(mode.capitalize() +
' Epoch: {} [{}/{} ({:.0f}%)]\t\tLoss: {:.12f}\t\tcost time: {:.1f}'
.format(epoch, batch_idx, len(dataloader), 100.0 *
batch_idx / len(dataloader), loss_np, time.time() - st))
epoch_loss /= len(dataloader)
print(mode.capitalize() + ' set: Average loss: {:.12f}'.format(epoch_loss))
return epoch_loss
train_loss = np.zeros(args.num_epochs)
val_loss = np.zeros(args.num_epochs)
val_pcks = np.zeros(args.num_epochs)
model.module.FeatureExtraction.eval()
print('Starting training...')
for epoch in range(1, args.num_epochs + 1):
st = time.time()
train_loss_curepoch = process_epoch('train', epoch, model, loss_fn,
optimizer, dataloader, batch_preprocessing_fn, log_interval=1)
time_train = time.time() - st
st = time.time()
val_loss_curepoch = process_epoch('val', epoch, model, loss_fn,
optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)
time_valloss = time.time() - st
st = time.time()
val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck, model=
model, verbose=False)
time_valpck = time.time() - st
train_loss[epoch - 1] = train_loss_curepoch
val_loss[epoch - 1] = val_loss_curepoch
val_pcks[epoch - 1] = val_pck_curepoch
is_best = val_pcks[epoch - 1] > best_val_pck
best_val_pck = max(val_pcks[epoch - 1], best_val_pck)
save_checkpoint({'epoch': epoch, 'args': args, 'state_dict': model.
state_dict(), 'optimizer': optimizer.state_dict(), 'train_loss':
train_loss, 'val_loss': val_loss, 'val_pck': val_pcks,
'best_val_pck': best_val_pck}, is_best, checkpoint_name,
save_all_epochs=False)
message = (
"""Epoch{} Train_loss{:.6f} cost time{:.1f} Val_loss{:.6f} cost time{:.1f} Val_pck{:.6f} cost time{:.1f}
"""
.format(epoch, train_loss_curepoch, time_train, val_loss_curepoch,
time_valloss, val_pck_curepoch, time_valpck))
print(message)
with open(log_name, 'a') as log_file:
log_file.write('%s\n' % message)
print('Done!')
<|reserved_special_token_1|>
from __future__ import print_function, division
import os
from os.path import exists, join, basename, dirname
from os import makedirs
import numpy as np
import datetime
import time
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from lib.dataloader import DataLoader
from lib.im_pair_dataset import ImagePairDataset
from lib.normalization import NormalizeImageDict
from lib.torch_util import save_checkpoint
from lib.torch_util import BatchTensorToVars
from lib.eval_util_dynamic import pfdataset_pck, pfpascal_val_dataloader
from models.model_dynamic import DCCNet
from models.loss_dynamic import weak_loss
use_cuda = torch.cuda.is_available()
torch.manual_seed(1)
if use_cuda:
torch.cuda.manual_seed(1)
np.random.seed(1)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
print('DCCNet training script')
parser = argparse.ArgumentParser(description='Compute PF Pascal matches')
parser.add_argument('--checkpoint', type=str, default='')
parser.add_argument('--image_size', type=int, default=400)
parser.add_argument('--dataset_image_path', type=str, default=
'datasets/pf-pascal/', help='path to PF Pascal dataset')
parser.add_argument('--dataset_csv_path', type=str, default=
'datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')
parser.add_argument('--num_epochs', type=int, default=5, help=
'number of training epochs')
parser.add_argument('--batch_size', type=int, default=16, help=
'training batch size')
parser.add_argument('--lr', type=float, default=0.0005, help='learning rate')
parser.add_argument('--result_model_fn', type=str, default=
'checkpoint_adam', help='trained model filename')
parser.add_argument('--result-model-dir', type=str, default=
'../model/checkpoints', help='path to trained models folder')
parser.add_argument('--fe_finetune_params', type=int, default=0, help=
'number of layers to finetune')
parser.add_argument('--exp_name', type=str, default='exp_delete', help=
'experiment name')
parser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,
5, 5], help='kernels sizes in neigh. cons.')
parser.add_argument('--ncons_channels', nargs='+', type=int, default=[16,
16, 1], help='channels in neigh. cons')
parser.add_argument('--sce_kernel_size', type=int, default=25, help=
'kernel size in sce.')
parser.add_argument('--sce_hidden_dim', type=int, default=1024, help=
'hidden dim in sce')
parser.add_argument('--scaleloss_weight', type=float, default=1.0, help=
    'whether to use the scale loss and, if so, its weight')
parser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int,
default=[5, 5, 5], help='kernels sizes in dynamic fusion net.')
parser.add_argument('--att_scale_ncons_channels', nargs='+', type=int,
default=[16, 16, 1], help='channels in dynamic fusion net')
args = parser.parse_args()
print(args)
print('Creating CNN model...')
model = DCCNet(use_cuda=use_cuda, checkpoint=args.checkpoint,
ncons_kernel_sizes=args.ncons_kernel_sizes, ncons_channels=args.
ncons_channels, sce_kernel_size=args.sce_kernel_size, sce_hidden_dim=
args.sce_hidden_dim, att_scale_ncons_kernel_sizes=args.
att_scale_ncons_kernel_sizes, att_scale_ncons_channels=args.
att_scale_ncons_channels)
model = nn.DataParallel(model)
if args.fe_finetune_params > 0:
for i in range(args.fe_finetune_params):
for p in model.module.FeatureExtraction.model[-1][-(i + 1)].parameters(
):
p.requires_grad = True
print('Trainable parameters:')
count = 0
for i, param in enumerate(model.named_parameters()):
name, p = param
if p.requires_grad:
count += 1
print(str(count) + ': ' + name + '\t' + str(p.shape) + '\t')
print(model)
print('using Adam optimizer')
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()
), lr=args.lr)
cnn_image_size = args.image_size, args.image_size
Dataset = ImagePairDataset
train_csv = 'train_pairs.csv'
val_nocoordinates_csv = 'val_pairs_nocoords.csv'
val_csv = 'image_pairs/val_pairs.csv'
normalization_tnf = NormalizeImageDict(['source_image', 'target_image'])
batch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda)
dataset = Dataset(transform=normalization_tnf, dataset_image_path=args.
dataset_image_path, dataset_csv_path=args.dataset_csv_path,
dataset_csv_file=train_csv, output_size=cnn_image_size)
dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True,
num_workers=0)
dataset_val = Dataset(transform=normalization_tnf, dataset_image_path=args.
dataset_image_path, dataset_csv_path=args.dataset_csv_path,
dataset_csv_file=val_nocoordinates_csv, output_size=cnn_image_size)
dataloader_val = DataLoader(dataset_val, batch_size=args.batch_size,
shuffle=True, num_workers=4)
dataloader_val_pck = pfpascal_val_dataloader(image_size=args.image_size,
eval_dataset_path=args.dataset_image_path, csv_file=val_csv)
checkpoint_dir = os.path.join(args.result_model_dir, args.exp_name)
checkpoint_name = os.path.join(args.result_model_dir, args.exp_name,
datetime.datetime.now().strftime('%Y-%m-%d_%H:%M') + '_' + args.
result_model_fn + '.pth.tar')
log_name = os.path.join(args.result_model_dir, args.exp_name, 'logmain_' +
args.exp_name + '.txt')
if not exists(dirname(log_name)):
makedirs(dirname(log_name))
print('Checkpoint name: ' + checkpoint_name)
best_val_pck = float('-inf')
loss_fn = lambda model, batch: weak_loss(model, batch, normalization=
'softmax', scaleloss_weight=args.scaleloss_weight)
def process_epoch(mode, epoch, model, loss_fn, optimizer, dataloader,
batch_preprocessing_fn, use_cuda=True, log_interval=50):
epoch_loss = 0
for batch_idx, batch in enumerate(dataloader):
st = time.time()
if mode == 'train':
optimizer.zero_grad()
tnf_batch = batch_preprocessing_fn(batch)
loss = loss_fn(model, tnf_batch)
loss_np = loss.data.cpu().numpy()[0]
epoch_loss += loss_np
if mode == 'train':
loss.backward()
optimizer.step()
else:
loss = None
if batch_idx % log_interval == 0:
print(mode.capitalize() +
' Epoch: {} [{}/{} ({:.0f}%)]\t\tLoss: {:.12f}\t\tcost time: {:.1f}'
.format(epoch, batch_idx, len(dataloader), 100.0 *
batch_idx / len(dataloader), loss_np, time.time() - st))
epoch_loss /= len(dataloader)
print(mode.capitalize() + ' set: Average loss: {:.12f}'.format(epoch_loss))
return epoch_loss
train_loss = np.zeros(args.num_epochs)
val_loss = np.zeros(args.num_epochs)
val_pcks = np.zeros(args.num_epochs)
model.module.FeatureExtraction.eval()
print('Starting training...')
for epoch in range(1, args.num_epochs + 1):
st = time.time()
train_loss_curepoch = process_epoch('train', epoch, model, loss_fn,
optimizer, dataloader, batch_preprocessing_fn, log_interval=1)
time_train = time.time() - st
st = time.time()
val_loss_curepoch = process_epoch('val', epoch, model, loss_fn,
optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)
time_valloss = time.time() - st
st = time.time()
val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck, model=
model, verbose=False)
time_valpck = time.time() - st
train_loss[epoch - 1] = train_loss_curepoch
val_loss[epoch - 1] = val_loss_curepoch
val_pcks[epoch - 1] = val_pck_curepoch
is_best = val_pcks[epoch - 1] > best_val_pck
best_val_pck = max(val_pcks[epoch - 1], best_val_pck)
save_checkpoint({'epoch': epoch, 'args': args, 'state_dict': model.
state_dict(), 'optimizer': optimizer.state_dict(), 'train_loss':
train_loss, 'val_loss': val_loss, 'val_pck': val_pcks,
'best_val_pck': best_val_pck}, is_best, checkpoint_name,
save_all_epochs=False)
message = (
"""Epoch{} Train_loss{:.6f} cost time{:.1f} Val_loss{:.6f} cost time{:.1f} Val_pck{:.6f} cost time{:.1f}
"""
.format(epoch, train_loss_curepoch, time_train, val_loss_curepoch,
time_valloss, val_pck_curepoch, time_valpck))
print(message)
with open(log_name, 'a') as log_file:
log_file.write('%s\n' % message)
print('Done!')
<|reserved_special_token_1|>
from __future__ import print_function, division
import os
from os.path import exists, join, basename, dirname
from os import makedirs
import numpy as np
import datetime
import time
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from lib.dataloader import DataLoader
from lib.im_pair_dataset import ImagePairDataset
from lib.normalization import NormalizeImageDict
from lib.torch_util import save_checkpoint
from lib.torch_util import BatchTensorToVars
from lib.eval_util_dynamic import pfdataset_pck, pfpascal_val_dataloader
# import DCCNet
from models.model_dynamic import DCCNet
from models.loss_dynamic import weak_loss
# Seed and CUDA
use_cuda = torch.cuda.is_available()
torch.manual_seed(1)
if use_cuda:
torch.cuda.manual_seed(1)
np.random.seed(1)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
print('DCCNet training script')
# Argument parsing
parser = argparse.ArgumentParser(description='Compute PF Pascal matches')
parser.add_argument('--checkpoint', type=str, default='')
parser.add_argument('--image_size', type=int, default=400)
parser.add_argument('--dataset_image_path', type=str, default='datasets/pf-pascal/', help='path to PF Pascal dataset')
parser.add_argument('--dataset_csv_path', type=str, default='datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')
parser.add_argument('--num_epochs', type=int, default=5, help='number of training epochs')
parser.add_argument('--batch_size', type=int, default=16, help='training batch size')
parser.add_argument('--lr', type=float, default=0.0005, help='learning rate')
parser.add_argument('--result_model_fn', type=str, default='checkpoint_adam', help='trained model filename')
parser.add_argument('--result-model-dir', type=str, default='../model/checkpoints', help='path to trained models folder')
parser.add_argument('--fe_finetune_params', type=int, default=0, help='number of layers to finetune')
parser.add_argument('--exp_name', type=str, default='exp_delete', help='experiment name')
# DCCNet args
parser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,5,5], help='kernels sizes in neigh. cons.')
parser.add_argument('--ncons_channels', nargs='+', type=int, default=[16,16,1], help='channels in neigh. cons')
parser.add_argument('--sce_kernel_size',type=int,default=25,help='kernel size in sce.')
parser.add_argument('--sce_hidden_dim',type=int,default=1024,help='hidden dim in sce')
parser.add_argument('--scaleloss_weight',type=float,default=1.0,help='whether to use the scale loss and, if so, its weight')
parser.add_argument('--att_scale_ncons_kernel_sizes', nargs='+', type=int, default=[5,5,5], help='kernels sizes in dynamic fusion net.')
parser.add_argument('--att_scale_ncons_channels', nargs='+', type=int, default=[16,16,1], help='channels in dynamic fusion net')
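# Hypothetical invocation for orientation (the script/file name is assumed):
#   python train.py --num_epochs 5 --batch_size 16 --lr 0.0005 \
#       --scaleloss_weight 1.0 --exp_name exp_delete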
args = parser.parse_args()
print(args)
# Create model
print('Creating CNN model...')
model = DCCNet(use_cuda=use_cuda,
checkpoint=args.checkpoint,
ncons_kernel_sizes=args.ncons_kernel_sizes,
ncons_channels=args.ncons_channels,
sce_kernel_size=args.sce_kernel_size,
sce_hidden_dim=args.sce_hidden_dim,
att_scale_ncons_kernel_sizes=args.att_scale_ncons_kernel_sizes,
att_scale_ncons_channels=args.att_scale_ncons_channels,
)
#Multi-GPU support
model = nn.DataParallel(model)
# Set which parts of the model to train
if args.fe_finetune_params>0:
for i in range(args.fe_finetune_params):
for p in model.module.FeatureExtraction.model[-1][-(i+1)].parameters():
p.requires_grad=True
print('Trainable parameters:')
count = 0
for i,param in enumerate(model.named_parameters()):
name,p = param
if p.requires_grad:
count+=1
print(str(count)+": "+name+"\t"+str(p.shape)+"\t")
print(model)
# Optimizer
print('using Adam optimizer')
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
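# filter(...) hands Adam only the parameters with requires_grad=True, so any
# weights the model construction left frozen (presumably the feature
# extraction backbone, unless --fe_finetune_params unfroze its last layers)
# receive no updates.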
cnn_image_size=(args.image_size,args.image_size)
Dataset = ImagePairDataset
train_csv = 'train_pairs.csv'
# val_pairs_nocoords.csv: used to compute the validation loss; has a flip column but no keypoint coordinates
# val_pairs.csv: used to compute validation PCK; includes keypoint coordinates
val_nocoordinates_csv = 'val_pairs_nocoords.csv'
val_csv = 'image_pairs/val_pairs.csv'
normalization_tnf = NormalizeImageDict(['source_image','target_image'])
batch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda)
# Dataset and dataloader
dataset = Dataset(transform=normalization_tnf,
dataset_image_path=args.dataset_image_path,
dataset_csv_path=args.dataset_csv_path,
dataset_csv_file = train_csv,
output_size=cnn_image_size,
)
dataloader = DataLoader(dataset, batch_size=args.batch_size,
shuffle=True,
num_workers=0)
dataset_val = Dataset(transform=normalization_tnf,
dataset_image_path=args.dataset_image_path,
dataset_csv_path=args.dataset_csv_path,
dataset_csv_file=val_nocoordinates_csv,
output_size=cnn_image_size)
# compute val loss
dataloader_val = DataLoader(dataset_val, batch_size=args.batch_size,
shuffle=True, num_workers=4)
# compute val pck
dataloader_val_pck = pfpascal_val_dataloader(image_size=args.image_size, eval_dataset_path=args.dataset_image_path, csv_file=val_csv) #load pfpascal val dataset
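# PCK (percentage of correct keypoints) needs ground-truth keypoints, hence
# the separate val_pairs.csv above; val_pairs_nocoords.csv can only drive the
# validation loss.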
# Define checkpoint name
checkpoint_dir = os.path.join(args.result_model_dir,args.exp_name)
checkpoint_name = os.path.join(args.result_model_dir,args.exp_name,
datetime.datetime.now().strftime("%Y-%m-%d_%H:%M")+'_'+args.result_model_fn + '.pth.tar')
log_name = os.path.join(args.result_model_dir,args.exp_name, 'logmain_'+args.exp_name+'.txt')
if not exists(dirname(log_name)):
makedirs(dirname(log_name))
print('Checkpoint name: '+checkpoint_name)
# Train
best_val_pck = float("-inf")
loss_fn = lambda model,batch: weak_loss(model, batch, normalization='softmax', scaleloss_weight=args.scaleloss_weight)
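# weak_loss scores image pairs without requiring keypoint annotations
# (consistent with the weakly-supervised setup here); scaleloss_weight
# presumably switches on and weights the additional scale term described in
# its argparse help.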
# define epoch function
def process_epoch(mode,epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn,use_cuda=True,log_interval=50):
epoch_loss = 0
for batch_idx, batch in enumerate(dataloader):
st = time.time()
if mode=='train':
optimizer.zero_grad()
tnf_batch = batch_preprocessing_fn(batch)
loss = loss_fn(model,tnf_batch)
loss_np = loss.data.cpu().numpy()[0]
#loss_np = loss.data.cpu().numpy()
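        # Indexing with [0] assumes an old (pre-0.4) PyTorch where a scalar
        # loss was a 1-element tensor; on current versions loss.item() (or
        # the commented line above) is the equivalent.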
epoch_loss += loss_np
if mode=='train':
loss.backward()
optimizer.step()
else:
loss=None
if batch_idx % log_interval == 0:
print(mode.capitalize()+' Epoch: {} [{}/{} ({:.0f}%)]\t\tLoss: {:.12f}\t\tcost time: {:.1f}'.format(
epoch, batch_idx , len(dataloader),
100. * batch_idx / len(dataloader), loss_np,time.time()-st))
epoch_loss /= len(dataloader)
print(mode.capitalize()+' set: Average loss: {:.12f}'.format(epoch_loss))
return epoch_loss
train_loss = np.zeros(args.num_epochs)
val_loss = np.zeros(args.num_epochs)
val_pcks = np.zeros(args.num_epochs)
model.module.FeatureExtraction.eval()
print('Starting training...')
for epoch in range(1, args.num_epochs+1):
st = time.time()
train_loss_curepoch = process_epoch('train',epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn,log_interval=1)
time_train = time.time()-st
st = time.time()
val_loss_curepoch = process_epoch('val', epoch, model, loss_fn, optimizer, dataloader_val, batch_preprocessing_fn, log_interval=1)
time_valloss = time.time()-st
st = time.time()
val_pck_curepoch = pfdataset_pck(dataloader=dataloader_val_pck,model=model,verbose=False)
time_valpck = time.time()-st
train_loss[epoch - 1] = train_loss_curepoch
val_loss[epoch - 1] = val_loss_curepoch
val_pcks[epoch-1] = val_pck_curepoch
    # remember the best validation PCK seen so far
is_best = val_pcks[epoch - 1] > best_val_pck
best_val_pck = max(val_pcks[epoch - 1], best_val_pck)
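    # checkpoint every epoch; the is_best flag presumably lets save_checkpoint keep a separate copy of the best-PCK weights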
save_checkpoint({
'epoch': epoch,
'args': args,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
'train_loss': train_loss,
'val_loss': val_loss,
'val_pck': val_pcks,
'best_val_pck':best_val_pck,
}, is_best,checkpoint_name,save_all_epochs=False)
message = 'Epoch{}\tTrain_loss{:.6f}\tcost time{:.1f}\tVal_loss{:.6f}\tcost time{:.1f}\tVal_pck{:.6f}\tcost time{:.1f}\n'.format\
(epoch, train_loss_curepoch, time_train, val_loss_curepoch, time_valloss,val_pck_curepoch,time_valpck,)
print(message)
with open(log_name, "a") as log_file:
log_file.write('%s\n' % message)
print('Done!')
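
# Typical invocation (the script name is illustrative; the flags correspond to the argparse options defined above):
#   python train_dccnet.py --dataset_image_path datasets/pf-pascal/ \
#       --dataset_csv_path datasets/pf-pascal/image_pairs/ --num_epochs 5 --batch_size 16 --lr 0.0005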
""" Unit test for the Supermarket checkout exercise """
import unittest
from decimal import Decimal
from ShoppingCart import *
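# Scanner and createInventoryList are provided by the ShoppingCart module under test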
# Unit tests -----
class ScannerTests(unittest.TestCase):
def setUp(self):
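        # Pricing rules map item name -> {quantity: price as a string}; quantity keys > 1 encode bundle discounts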
        pricingRulesWithSingleDiscount = {'Apple': {1: '0.50', 3: '1.30'},
                                          'Orange': {1: '0.20'},
                                          'Tomato': {1: '1.25'},
                                          'Cucumber': {1: '0.10'}}

        pricingRulesWithMultipleDiscounts = {'Apple': {1: '0.50', 3: '1.30', 5: '2.00'},
                                             'Orange': {1: '0.20', 4: '.60'},
                                             'Tomato': {1: '1.25', 2: '.62', 5: '2.50', 10: '2.00'},
                                             'Cucumber': {1: '0.10'}}
self.singleItemListOneDiscount = createInventoryList(pricingRulesWithSingleDiscount)
self.multipleDiscountsItemList = createInventoryList(pricingRulesWithMultipleDiscounts)

    def testScanningItems(self):
        scanner = Scanner(self.singleItemListOneDiscount)
        groceryList = ['Apple', 'Orange', 'Tomato', 'Apple', 'Orange', 'Cucumber']
        scanner.scanItems(groceryList)

        self.assertEqual(self.singleItemListOneDiscount['Apple'].numberOfItems, 2)
        self.assertEqual(self.singleItemListOneDiscount['Orange'].numberOfItems, 2)
        self.assertEqual(self.singleItemListOneDiscount['Tomato'].numberOfItems, 1)
        self.assertEqual(self.singleItemListOneDiscount['Cucumber'].numberOfItems, 1)
def testPricingZeroItems(self):
scanner = Scanner(self.singleItemListOneDiscount)
emptyGroceryList = []
scanner.scanItems(emptyGroceryList)
        self.assertEqual(Decimal('0'), scanner.totalPrice())

    def testPricingSingleItems(self):
        scanner = Scanner(self.singleItemListOneDiscount)
        groceryList = ['Apple', 'Orange', 'Tomato', 'Apple', 'Orange', 'Cucumber']
        scanner.scanItems(groceryList)
        self.assertEqual(self.singleItemListOneDiscount['Apple'].finalCost(), Decimal('1.00'))
        self.assertEqual(self.singleItemListOneDiscount['Orange'].finalCost(), Decimal('0.40'))
        self.assertEqual(self.singleItemListOneDiscount['Tomato'].finalCost(), Decimal('1.25'))
        self.assertEqual(self.singleItemListOneDiscount['Cucumber'].finalCost(), Decimal('0.10'))
        self.assertEqual(Decimal('2.75'), scanner.totalPrice())

    def testPricingSingleItemsWithExactDiscount(self):
        scanner = Scanner(self.singleItemListOneDiscount)
        groceryList = ['Apple', 'Orange', 'Orange', 'Apple', 'Orange', 'Apple']
        scanner.scanItems(groceryList)
        self.assertEqual(self.singleItemListOneDiscount['Apple'].finalCost(), Decimal('1.30'))
        self.assertEqual(self.singleItemListOneDiscount['Orange'].finalCost(), Decimal('0.60'))
        self.assertEqual(Decimal('1.90'), scanner.totalPrice())

    def testPricingMultipleItemsBeyondDiscounts(self):
        scanner = Scanner(self.singleItemListOneDiscount)
        groceryList = ['Apple', 'Tomato', 'Cucumber', 'Apple', 'Cucumber', 'Apple', 'Apple']
        scanner.scanItems(groceryList)
        self.assertEqual(self.singleItemListOneDiscount['Apple'].finalCost(), Decimal('1.80'))
        self.assertEqual(self.singleItemListOneDiscount['Tomato'].finalCost(), Decimal('1.25'))
        self.assertEqual(self.singleItemListOneDiscount['Cucumber'].finalCost(), Decimal('0.20'))
        self.assertEqual(Decimal('3.25'), scanner.totalPrice())

    def testPricingMultipleItemsWithMultipleDiscounts(self):
        scanner = Scanner(self.multipleDiscountsItemList)
        groceryList = ['Orange', 'Apple', 'Tomato', 'Orange', 'Tomato', 'Cucumber', 'Tomato', 'Tomato', 'Tomato',
                       'Apple', 'Cucumber', 'Apple', 'Tomato', 'Tomato', 'Apple', 'Tomato', 'Orange', 'Apple',
                       'Orange', 'Apple', 'Apple', 'Orange', 'Apple', 'Apple']
        scanner.scanItems(groceryList)
        self.assertEqual(self.multipleDiscountsItemList['Apple'].finalCost(), Decimal('3.80'))
        self.assertEqual(self.multipleDiscountsItemList['Tomato'].finalCost(), Decimal('4.37'))
        self.assertEqual(self.multipleDiscountsItemList['Orange'].finalCost(), Decimal('0.80'))
        self.assertEqual(self.multipleDiscountsItemList['Cucumber'].finalCost(), Decimal('0.20'))
        self.assertEqual(Decimal('9.17'), scanner.totalPrice())

    def testPricingMultipleItemsWithMultipleDiscountsOneExact(self):
        scanner = Scanner(self.multipleDiscountsItemList)
        groceryList = ['Orange', 'Apple', 'Tomato', 'Orange', 'Tomato', 'Cucumber', 'Tomato', 'Tomato', 'Tomato',
                       'Apple', 'Cucumber', 'Apple', 'Tomato', 'Tomato', 'Apple', 'Tomato', 'Orange', 'Apple',
                       'Orange', 'Apple', 'Apple', 'Orange', 'Apple', 'Apple', 'Tomato', 'Tomato']
        scanner.scanItems(groceryList)
        self.assertEqual(self.multipleDiscountsItemList['Apple'].finalCost(), Decimal('3.80'))
        self.assertEqual(self.multipleDiscountsItemList['Tomato'].finalCost(), Decimal('2.00'))
        self.assertEqual(self.multipleDiscountsItemList['Orange'].finalCost(), Decimal('0.80'))
        self.assertEqual(self.multipleDiscountsItemList['Cucumber'].finalCost(), Decimal('0.20'))
        self.assertEqual(Decimal('6.80'), scanner.totalPrice())

class CheckoutTests(unittest.TestCase):

    def setUp(self):
        pricingRulesWithSingleDiscount = {'Apple': {1: '0.50', 3: '1.30'},
                                          'Orange': {1: '0.20'},
                                          'Tomato': {1: '1.25'},
                                          'Cucumber': {1: '0.10'}}
        self.itemList = createInventoryList(pricingRulesWithSingleDiscount)

    def testCheckout(self):
        scanner = Scanner(self.itemList)
        groceryList = ['Apple', 'Orange', 'Tomato', 'Apple', 'Orange', 'Cucumber']
        scanner.scanItems(groceryList)
        self.assertEqual(self.itemList['Apple'].numberOfItems, 2)
        self.assertEqual(self.itemList['Orange'].numberOfItems, 2)
        self.assertEqual(self.itemList['Tomato'].numberOfItems, 1)
        self.assertEqual(self.itemList['Cucumber'].numberOfItems, 1)
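
# Standard entry point so the suite can be run directly rather than via an external test runner
if __name__ == '__main__':
    unittest.main()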
"step-ids": [
5,
7,
8,
11,
14
]
}
|
[
5,
7,
8,
11,
14
] |
from rest_framework import serializers
from .models import Backend
class BackendSerializer(serializers.ModelSerializer):
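    """Serialize all fields of the Backend model (fields = '__all__')."""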
class Meta:
model = Backend
fields = '__all__'
|
normal
|
{
"blob_id": "b4787d65fb8adf5dc6a99c1a13922c8f9acc2087",
"index": 1971,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass BackendSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Backend\n fields = '__all__'\n",
"step-3": "from rest_framework import serializers\nfrom .models import Backend\n\n\nclass BackendSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Backend\n fields = '__all__'\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
f = open("resources/yesterday.txt", 'r')
yesterday_lyric = ""
while 1 :
line = f.readline()
if not line :
break
yesterday_lyric = yesterday_lyric + line.strip() + "\n"
f.close()
# Count occurrences of the word "yesterday" regardless of case: upper-case the text, then count
num_of_yesterday = yesterday_lyric.upper().count("YESTERDAY")
print("Number of a Word 'YESTERDAY'", num_of_yesterday)
# Count "Yesterday" and "yesterday" separately, respecting case
num_of_small_yesterday = yesterday_lyric.count("yesterday")
num_of_title_yesterday = yesterday_lyric.count("Yesterday")
print("Number of a Word 'yesterday'", num_of_small_yesterday)
print("Number of a Word 'Yesterday'", num_of_title_yesterday)
|
normal
|
{
"blob_id": "8559448822b3d3989a9795e7b497a2791588c327",
"index": 9539,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile 1:\n line = f.readline()\n if not line:\n break\n yesterday_lyric = yesterday_lyric + line.strip() + '\\n'\nf.close()\n<mask token>\nprint(\"Number of a Word 'YESTERDAY'\", num_of_yesterday)\n<mask token>\nprint(\"Number of a Word 'yesterday'\", num_of_small_yesterday)\nprint(\"Number of a Word 'Yesterday'\", num_of_title_yesterday)\n",
"step-3": "f = open('resources/yesterday.txt', 'r')\nyesterday_lyric = ''\nwhile 1:\n line = f.readline()\n if not line:\n break\n yesterday_lyric = yesterday_lyric + line.strip() + '\\n'\nf.close()\nnum_of_yesterday = yesterday_lyric.upper().count('YESTERDAY')\nprint(\"Number of a Word 'YESTERDAY'\", num_of_yesterday)\nnum_of_small_yesterday = yesterday_lyric.count('yesterday')\nnum_of_title_yesterday = yesterday_lyric.count('Yesterday')\nprint(\"Number of a Word 'yesterday'\", num_of_small_yesterday)\nprint(\"Number of a Word 'Yesterday'\", num_of_title_yesterday)\n",
"step-4": "f = open(\"resources/yesterday.txt\", 'r')\nyesterday_lyric = \"\"\nwhile 1 :\n line = f.readline()\n if not line :\n break\n yesterday_lyric = yesterday_lyric + line.strip() + \"\\n\"\n\nf.close()\n\n# 대소문자 구분없이 yesterday 단어의 개수 세기 : 대문자로 또는 소문자로 만들고 카운드 세기\nnum_of_yesterday = yesterday_lyric.upper().count(\"YESTERDAY\")\nprint(\"Number of a Word 'YESTERDAY'\", num_of_yesterday)\n\n# 대소문자 구분하여 Yesterday 와 yesterday의 개수를 세보자.\nnum_of_small_yesterday = yesterday_lyric.count(\"yesterday\")\nnum_of_title_yesterday = yesterday_lyric.count(\"Yesterday\")\nprint(\"Number of a Word 'yesterday'\", num_of_small_yesterday)\nprint(\"Number of a Word 'Yesterday'\", num_of_title_yesterday)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def get_Userid(path):
path_Divided = path.split('\\')
get_id = path_Divided[6].split('.')
get_id = get_id[0]
return get_id
def compose_Json_Path_ToRead(path_json_source, get_id):
json_path_to_read = path_json_source + '\\' + str(get_id) + '.json'
return json_path_to_read
def get_province_city_path_ToRead(province, city, input_year, input_month):
path_json_source = 'F:\\Fast_Prepared_Json\\' + str(province) + '\\' + str(
city) + '\\' + input_year + '\\' + input_month
return path_json_source
def read_Json_Source(json_path_to_read, pic_num_least, province, city, get_id):
f1 = open(json_path_to_read, encoding='utf-8')
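    # the picture count is taken as the number of lines in the user's JSON file (apparently one record per line)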
pic_num = len(f1.readlines())
return pic_num
def gothrough_Source(path_json_source, province, city, pic_num_least):
total = 0
"""
    To show download progress, first count the total number of accounts.
"""
for dirpath, dirnames, filenames in os.walk(path_json_source):
for filepath in filenames:
path = os.path.join(dirpath, filepath)
get_id = get_Userid(path)
json_path_to_read = compose_Json_Path_ToRead(path_json_source,
get_id)
pic_num = read_Json_Source(json_path_to_read, pic_num_least,
province, city, get_id)
print(pic_num)
total = total + pic_num
print('TOTAL:', total)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_Userid(path):
path_Divided = path.split('\\')
get_id = path_Divided[6].split('.')
get_id = get_id[0]
return get_id
def compose_Json_Path_ToRead(path_json_source, get_id):
json_path_to_read = path_json_source + '\\' + str(get_id) + '.json'
return json_path_to_read
def get_province_city_path_ToRead(province, city, input_year, input_month):
path_json_source = 'F:\\Fast_Prepared_Json\\' + str(province) + '\\' + str(
city) + '\\' + input_year + '\\' + input_month
return path_json_source
def read_Json_Source(json_path_to_read, pic_num_least, province, city, get_id):
f1 = open(json_path_to_read, encoding='utf-8')
pic_num = len(f1.readlines())
return pic_num
def gothrough_Source(path_json_source, province, city, pic_num_least):
total = 0
"""
    To show download progress, first count the total number of accounts.
"""
for dirpath, dirnames, filenames in os.walk(path_json_source):
for filepath in filenames:
path = os.path.join(dirpath, filepath)
get_id = get_Userid(path)
json_path_to_read = compose_Json_Path_ToRead(path_json_source,
get_id)
pic_num = read_Json_Source(json_path_to_read, pic_num_least,
province, city, get_id)
print(pic_num)
total = total + pic_num
print('TOTAL:', total)
<|reserved_special_token_0|>
gothrough_Source(path_json_source, input_province, input_city, pic_num_least)
print('Ok!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_Userid(path):
path_Divided = path.split('\\')
get_id = path_Divided[6].split('.')
get_id = get_id[0]
return get_id
def compose_Json_Path_ToRead(path_json_source, get_id):
json_path_to_read = path_json_source + '\\' + str(get_id) + '.json'
return json_path_to_read
def get_province_city_path_ToRead(province, city, input_year, input_month):
path_json_source = 'F:\\Fast_Prepared_Json\\' + str(province) + '\\' + str(
city) + '\\' + input_year + '\\' + input_month
return path_json_source
def read_Json_Source(json_path_to_read, pic_num_least, province, city, get_id):
f1 = open(json_path_to_read, encoding='utf-8')
pic_num = len(f1.readlines())
return pic_num
def gothrough_Source(path_json_source, province, city, pic_num_least):
total = 0
"""
    To show download progress, first count the total number of accounts.
"""
for dirpath, dirnames, filenames in os.walk(path_json_source):
for filepath in filenames:
path = os.path.join(dirpath, filepath)
get_id = get_Userid(path)
json_path_to_read = compose_Json_Path_ToRead(path_json_source,
get_id)
pic_num = read_Json_Source(json_path_to_read, pic_num_least,
province, city, get_id)
print(pic_num)
total = total + pic_num
print('TOTAL:', total)
<|reserved_special_token_0|>
input_province = '广东省'
input_city = '广州市'
input_year = '2014'
input_month = '08'
pic_num_least = 1
path_json_source = get_province_city_path_ToRead(input_province, input_city,
input_year, input_month)
gothrough_Source(path_json_source, input_province, input_city, pic_num_least)
print('Ok!')
mark = input()
<|reserved_special_token_1|>
import json
import os
def get_Userid(path):
path_Divided = path.split('\\')
get_id = path_Divided[6].split('.')
get_id = get_id[0]
return get_id
def compose_Json_Path_ToRead(path_json_source, get_id):
json_path_to_read = path_json_source + '\\' + str(get_id) + '.json'
return json_path_to_read
def get_province_city_path_ToRead(province, city, input_year, input_month):
path_json_source = 'F:\\Fast_Prepared_Json\\' + str(province) + '\\' + str(
city) + '\\' + input_year + '\\' + input_month
return path_json_source
def read_Json_Source(json_path_to_read, pic_num_least, province, city, get_id):
f1 = open(json_path_to_read, encoding='utf-8')
pic_num = len(f1.readlines())
return pic_num
def gothrough_Source(path_json_source, province, city, pic_num_least):
total = 0
"""
    To show download progress, first count the total number of accounts.
"""
for dirpath, dirnames, filenames in os.walk(path_json_source):
for filepath in filenames:
path = os.path.join(dirpath, filepath)
get_id = get_Userid(path)
json_path_to_read = compose_Json_Path_ToRead(path_json_source,
get_id)
pic_num = read_Json_Source(json_path_to_read, pic_num_least,
province, city, get_id)
print(pic_num)
total = total + pic_num
print('TOTAL:', total)
<|reserved_special_token_0|>
input_province = '广东省'
input_city = '广州市'
input_year = '2014'
input_month = '08'
pic_num_least = 1
path_json_source = get_province_city_path_ToRead(input_province, input_city,
input_year, input_month)
gothrough_Source(path_json_source, input_province, input_city, pic_num_least)
print('Ok!')
mark = input()
<|reserved_special_token_1|>
#encoding=utf-8
import json
import os
def get_Userid(path):
path_Divided = path.split('\\')
#print(path_Divided)
get_id= path_Divided[6].split('.')
get_id = get_id[0]
#print(get_id)
return get_id
def compose_Json_Path_ToRead(path_json_source,get_id):
json_path_to_read = path_json_source+"\\"+str(get_id)+".json"
return json_path_to_read
def get_province_city_path_ToRead(province,city,input_year,input_month):
path_json_source = "F:\\Fast_Prepared_Json\\"+str(province)+"\\"+str(city)+"\\"+input_year+"\\"+input_month
return path_json_source
def read_Json_Source(json_path_to_read,pic_num_least,province,city,get_id):
f1 = open(json_path_to_read,encoding='utf-8')
pic_num = len(f1.readlines())
return pic_num
def gothrough_Source(path_json_source, province, city, pic_num_least):
total = 0
"""
    To show download progress, first count the total number of accounts.
"""
for dirpath, dirnames, filenames in os.walk(path_json_source):
for filepath in filenames:
path = os.path.join(dirpath, filepath)
            # Extract the userid from the filename
get_id = get_Userid(path)
            # Read the JSON data source
json_path_to_read = compose_Json_Path_ToRead(path_json_source, get_id)
pic_num = read_Json_Source(json_path_to_read, pic_num_least, province, city, get_id)
print(pic_num)
total = total+pic_num
print("TOTAL:",total)
"""
print("请输入想要下载的省份或直辖市:")
input_province = input()
print("请输入想要下载的城市:")
input_city = input()
print("请输入想要下载的年份:(2014)")
input_year = input()
print("请输入想要下载的月份:(07)")
input_month = input()
print("请输入想要过滤的图片数目下限:")
pic_num_least = input()
"""
input_province = "广东省"
input_city = "广州市"
input_year = "2014"
input_month = "08"
pic_num_least = 1
path_json_source = get_province_city_path_ToRead(input_province,input_city,input_year,input_month)
gothrough_Source(path_json_source,input_province,input_city,pic_num_least)
print("Ok!")
mark = input()
|
flexible
|
{
"blob_id": "2e5dbd84eb1f9cc09602df8ef8d7bdd30e1b2f26",
"index": 7119,
"step-1": "<mask token>\n\n\ndef get_Userid(path):\n path_Divided = path.split('\\\\')\n get_id = path_Divided[6].split('.')\n get_id = get_id[0]\n return get_id\n\n\ndef compose_Json_Path_ToRead(path_json_source, get_id):\n json_path_to_read = path_json_source + '\\\\' + str(get_id) + '.json'\n return json_path_to_read\n\n\ndef get_province_city_path_ToRead(province, city, input_year, input_month):\n path_json_source = 'F:\\\\Fast_Prepared_Json\\\\' + str(province) + '\\\\' + str(\n city) + '\\\\' + input_year + '\\\\' + input_month\n return path_json_source\n\n\ndef read_Json_Source(json_path_to_read, pic_num_least, province, city, get_id):\n f1 = open(json_path_to_read, encoding='utf-8')\n pic_num = len(f1.readlines())\n return pic_num\n\n\ndef gothrough_Source(path_json_source, province, city, pic_num_least):\n total = 0\n \"\"\"\n 为了能够看到下载进度,在此先计算账户总数\n \"\"\"\n for dirpath, dirnames, filenames in os.walk(path_json_source):\n for filepath in filenames:\n path = os.path.join(dirpath, filepath)\n get_id = get_Userid(path)\n json_path_to_read = compose_Json_Path_ToRead(path_json_source,\n get_id)\n pic_num = read_Json_Source(json_path_to_read, pic_num_least,\n province, city, get_id)\n print(pic_num)\n total = total + pic_num\n print('TOTAL:', total)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_Userid(path):\n path_Divided = path.split('\\\\')\n get_id = path_Divided[6].split('.')\n get_id = get_id[0]\n return get_id\n\n\ndef compose_Json_Path_ToRead(path_json_source, get_id):\n json_path_to_read = path_json_source + '\\\\' + str(get_id) + '.json'\n return json_path_to_read\n\n\ndef get_province_city_path_ToRead(province, city, input_year, input_month):\n path_json_source = 'F:\\\\Fast_Prepared_Json\\\\' + str(province) + '\\\\' + str(\n city) + '\\\\' + input_year + '\\\\' + input_month\n return path_json_source\n\n\ndef read_Json_Source(json_path_to_read, pic_num_least, province, city, get_id):\n f1 = open(json_path_to_read, encoding='utf-8')\n pic_num = len(f1.readlines())\n return pic_num\n\n\ndef gothrough_Source(path_json_source, province, city, pic_num_least):\n total = 0\n \"\"\"\n 为了能够看到下载进度,在此先计算账户总数\n \"\"\"\n for dirpath, dirnames, filenames in os.walk(path_json_source):\n for filepath in filenames:\n path = os.path.join(dirpath, filepath)\n get_id = get_Userid(path)\n json_path_to_read = compose_Json_Path_ToRead(path_json_source,\n get_id)\n pic_num = read_Json_Source(json_path_to_read, pic_num_least,\n province, city, get_id)\n print(pic_num)\n total = total + pic_num\n print('TOTAL:', total)\n\n\n<mask token>\ngothrough_Source(path_json_source, input_province, input_city, pic_num_least)\nprint('Ok!')\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_Userid(path):\n path_Divided = path.split('\\\\')\n get_id = path_Divided[6].split('.')\n get_id = get_id[0]\n return get_id\n\n\ndef compose_Json_Path_ToRead(path_json_source, get_id):\n json_path_to_read = path_json_source + '\\\\' + str(get_id) + '.json'\n return json_path_to_read\n\n\ndef get_province_city_path_ToRead(province, city, input_year, input_month):\n path_json_source = 'F:\\\\Fast_Prepared_Json\\\\' + str(province) + '\\\\' + str(\n city) + '\\\\' + input_year + '\\\\' + input_month\n return path_json_source\n\n\ndef read_Json_Source(json_path_to_read, pic_num_least, province, city, get_id):\n f1 = open(json_path_to_read, encoding='utf-8')\n pic_num = len(f1.readlines())\n return pic_num\n\n\ndef gothrough_Source(path_json_source, province, city, pic_num_least):\n total = 0\n \"\"\"\n 为了能够看到下载进度,在此先计算账户总数\n \"\"\"\n for dirpath, dirnames, filenames in os.walk(path_json_source):\n for filepath in filenames:\n path = os.path.join(dirpath, filepath)\n get_id = get_Userid(path)\n json_path_to_read = compose_Json_Path_ToRead(path_json_source,\n get_id)\n pic_num = read_Json_Source(json_path_to_read, pic_num_least,\n province, city, get_id)\n print(pic_num)\n total = total + pic_num\n print('TOTAL:', total)\n\n\n<mask token>\ninput_province = '广东省'\ninput_city = '广州市'\ninput_year = '2014'\ninput_month = '08'\npic_num_least = 1\npath_json_source = get_province_city_path_ToRead(input_province, input_city,\n input_year, input_month)\ngothrough_Source(path_json_source, input_province, input_city, pic_num_least)\nprint('Ok!')\nmark = input()\n",
"step-4": "import json\nimport os\n\n\ndef get_Userid(path):\n path_Divided = path.split('\\\\')\n get_id = path_Divided[6].split('.')\n get_id = get_id[0]\n return get_id\n\n\ndef compose_Json_Path_ToRead(path_json_source, get_id):\n json_path_to_read = path_json_source + '\\\\' + str(get_id) + '.json'\n return json_path_to_read\n\n\ndef get_province_city_path_ToRead(province, city, input_year, input_month):\n path_json_source = 'F:\\\\Fast_Prepared_Json\\\\' + str(province) + '\\\\' + str(\n city) + '\\\\' + input_year + '\\\\' + input_month\n return path_json_source\n\n\ndef read_Json_Source(json_path_to_read, pic_num_least, province, city, get_id):\n f1 = open(json_path_to_read, encoding='utf-8')\n pic_num = len(f1.readlines())\n return pic_num\n\n\ndef gothrough_Source(path_json_source, province, city, pic_num_least):\n total = 0\n \"\"\"\n 为了能够看到下载进度,在此先计算账户总数\n \"\"\"\n for dirpath, dirnames, filenames in os.walk(path_json_source):\n for filepath in filenames:\n path = os.path.join(dirpath, filepath)\n get_id = get_Userid(path)\n json_path_to_read = compose_Json_Path_ToRead(path_json_source,\n get_id)\n pic_num = read_Json_Source(json_path_to_read, pic_num_least,\n province, city, get_id)\n print(pic_num)\n total = total + pic_num\n print('TOTAL:', total)\n\n\n<mask token>\ninput_province = '广东省'\ninput_city = '广州市'\ninput_year = '2014'\ninput_month = '08'\npic_num_least = 1\npath_json_source = get_province_city_path_ToRead(input_province, input_city,\n input_year, input_month)\ngothrough_Source(path_json_source, input_province, input_city, pic_num_least)\nprint('Ok!')\nmark = input()\n",
"step-5": "#encoding=utf-8\r\n\r\nimport json\r\nimport os\r\n\r\ndef get_Userid(path):\r\n path_Divided = path.split('\\\\')\r\n #print(path_Divided)\r\n get_id= path_Divided[6].split('.')\r\n get_id = get_id[0]\r\n #print(get_id)\r\n return get_id\r\n\r\ndef compose_Json_Path_ToRead(path_json_source,get_id):\r\n json_path_to_read = path_json_source+\"\\\\\"+str(get_id)+\".json\"\r\n return json_path_to_read\r\n\r\ndef get_province_city_path_ToRead(province,city,input_year,input_month):\r\n path_json_source = \"F:\\\\Fast_Prepared_Json\\\\\"+str(province)+\"\\\\\"+str(city)+\"\\\\\"+input_year+\"\\\\\"+input_month\r\n return path_json_source\r\n\r\ndef read_Json_Source(json_path_to_read,pic_num_least,province,city,get_id):\r\n f1 = open(json_path_to_read,encoding='utf-8')\r\n pic_num = len(f1.readlines())\r\n return pic_num\r\n\r\ndef gothrough_Source(path_json_source, province, city, pic_num_least):\r\n total = 0\r\n \"\"\"\r\n 为了能够看到下载进度,在此先计算账户总数\r\n \"\"\"\r\n\r\n for dirpath, dirnames, filenames in os.walk(path_json_source):\r\n for filepath in filenames:\r\n path = os.path.join(dirpath, filepath)\r\n # 现在开始得到文件名上的userid\r\n get_id = get_Userid(path)\r\n\r\n # 现在开始读取json数据源\r\n json_path_to_read = compose_Json_Path_ToRead(path_json_source, get_id)\r\n pic_num = read_Json_Source(json_path_to_read, pic_num_least, province, city, get_id)\r\n print(pic_num)\r\n total = total+pic_num\r\n print(\"TOTAL:\",total)\r\n\r\n\"\"\"\r\nprint(\"请输入想要下载的省份或直辖市:\")\r\ninput_province = input()\r\nprint(\"请输入想要下载的城市:\")\r\ninput_city = input()\r\nprint(\"请输入想要下载的年份:(2014)\")\r\ninput_year = input()\r\nprint(\"请输入想要下载的月份:(07)\")\r\ninput_month = input()\r\nprint(\"请输入想要过滤的图片数目下限:\")\r\npic_num_least = input()\r\n\"\"\"\r\ninput_province = \"广东省\"\r\ninput_city = \"广州市\"\r\ninput_year = \"2014\"\r\ninput_month = \"08\"\r\npic_num_least = 1\r\n\r\npath_json_source = get_province_city_path_ToRead(input_province,input_city,input_year,input_month)\r\n\r\ngothrough_Source(path_json_source,input_province,input_city,pic_num_least)\r\n\r\nprint(\"Ok!\")\r\nmark = input()",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# coding: UTF-8 -*-
import os.path
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
EMOTICONS = {
"O:)": "angel",
"o:)": "angel",
"O:-)": "angel",
"o:-)": "angel",
"o:-3": "angel",
"o:3": "angel",
"O;^)": "angel",
">:[": "annoyed/disappointed",
":-(": "annoyed/disappointed",
":(": "annoyed/disappointed",
":((": "annoyed/disappointed",
":-((": "annoyed/disappointed",
":-c": "annoyed/disappointed",
":-<": "annoyed/disappointed",
":?C": "annoyed/disappointed",
":<": "annoyed/disappointed",
":[": "annoyed/disappointed",
":{": "annoyed/disappointed",
":=||": "annoyed/disappointed",
":@": "annoyed/disappointed",
">:(": "annoyed/disappointed",
":/": "annoyed/disappointed",
":\\": "annoyed/disappointed",
"=/": "annoyed/disappointed",
"=\\": "annoyed/disappointed",
">:/": "annoyed/disappointed",
">:\\": "annoyed/disappointed",
":S": "annoyed/disappointed",
":s": "annoyed/disappointed",
":-S": "annoyed/disappointed",
":-s": "annoyed/disappointed",
":|": "annoyed/disappointed",
":-|": "annoyed/disappointed",
":$": "annoyed/disappointed",
"?_?": "annoyed/disappointed",
"(>_<)": "annoyed/disappointed",
">_<": "annoyed/disappointed",
">__<": "annoyed/disappointed",
"(>__<)": "annoyed/disappointed",
"(-.-)": "annoyed/disappointed",
"(-_-)": "annoyed/disappointed",
"(._.)": "annoyed/disappointed",
"/:)": "annoyed/disappointed",
":-$": "annoyed/disappointed",
">:P": "annoyed/disappointed",
"K": "annoyed/disappointed",
"3:)": "devilish",
"3:-)": "devilish",
"}:-)": "devilish",
"}:)": "devilish",
">:)": "devilish",
"B-)": "happy",
":-)": "happy",
":)": "happy",
":o)": "happy",
":]": "happy",
":3": "happy",
":c)": "happy",
":>": "happy",
"=]": "happy",
"8)": "happy",
"=)": "happy",
":}": "happy",
":^)": "happy",
":?)": "happy",
":-))": "happy",
"<:-P": "happy",
"<:P": "happy",
"<:-p": "happy",
"<:p": "happy",
";;)": "happy",
"J": "happy",
"<3": "heart",
"^5": "high-five",
">_>^": "high-five",
"^<_<": "high-five",
":*": "kiss",
":*)": "kiss",
":^*": "kiss",
"}{": "kiss",
"('}{')": "kiss",
":-D": "laughing",
":D": "laughing",
"8-D": "laughing",
"8D": "laughing",
"x-D": "laughing",
"xD": "laughing",
"X-D": "laughing",
"XD": "laughing",
"=-D": "laughing",
"=D": "laughing",
";D": "laughing",
"-3": "laughing",
"3": "laughing",
"B^D": "laughing",
"D:<": "laughing",
"D:": "laughing",
"D8": "laughing",
"D;": "laughing",
"D=": "laughing",
"DX": "laughing",
":-B": "nerd",
"8-)": "nerd",
"8)": "nerd",
"</3": "sad",
":'(": "sad",
":'-(": "sad",
"QQ": "sad",
"L": "sad",
":#": "sealed mouth",
":-#": "sealed mouth",
":-X": "sealed mouth",
":-x": "sealed mouth",
":X": "sealed mouth",
":x": "sealed mouth",
"??": "shooting star",
"??": "shooting star",
"~?": "shooting star",
">:O": "suprprised/shocked",
">:o": "suprprised/shocked",
":-O": "suprprised/shocked",
":-o": "suprprised/shocked",
":O": "suprprised/shocked",
":o": "suprprised/shocked",
"O_o": "suprprised/shocked",
"o_O": "suprprised/shocked",
"O.o": "suprprised/shocked",
"o.O": "suprprised/shocked",
"(O_o)": "suprprised/shocked",
"(o_O)": "suprprised/shocked",
"(O.o)": "suprprised/shocked",
"(o.O)": "suprprised/shocked",
":'-)": "tears of happines",
":')": "tears of happines",
":P": "teasing/playful",
":p": "teasing/playful",
">:P": "teasing/playful",
">:p": "teasing/playful",
"X-P": "teasing/playful",
"x-p": "teasing/playful",
"xp": "teasing/playful",
"XP": "teasing/playful",
":-P": "teasing/playful",
":-p": "teasing/playful",
"=P": "teasing/playful",
"=P": "teasing/playful",
":-?": "teasing/playful",
":-b": "teasing/playful",
":b": "teasing/playful",
";)": "wink",
u"º)": "wink",
";-)": "wink",
";]": "wink",
u"^Ü^": "happy",
}
special_tokens = EMOTICONS
from DAPOS.data.variation import Prefix, Suffix
EASY_WORDS = {
u"ليا": [(Prefix(u"ل"), u"يا", Suffix(u""))],
u"لي": [(Prefix(u"ل"), u"ي", Suffix(u""))],
u"لكم": [(Prefix(u"ل"), u"كم", Suffix(u""))],
u"لكما": [(Prefix(u"ل"), u"كما", Suffix(u""))],
u"له": [(Prefix(u"ل"), u"ه", Suffix(u""))],
u"لها": [(Prefix(u"ل"), u"ها", Suffix(u""))],
u"لهم": [(Prefix(u"ل"), u"هم", Suffix(u""))],
u"لهما": [(Prefix(u"ل"), u"هما", Suffix(u""))],
u"لهن": [(Prefix(u"ل"), u"هم", Suffix(u""))],
u"بيا": [(Prefix(u"ب"), u"يا", Suffix(u""))],
u"بي": [(Prefix(u"ب"), u"ي", Suffix(u""))],
u"بك": [(Prefix(u"ب"), u"ك", Suffix(u""))],
u"بكم": [(Prefix(u"ب"), u"كم", Suffix(u""))],
u"بكما": [(Prefix(u"ب"), u"كما", Suffix(u""))],
u"به": [(Prefix(u"ب"), u"ه", Suffix(u""))],
u"بها": [(Prefix(u"ب"), u"ها", Suffix(u""))],
u"بهما": [(Prefix(u"ب"), u"هما", Suffix(u""))],
u"بهم": [(Prefix(u"ب"), u"هم", Suffix(u""))],
u"بهن": [(Prefix(u"ب"), u"هن", Suffix(u""))],
u"عليا": [(Prefix(u""), u"على", Suffix(u"يا"))],
u"فيا": [(Prefix(u"ف"), u"يا", Suffix(u""))],
}
EMOTICONS_TAG = 'EMO'
PUNCTUATION_TAG = 'PUNC'
DIGIT_TAG = 'CD'
NOTDEFINED_TAG = 'NN'
|
normal
|
{
"blob_id": "3f3ed0165120dc135a4ce1f282dbdf9dad57adf8",
"index": 980,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\nEMOTICONS = {'O:)': 'angel', 'o:)': 'angel', 'O:-)': 'angel', 'o:-)':\n 'angel', 'o:-3': 'angel', 'o:3': 'angel', 'O;^)': 'angel', '>:[':\n 'annoyed/disappointed', ':-(': 'annoyed/disappointed', ':(':\n 'annoyed/disappointed', ':((': 'annoyed/disappointed', ':-((':\n 'annoyed/disappointed', ':-c': 'annoyed/disappointed', ':-<':\n 'annoyed/disappointed', ':?C': 'annoyed/disappointed', ':<':\n 'annoyed/disappointed', ':[': 'annoyed/disappointed', ':{':\n 'annoyed/disappointed', ':=||': 'annoyed/disappointed', ':@':\n 'annoyed/disappointed', '>:(': 'annoyed/disappointed', ':/':\n 'annoyed/disappointed', ':\\\\': 'annoyed/disappointed', '=/':\n 'annoyed/disappointed', '=\\\\': 'annoyed/disappointed', '>:/':\n 'annoyed/disappointed', '>:\\\\': 'annoyed/disappointed', ':S':\n 'annoyed/disappointed', ':s': 'annoyed/disappointed', ':-S':\n 'annoyed/disappointed', ':-s': 'annoyed/disappointed', ':|':\n 'annoyed/disappointed', ':-|': 'annoyed/disappointed', ':$':\n 'annoyed/disappointed', '?_?': 'annoyed/disappointed', '(>_<)':\n 'annoyed/disappointed', '>_<': 'annoyed/disappointed', '>__<':\n 'annoyed/disappointed', '(>__<)': 'annoyed/disappointed', '(-.-)':\n 'annoyed/disappointed', '(-_-)': 'annoyed/disappointed', '(._.)':\n 'annoyed/disappointed', '/:)': 'annoyed/disappointed', ':-$':\n 'annoyed/disappointed', '>:P': 'annoyed/disappointed', 'K':\n 'annoyed/disappointed', '3:)': 'devilish', '3:-)': 'devilish', '}:-)':\n 'devilish', '}:)': 'devilish', '>:)': 'devilish', 'B-)': 'happy', ':-)':\n 'happy', ':)': 'happy', ':o)': 'happy', ':]': 'happy', ':3': 'happy',\n ':c)': 'happy', ':>': 'happy', '=]': 'happy', '8)': 'happy', '=)':\n 'happy', ':}': 'happy', ':^)': 'happy', ':?)': 'happy', ':-))': 'happy',\n '<:-P': 'happy', '<:P': 'happy', '<:-p': 'happy', '<:p': 'happy', ';;)':\n 'happy', 'J': 'happy', '<3': 'heart', '^5': 'high-five', '>_>^':\n 'high-five', '^<_<': 'high-five', ':*': 'kiss', ':*)': 'kiss', ':^*':\n 'kiss', '}{': 'kiss', \"('}{')\": 'kiss', ':-D': 'laughing', ':D':\n 'laughing', '8-D': 'laughing', '8D': 'laughing', 'x-D': 'laughing',\n 'xD': 'laughing', 'X-D': 'laughing', 'XD': 'laughing', '=-D':\n 'laughing', '=D': 'laughing', ';D': 'laughing', '-3': 'laughing', '3':\n 'laughing', 'B^D': 'laughing', 'D:<': 'laughing', 'D:': 'laughing',\n 'D8': 'laughing', 'D;': 'laughing', 'D=': 'laughing', 'DX': 'laughing',\n ':-B': 'nerd', '8-)': 'nerd', '8)': 'nerd', '</3': 'sad', \":'(\": 'sad',\n \":'-(\": 'sad', 'QQ': 'sad', 'L': 'sad', ':#': 'sealed mouth', ':-#':\n 'sealed mouth', ':-X': 'sealed mouth', ':-x': 'sealed mouth', ':X':\n 'sealed mouth', ':x': 'sealed mouth', '??': 'shooting star', '??':\n 'shooting star', '~?': 'shooting star', '>:O': 'suprprised/shocked',\n '>:o': 'suprprised/shocked', ':-O': 'suprprised/shocked', ':-o':\n 'suprprised/shocked', ':O': 'suprprised/shocked', ':o':\n 'suprprised/shocked', 'O_o': 'suprprised/shocked', 'o_O':\n 'suprprised/shocked', 'O.o': 'suprprised/shocked', 'o.O':\n 'suprprised/shocked', '(O_o)': 'suprprised/shocked', '(o_O)':\n 'suprprised/shocked', '(O.o)': 'suprprised/shocked', '(o.O)':\n 'suprprised/shocked', \":'-)\": 'tears of happines', \":')\":\n 'tears of happines', ':P': 'teasing/playful', ':p': 'teasing/playful',\n '>:P': 'teasing/playful', '>:p': 'teasing/playful', 'X-P':\n 'teasing/playful', 'x-p': 'teasing/playful', 'xp': 'teasing/playful',\n 'XP': 'teasing/playful', ':-P': 'teasing/playful', ':-p':\n 'teasing/playful', '=P': 'teasing/playful', 
'=P': 'teasing/playful',\n ':-?': 'teasing/playful', ':-b': 'teasing/playful', ':b':\n 'teasing/playful', ';)': 'wink', u'º)': 'wink', ';-)': 'wink', ';]':\n 'wink', u'^Ü^': 'happy'}\nspecial_tokens = EMOTICONS\n<mask token>\nEASY_WORDS = {u'ليا': [(Prefix(u'ل'), u'يا', Suffix(u''))], u'لي': [(Prefix\n (u'ل'), u'ي', Suffix(u''))], u'لكم': [(Prefix(u'ل'), u'كم', Suffix(u'')\n )], u'لكما': [(Prefix(u'ل'), u'كما', Suffix(u''))], u'له': [(Prefix(\n u'ل'), u'ه', Suffix(u''))], u'لها': [(Prefix(u'ل'), u'ها', Suffix(u''))\n ], u'لهم': [(Prefix(u'ل'), u'هم', Suffix(u''))], u'لهما': [(Prefix(u'ل'\n ), u'هما', Suffix(u''))], u'لهن': [(Prefix(u'ل'), u'هم', Suffix(u''))],\n u'بيا': [(Prefix(u'ب'), u'يا', Suffix(u''))], u'بي': [(Prefix(u'ب'),\n u'ي', Suffix(u''))], u'بك': [(Prefix(u'ب'), u'ك', Suffix(u''))], u'بكم':\n [(Prefix(u'ب'), u'كم', Suffix(u''))], u'بكما': [(Prefix(u'ب'), u'كما',\n Suffix(u''))], u'به': [(Prefix(u'ب'), u'ه', Suffix(u''))], u'بها': [(\n Prefix(u'ب'), u'ها', Suffix(u''))], u'بهما': [(Prefix(u'ب'), u'هما',\n Suffix(u''))], u'بهم': [(Prefix(u'ب'), u'هم', Suffix(u''))], u'بهن': [(\n Prefix(u'ب'), u'هن', Suffix(u''))], u'عليا': [(Prefix(u''), u'على',\n Suffix(u'يا'))], u'فيا': [(Prefix(u'ف'), u'يا', Suffix(u''))]}\nEMOTICONS_TAG = 'EMO'\nPUNCTUATION_TAG = 'PUNC'\nDIGIT_TAG = 'CD'\nNOTDEFINED_TAG = 'NN'\n",
"step-3": "import os.path\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\nEMOTICONS = {'O:)': 'angel', 'o:)': 'angel', 'O:-)': 'angel', 'o:-)':\n 'angel', 'o:-3': 'angel', 'o:3': 'angel', 'O;^)': 'angel', '>:[':\n 'annoyed/disappointed', ':-(': 'annoyed/disappointed', ':(':\n 'annoyed/disappointed', ':((': 'annoyed/disappointed', ':-((':\n 'annoyed/disappointed', ':-c': 'annoyed/disappointed', ':-<':\n 'annoyed/disappointed', ':?C': 'annoyed/disappointed', ':<':\n 'annoyed/disappointed', ':[': 'annoyed/disappointed', ':{':\n 'annoyed/disappointed', ':=||': 'annoyed/disappointed', ':@':\n 'annoyed/disappointed', '>:(': 'annoyed/disappointed', ':/':\n 'annoyed/disappointed', ':\\\\': 'annoyed/disappointed', '=/':\n 'annoyed/disappointed', '=\\\\': 'annoyed/disappointed', '>:/':\n 'annoyed/disappointed', '>:\\\\': 'annoyed/disappointed', ':S':\n 'annoyed/disappointed', ':s': 'annoyed/disappointed', ':-S':\n 'annoyed/disappointed', ':-s': 'annoyed/disappointed', ':|':\n 'annoyed/disappointed', ':-|': 'annoyed/disappointed', ':$':\n 'annoyed/disappointed', '?_?': 'annoyed/disappointed', '(>_<)':\n 'annoyed/disappointed', '>_<': 'annoyed/disappointed', '>__<':\n 'annoyed/disappointed', '(>__<)': 'annoyed/disappointed', '(-.-)':\n 'annoyed/disappointed', '(-_-)': 'annoyed/disappointed', '(._.)':\n 'annoyed/disappointed', '/:)': 'annoyed/disappointed', ':-$':\n 'annoyed/disappointed', '>:P': 'annoyed/disappointed', 'K':\n 'annoyed/disappointed', '3:)': 'devilish', '3:-)': 'devilish', '}:-)':\n 'devilish', '}:)': 'devilish', '>:)': 'devilish', 'B-)': 'happy', ':-)':\n 'happy', ':)': 'happy', ':o)': 'happy', ':]': 'happy', ':3': 'happy',\n ':c)': 'happy', ':>': 'happy', '=]': 'happy', '8)': 'happy', '=)':\n 'happy', ':}': 'happy', ':^)': 'happy', ':?)': 'happy', ':-))': 'happy',\n '<:-P': 'happy', '<:P': 'happy', '<:-p': 'happy', '<:p': 'happy', ';;)':\n 'happy', 'J': 'happy', '<3': 'heart', '^5': 'high-five', '>_>^':\n 'high-five', '^<_<': 'high-five', ':*': 'kiss', ':*)': 'kiss', ':^*':\n 'kiss', '}{': 'kiss', \"('}{')\": 'kiss', ':-D': 'laughing', ':D':\n 'laughing', '8-D': 'laughing', '8D': 'laughing', 'x-D': 'laughing',\n 'xD': 'laughing', 'X-D': 'laughing', 'XD': 'laughing', '=-D':\n 'laughing', '=D': 'laughing', ';D': 'laughing', '-3': 'laughing', '3':\n 'laughing', 'B^D': 'laughing', 'D:<': 'laughing', 'D:': 'laughing',\n 'D8': 'laughing', 'D;': 'laughing', 'D=': 'laughing', 'DX': 'laughing',\n ':-B': 'nerd', '8-)': 'nerd', '8)': 'nerd', '</3': 'sad', \":'(\": 'sad',\n \":'-(\": 'sad', 'QQ': 'sad', 'L': 'sad', ':#': 'sealed mouth', ':-#':\n 'sealed mouth', ':-X': 'sealed mouth', ':-x': 'sealed mouth', ':X':\n 'sealed mouth', ':x': 'sealed mouth', '??': 'shooting star', '??':\n 'shooting star', '~?': 'shooting star', '>:O': 'suprprised/shocked',\n '>:o': 'suprprised/shocked', ':-O': 'suprprised/shocked', ':-o':\n 'suprprised/shocked', ':O': 'suprprised/shocked', ':o':\n 'suprprised/shocked', 'O_o': 'suprprised/shocked', 'o_O':\n 'suprprised/shocked', 'O.o': 'suprprised/shocked', 'o.O':\n 'suprprised/shocked', '(O_o)': 'suprprised/shocked', '(o_O)':\n 'suprprised/shocked', '(O.o)': 'suprprised/shocked', '(o.O)':\n 'suprprised/shocked', \":'-)\": 'tears of happines', \":')\":\n 'tears of happines', ':P': 'teasing/playful', ':p': 'teasing/playful',\n '>:P': 'teasing/playful', '>:p': 'teasing/playful', 'X-P':\n 'teasing/playful', 'x-p': 'teasing/playful', 'xp': 'teasing/playful',\n 'XP': 'teasing/playful', ':-P': 'teasing/playful', ':-p':\n 'teasing/playful', '=P': 'teasing/playful', 
'=P': 'teasing/playful',\n ':-?': 'teasing/playful', ':-b': 'teasing/playful', ':b':\n 'teasing/playful', ';)': 'wink', u'º)': 'wink', ';-)': 'wink', ';]':\n 'wink', u'^Ü^': 'happy'}\nspecial_tokens = EMOTICONS\nfrom DAPOS.data.variation import Prefix, Suffix\nEASY_WORDS = {u'ليا': [(Prefix(u'ل'), u'يا', Suffix(u''))], u'لي': [(Prefix\n (u'ل'), u'ي', Suffix(u''))], u'لكم': [(Prefix(u'ل'), u'كم', Suffix(u'')\n )], u'لكما': [(Prefix(u'ل'), u'كما', Suffix(u''))], u'له': [(Prefix(\n u'ل'), u'ه', Suffix(u''))], u'لها': [(Prefix(u'ل'), u'ها', Suffix(u''))\n ], u'لهم': [(Prefix(u'ل'), u'هم', Suffix(u''))], u'لهما': [(Prefix(u'ل'\n ), u'هما', Suffix(u''))], u'لهن': [(Prefix(u'ل'), u'هم', Suffix(u''))],\n u'بيا': [(Prefix(u'ب'), u'يا', Suffix(u''))], u'بي': [(Prefix(u'ب'),\n u'ي', Suffix(u''))], u'بك': [(Prefix(u'ب'), u'ك', Suffix(u''))], u'بكم':\n [(Prefix(u'ب'), u'كم', Suffix(u''))], u'بكما': [(Prefix(u'ب'), u'كما',\n Suffix(u''))], u'به': [(Prefix(u'ب'), u'ه', Suffix(u''))], u'بها': [(\n Prefix(u'ب'), u'ها', Suffix(u''))], u'بهما': [(Prefix(u'ب'), u'هما',\n Suffix(u''))], u'بهم': [(Prefix(u'ب'), u'هم', Suffix(u''))], u'بهن': [(\n Prefix(u'ب'), u'هن', Suffix(u''))], u'عليا': [(Prefix(u''), u'على',\n Suffix(u'يا'))], u'فيا': [(Prefix(u'ف'), u'يا', Suffix(u''))]}\nEMOTICONS_TAG = 'EMO'\nPUNCTUATION_TAG = 'PUNC'\nDIGIT_TAG = 'CD'\nNOTDEFINED_TAG = 'NN'\n",
"step-4": "# coding: UTF-8 -*-\nimport os.path\n\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\n\nEMOTICONS = {\n \"O:)\": \"angel\",\n \"o:)\": \"angel\",\n \"O:-)\": \"angel\",\n \"o:-)\": \"angel\",\n \"o:-3\": \"angel\",\n \"o:3\": \"angel\",\n \"O;^)\": \"angel\",\n \">:[\": \"annoyed/disappointed\",\n \":-(\": \"annoyed/disappointed\",\n \":(\": \"annoyed/disappointed\",\n \":((\": \"annoyed/disappointed\",\n \":-((\": \"annoyed/disappointed\",\n \":-c\": \"annoyed/disappointed\",\n \":-<\": \"annoyed/disappointed\",\n \":?C\": \"annoyed/disappointed\",\n \":<\": \"annoyed/disappointed\",\n \":[\": \"annoyed/disappointed\",\n \":{\": \"annoyed/disappointed\",\n \":=||\": \"annoyed/disappointed\",\n \":@\": \"annoyed/disappointed\",\n \">:(\": \"annoyed/disappointed\",\n \":/\": \"annoyed/disappointed\",\n \":\\\\\": \"annoyed/disappointed\",\n \"=/\": \"annoyed/disappointed\",\n \"=\\\\\": \"annoyed/disappointed\",\n \">:/\": \"annoyed/disappointed\",\n \">:\\\\\": \"annoyed/disappointed\",\n \":S\": \"annoyed/disappointed\",\n \":s\": \"annoyed/disappointed\",\n \":-S\": \"annoyed/disappointed\",\n \":-s\": \"annoyed/disappointed\",\n \":|\": \"annoyed/disappointed\",\n \":-|\": \"annoyed/disappointed\",\n \":$\": \"annoyed/disappointed\",\n \"?_?\": \"annoyed/disappointed\",\n \"(>_<)\": \"annoyed/disappointed\",\n \">_<\": \"annoyed/disappointed\",\n \">__<\": \"annoyed/disappointed\",\n \"(>__<)\": \"annoyed/disappointed\",\n \"(-.-)\": \"annoyed/disappointed\",\n \"(-_-)\": \"annoyed/disappointed\",\n \"(._.)\": \"annoyed/disappointed\",\n \"/:)\": \"annoyed/disappointed\",\n \":-$\": \"annoyed/disappointed\",\n \">:P\": \"annoyed/disappointed\",\n \"K\": \"annoyed/disappointed\",\n \"3:)\": \"devilish\",\n \"3:-)\": \"devilish\",\n \"}:-)\": \"devilish\",\n \"}:)\": \"devilish\",\n \">:)\": \"devilish\",\n \"B-)\": \"happy\",\n \":-)\": \"happy\",\n \":)\": \"happy\",\n \":o)\": \"happy\",\n \":]\": \"happy\",\n \":3\": \"happy\",\n \":c)\": \"happy\",\n \":>\": \"happy\",\n \"=]\": \"happy\",\n \"8)\": \"happy\",\n \"=)\": \"happy\",\n \":}\": \"happy\",\n \":^)\": \"happy\",\n \":?)\": \"happy\",\n \":-))\": \"happy\",\n \"<:-P\": \"happy\",\n \"<:P\": \"happy\",\n \"<:-p\": \"happy\",\n \"<:p\": \"happy\",\n \";;)\": \"happy\",\n \"J\": \"happy\",\n \"<3\": \"heart\",\n \"^5\": \"high-five\",\n \">_>^\": \"high-five\",\n \"^<_<\": \"high-five\",\n \":*\": \"kiss\",\n \":*)\": \"kiss\",\n \":^*\": \"kiss\",\n \"}{\": \"kiss\",\n \"('}{')\": \"kiss\",\n \":-D\": \"laughing\",\n \":D\": \"laughing\",\n \"8-D\": \"laughing\",\n \"8D\": \"laughing\",\n \"x-D\": \"laughing\",\n \"xD\": \"laughing\",\n \"X-D\": \"laughing\",\n \"XD\": \"laughing\",\n \"=-D\": \"laughing\",\n \"=D\": \"laughing\",\n \";D\": \"laughing\",\n \"-3\": \"laughing\",\n \"3\": \"laughing\",\n \"B^D\": \"laughing\",\n \"D:<\": \"laughing\",\n \"D:\": \"laughing\",\n \"D8\": \"laughing\",\n \"D;\": \"laughing\",\n \"D=\": \"laughing\",\n \"DX\": \"laughing\",\n \":-B\": \"nerd\",\n \"8-)\": \"nerd\",\n \"8)\": \"nerd\",\n \"</3\": \"sad\",\n \":'(\": \"sad\",\n \":'-(\": \"sad\",\n \"QQ\": \"sad\",\n \"L\": \"sad\",\n \":#\": \"sealed mouth\",\n \":-#\": \"sealed mouth\",\n \":-X\": \"sealed mouth\",\n \":-x\": \"sealed mouth\",\n \":X\": \"sealed mouth\",\n \":x\": \"sealed mouth\",\n \"??\": \"shooting star\",\n \"??\": \"shooting star\",\n \"~?\": \"shooting star\",\n \">:O\": \"suprprised/shocked\",\n \">:o\": \"suprprised/shocked\",\n \":-O\": \"suprprised/shocked\",\n \":-o\": 
\"suprprised/shocked\",\n \":O\": \"suprprised/shocked\",\n \":o\": \"suprprised/shocked\",\n \"O_o\": \"suprprised/shocked\",\n \"o_O\": \"suprprised/shocked\",\n \"O.o\": \"suprprised/shocked\",\n \"o.O\": \"suprprised/shocked\",\n \"(O_o)\": \"suprprised/shocked\",\n \"(o_O)\": \"suprprised/shocked\",\n \"(O.o)\": \"suprprised/shocked\",\n \"(o.O)\": \"suprprised/shocked\",\n \":'-)\": \"tears of happines\",\n \":')\": \"tears of happines\",\n \":P\": \"teasing/playful\",\n \":p\": \"teasing/playful\",\n \">:P\": \"teasing/playful\",\n \">:p\": \"teasing/playful\",\n \"X-P\": \"teasing/playful\",\n \"x-p\": \"teasing/playful\",\n \"xp\": \"teasing/playful\",\n \"XP\": \"teasing/playful\",\n \":-P\": \"teasing/playful\",\n \":-p\": \"teasing/playful\",\n \"=P\": \"teasing/playful\",\n \"=P\": \"teasing/playful\",\n \":-?\": \"teasing/playful\",\n \":-b\": \"teasing/playful\",\n \":b\": \"teasing/playful\",\n \";)\": \"wink\",\n u\"º)\": \"wink\",\n \";-)\": \"wink\",\n \";]\": \"wink\",\n u\"^Ü^\": \"happy\",\n}\n\nspecial_tokens = EMOTICONS\n\nfrom DAPOS.data.variation import Prefix, Suffix\n\nEASY_WORDS = {\n u\"ليا\": [(Prefix(u\"ل\"), u\"يا\", Suffix(u\"\"))],\n u\"لي\": [(Prefix(u\"ل\"), u\"ي\", Suffix(u\"\"))],\n u\"لكم\": [(Prefix(u\"ل\"), u\"كم\", Suffix(u\"\"))],\n u\"لكما\": [(Prefix(u\"ل\"), u\"كما\", Suffix(u\"\"))],\n u\"له\": [(Prefix(u\"ل\"), u\"ه\", Suffix(u\"\"))],\n u\"لها\": [(Prefix(u\"ل\"), u\"ها\", Suffix(u\"\"))],\n u\"لهم\": [(Prefix(u\"ل\"), u\"هم\", Suffix(u\"\"))],\n u\"لهما\": [(Prefix(u\"ل\"), u\"هما\", Suffix(u\"\"))],\n u\"لهن\": [(Prefix(u\"ل\"), u\"هم\", Suffix(u\"\"))],\n u\"بيا\": [(Prefix(u\"ب\"), u\"يا\", Suffix(u\"\"))],\n u\"بي\": [(Prefix(u\"ب\"), u\"ي\", Suffix(u\"\"))],\n u\"بك\": [(Prefix(u\"ب\"), u\"ك\", Suffix(u\"\"))],\n u\"بكم\": [(Prefix(u\"ب\"), u\"كم\", Suffix(u\"\"))],\n u\"بكما\": [(Prefix(u\"ب\"), u\"كما\", Suffix(u\"\"))],\n u\"به\": [(Prefix(u\"ب\"), u\"ه\", Suffix(u\"\"))],\n u\"بها\": [(Prefix(u\"ب\"), u\"ها\", Suffix(u\"\"))],\n u\"بهما\": [(Prefix(u\"ب\"), u\"هما\", Suffix(u\"\"))],\n u\"بهم\": [(Prefix(u\"ب\"), u\"هم\", Suffix(u\"\"))],\n u\"بهن\": [(Prefix(u\"ب\"), u\"هن\", Suffix(u\"\"))],\n u\"عليا\": [(Prefix(u\"\"), u\"على\", Suffix(u\"يا\"))],\n u\"فيا\": [(Prefix(u\"ف\"), u\"يا\", Suffix(u\"\"))],\n}\n\n\nEMOTICONS_TAG = 'EMO'\nPUNCTUATION_TAG = 'PUNC'\nDIGIT_TAG = 'CD'\nNOTDEFINED_TAG = 'NN'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class GraphWalkAgent(nn.Module):
def __init__(self, args):
super(GraphWalkAgent, self).__init__()
self.model = args.model
self.relation_only = args.relation_only
self.history_dim = args.history_dim
self.history_num_layers = args.history_num_layers
self.entity_dim = args.entity_dim
self.relation_dim = args.relation_dim
if self.relation_only:
self.action_dim = args.relation_dim
else:
self.action_dim = args.entity_dim + args.relation_dim
self.ff_dropout_rate = args.ff_dropout_rate
self.rnn_dropout_rate = args.rnn_dropout_rate
self.action_dropout_rate = args.action_dropout_rate
self.xavier_initialization = args.xavier_initialization
self.relation_only_in_path = args.relation_only_in_path
self.path = None
self.define_modules()
self.initialize_modules()
self.fn = None
self.fn_kg = None
<|reserved_special_token_0|>
def encode_history(self, current_entity, e_s, kg, query_relation):
embedded_q_rel = kg.get_relation_embeddings(query_relation)
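        # the top-layer hidden state of the path-encoder LSTM summarizes the action history so far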
encoded_history = self.path[-1][0][-1, :, :]
if self.relation_only:
X = torch.cat([encoded_history, embedded_q_rel], dim=-1)
elif self.relation_only_in_path:
E_s = kg.get_entity_embeddings(e_s)
E = kg.get_entity_embeddings(current_entity)
X = torch.cat([E, encoded_history, E_s, embedded_q_rel], dim=-1)
else:
E = kg.get_entity_embeddings(current_entity)
X = torch.cat([E, encoded_history, embedded_q_rel], dim=-1)
return X
<|reserved_special_token_0|>
def initialize_path(self, action: Action, kg: KnowledgeGraph):
if self.relation_only_in_path:
init_action_embedding = kg.get_relation_embeddings(action.rel)
else:
init_action_embedding = self.get_action_embedding(action, kg)
init_action_embedding.unsqueeze_(1)
init_h = zeros_var_cuda([self.history_num_layers, len(
init_action_embedding), self.history_dim])
init_c = zeros_var_cuda([self.history_num_layers, len(
init_action_embedding), self.history_dim])
self.path = [self.path_encoder(init_action_embedding, (init_h,
init_c))[1]]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_ground_truth_edge_mask(self, current_nodes, r_space, e_space,
obs: Observation, kg: KnowledgeGraph):
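        # mask out the direct edge (e_s, q, e_t) and its inverse edge so the agent
        # cannot trivially traverse the ground-truth triple it is being trained on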
s_e = obs.source_entity
t_e = obs.target_entity
q = obs.query_relation
def build_mask(source_nodes, target_nodes, relation):
return (current_nodes == source_nodes).unsqueeze(1) * (r_space ==
relation.unsqueeze(1)) * (e_space == target_nodes.unsqueeze(1))
mask = build_mask(s_e, t_e, q)
inv_q = kg.get_inv_relation_id(q)
inv_mask = build_mask(t_e, s_e, inv_q)
return ((mask + inv_mask) * (s_e.unsqueeze(1) != kg.dummy_e)).float()
def get_answer_mask(self, e_space, e_s, q, kg: KnowledgeGraph):
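        # build a 0/1 mask over e_space marking entities recorded as answers to the query (e_s, q)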
if kg.args.mask_test_false_negatives:
answer_vectors = kg.all_object_vectors
else:
answer_vectors = kg.train_object_vectors
answer_masks = []
for i in range(len(e_space)):
_e_s, _q = int(e_s[i]), int(q[i])
if not _e_s in answer_vectors or not _q in answer_vectors[_e_s]:
answer_vector = var_cuda(torch.LongTensor([[kg.num_entities]]))
else:
answer_vector = answer_vectors[_e_s][_q]
answer_mask = torch.sum(e_space[i].unsqueeze(0) ==
answer_vector, dim=0).long()
answer_masks.append(answer_mask)
answer_mask = torch.cat(answer_masks).view(len(e_space), -1)
return answer_mask
def get_false_negative_mask(self, e_space, e_s, q, e_t, kg: KnowledgeGraph
):
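        # known correct answers other than the current target e_t count as false negatives and get masked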
answer_mask = self.get_answer_mask(e_space, e_s, q, kg)
false_negative_mask = (answer_mask * (e_space != e_t.unsqueeze(1)).
long()).float()
return false_negative_mask
def validate_action_mask(self, action_mask):
action_mask_min = action_mask.min()
action_mask_max = action_mask.max()
assert action_mask_min == 0 or action_mask_min == 1
assert action_mask_max == 0 or action_mask_max == 1
<|reserved_special_token_0|>
def define_modules(self):
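        # the policy MLP input concatenates the history encoding with entity/relation embeddings as configured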
if self.relation_only:
input_dim = self.history_dim + self.relation_dim
elif self.relation_only_in_path:
input_dim = (self.history_dim + self.entity_dim * 2 + self.
relation_dim)
else:
input_dim = self.history_dim + self.entity_dim + self.relation_dim
self.W1 = nn.Linear(input_dim, self.action_dim)
self.W2 = nn.Linear(self.action_dim, self.action_dim)
self.W1Dropout = nn.Dropout(p=self.ff_dropout_rate)
self.W2Dropout = nn.Dropout(p=self.ff_dropout_rate)
if self.relation_only_in_path:
self.path_encoder = nn.LSTM(input_size=self.relation_dim,
hidden_size=self.history_dim, num_layers=self.
history_num_layers, batch_first=True)
else:
self.path_encoder = nn.LSTM(input_size=self.action_dim,
hidden_size=self.history_dim, num_layers=self.
history_num_layers, batch_first=True)
def initialize_modules(self):
if self.xavier_initialization:
nn.init.xavier_uniform_(self.W1.weight)
nn.init.xavier_uniform_(self.W2.weight)
for name, param in self.path_encoder.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.xavier_normal_(param)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GraphWalkAgent(nn.Module):
def __init__(self, args):
super(GraphWalkAgent, self).__init__()
self.model = args.model
self.relation_only = args.relation_only
self.history_dim = args.history_dim
self.history_num_layers = args.history_num_layers
self.entity_dim = args.entity_dim
self.relation_dim = args.relation_dim
if self.relation_only:
self.action_dim = args.relation_dim
else:
self.action_dim = args.entity_dim + args.relation_dim
self.ff_dropout_rate = args.ff_dropout_rate
self.rnn_dropout_rate = args.rnn_dropout_rate
self.action_dropout_rate = args.action_dropout_rate
self.xavier_initialization = args.xavier_initialization
self.relation_only_in_path = args.relation_only_in_path
self.path = None
self.define_modules()
self.initialize_modules()
self.fn = None
self.fn_kg = None
def transit(self, current_entity, obs: Observation, kg: KnowledgeGraph,
use_action_space_bucketing=True, merge_aspace_batching_outcome=False
) ->BucketActions:
"""
Compute the next action distribution based on
(a) the current node (entity) in KG and the query relation
(b) action history representation
:param current_entity: agent location (node) at step t.
:param obs: agent observation at step t.
e_s: source node
query_relation: query relation
last_step: If set, the agent is carrying out the last step.
last_r: label of edge traversed in the previous step
        seen_nodes: nodes seen on the paths
:param kg: Knowledge graph environment.
:param use_action_space_bucketing: If set, group the action space of different nodes
into buckets by their sizes.
    :param merge_aspace_batching_outcome: If set, merge the transition probability distribution
generated of different action space bucket into a single batch.
:return
With aspace batching and without merging the outcomes:
db_outcomes: (Dynamic Batch) (action_space, action_dist)
action_space: (Batch) padded possible action indices
action_dist: (Batch) distribution over actions.
inv_offset: Indices to set the dynamic batching output back to the original order.
entropy: (Batch) entropy of action distribution.
Else:
action_dist: (Batch) distribution over actions.
entropy: (Batch) entropy of action distribution.
"""
X = self.encode_history(current_entity, obs.source_entity, kg, obs.
query_relation)
X = self.W1(X)
X = F.relu(X)
X = self.W1Dropout(X)
X = self.W2(X)
X2 = self.W2Dropout(X)
def policy_nn_fun(X2, acs: ActionSpace):
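            # score each candidate action by the dot product of its embedding with the policy state X2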
A = self.get_action_embedding(Action(acs.r_space, acs.e_space), kg)
action_dist = F.softmax(torch.squeeze(A @ torch.unsqueeze(X2, 2
), 2) - (1 - acs.action_mask) * ops.HUGE_INT, dim=-1)
return action_dist, ops.entropy(action_dist)
if use_action_space_bucketing:
action = self.do_it_with_bucketing(X2, current_entity, kg,
merge_aspace_batching_outcome, obs, policy_nn_fun)
else:
assert False
action = self.do_it_without_bucketing(X2, current_entity, kg,
obs, policy_nn_fun)
return action
def encode_history(self, current_entity, e_s, kg, query_relation):
embedded_q_rel = kg.get_relation_embeddings(query_relation)
encoded_history = self.path[-1][0][-1, :, :]
if self.relation_only:
X = torch.cat([encoded_history, embedded_q_rel], dim=-1)
elif self.relation_only_in_path:
E_s = kg.get_entity_embeddings(e_s)
E = kg.get_entity_embeddings(current_entity)
X = torch.cat([E, encoded_history, E_s, embedded_q_rel], dim=-1)
else:
E = kg.get_entity_embeddings(current_entity)
X = torch.cat([E, encoded_history, embedded_q_rel], dim=-1)
return X
<|reserved_special_token_0|>
def initialize_path(self, action: Action, kg: KnowledgeGraph):
if self.relation_only_in_path:
init_action_embedding = kg.get_relation_embeddings(action.rel)
else:
init_action_embedding = self.get_action_embedding(action, kg)
init_action_embedding.unsqueeze_(1)
init_h = zeros_var_cuda([self.history_num_layers, len(
init_action_embedding), self.history_dim])
init_c = zeros_var_cuda([self.history_num_layers, len(
init_action_embedding), self.history_dim])
self.path = [self.path_encoder(init_action_embedding, (init_h,
init_c))[1]]
def update_path(self, action: Action, kg: KnowledgeGraph, offset=None):
"""
Once an action was selected, update the action history.
:param action (r, e): (Variable:batch) indices of the most recent action
- r is the most recently traversed edge;
- e is the destination entity.
    :param offset: (Variable:batch) if not None, adjust the path history with the given offset, used for search
    :param kg: Knowledge graph environment.
"""
def offset_path_history(p, offset):
for i, x in enumerate(p):
if type(x) is tuple:
new_tuple = tuple([_x[:, offset, :] for _x in x])
p[i] = new_tuple
else:
p[i] = x[offset, :]
if self.relation_only_in_path:
action_embedding = kg.get_relation_embeddings(action.rel)
else:
action_embedding = self.get_action_embedding(action, kg)
if offset is not None:
offset_path_history(self.path, offset)
self.path.append(self.path_encoder(action_embedding.unsqueeze(1),
self.path[-1])[1])
<|reserved_special_token_0|>
def apply_action_masks(self, acsp: ActionSpace, e, obs: Observation, kg:
KnowledgeGraph):
r_space, e_space, action_mask = (acsp.r_space, acsp.e_space, acsp.
action_mask)
e_s, q, e_t, last_step, last_r, seen_nodes = obs
ground_truth_edge_mask = self.get_ground_truth_edge_mask(e, r_space,
e_space, obs, kg)
action_mask -= ground_truth_edge_mask
self.validate_action_mask(action_mask)
if last_step:
false_negative_mask = self.get_false_negative_mask(e_space, e_s,
q, e_t, kg)
action_mask *= 1 - false_negative_mask
self.validate_action_mask(action_mask)
return action_mask
def get_ground_truth_edge_mask(self, current_nodes, r_space, e_space,
obs: Observation, kg: KnowledgeGraph):
s_e = obs.source_entity
t_e = obs.target_entity
q = obs.query_relation
def build_mask(source_nodes, target_nodes, relation):
return (current_nodes == source_nodes).unsqueeze(1) * (r_space ==
relation.unsqueeze(1)) * (e_space == target_nodes.unsqueeze(1))
mask = build_mask(s_e, t_e, q)
inv_q = kg.get_inv_relation_id(q)
inv_mask = build_mask(t_e, s_e, inv_q)
return ((mask + inv_mask) * (s_e.unsqueeze(1) != kg.dummy_e)).float()
def get_answer_mask(self, e_space, e_s, q, kg: KnowledgeGraph):
if kg.args.mask_test_false_negatives:
answer_vectors = kg.all_object_vectors
else:
answer_vectors = kg.train_object_vectors
answer_masks = []
for i in range(len(e_space)):
_e_s, _q = int(e_s[i]), int(q[i])
if not _e_s in answer_vectors or not _q in answer_vectors[_e_s]:
answer_vector = var_cuda(torch.LongTensor([[kg.num_entities]]))
else:
answer_vector = answer_vectors[_e_s][_q]
answer_mask = torch.sum(e_space[i].unsqueeze(0) ==
answer_vector, dim=0).long()
answer_masks.append(answer_mask)
answer_mask = torch.cat(answer_masks).view(len(e_space), -1)
return answer_mask
def get_false_negative_mask(self, e_space, e_s, q, e_t, kg: KnowledgeGraph
):
answer_mask = self.get_answer_mask(e_space, e_s, q, kg)
false_negative_mask = (answer_mask * (e_space != e_t.unsqueeze(1)).
long()).float()
return false_negative_mask
def validate_action_mask(self, action_mask):
action_mask_min = action_mask.min()
action_mask_max = action_mask.max()
assert action_mask_min == 0 or action_mask_min == 1
assert action_mask_max == 0 or action_mask_max == 1
<|reserved_special_token_0|>
def define_modules(self):
if self.relation_only:
input_dim = self.history_dim + self.relation_dim
elif self.relation_only_in_path:
input_dim = (self.history_dim + self.entity_dim * 2 + self.
relation_dim)
else:
input_dim = self.history_dim + self.entity_dim + self.relation_dim
self.W1 = nn.Linear(input_dim, self.action_dim)
self.W2 = nn.Linear(self.action_dim, self.action_dim)
self.W1Dropout = nn.Dropout(p=self.ff_dropout_rate)
self.W2Dropout = nn.Dropout(p=self.ff_dropout_rate)
if self.relation_only_in_path:
self.path_encoder = nn.LSTM(input_size=self.relation_dim,
hidden_size=self.history_dim, num_layers=self.
history_num_layers, batch_first=True)
else:
self.path_encoder = nn.LSTM(input_size=self.action_dim,
hidden_size=self.history_dim, num_layers=self.
history_num_layers, batch_first=True)
def initialize_modules(self):
if self.xavier_initialization:
nn.init.xavier_uniform_(self.W1.weight)
nn.init.xavier_uniform_(self.W2.weight)
for name, param in self.path_encoder.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.xavier_normal_(param)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GraphWalkAgent(nn.Module):
def __init__(self, args):
super(GraphWalkAgent, self).__init__()
self.model = args.model
self.relation_only = args.relation_only
self.history_dim = args.history_dim
self.history_num_layers = args.history_num_layers
self.entity_dim = args.entity_dim
self.relation_dim = args.relation_dim
if self.relation_only:
self.action_dim = args.relation_dim
else:
self.action_dim = args.entity_dim + args.relation_dim
self.ff_dropout_rate = args.ff_dropout_rate
self.rnn_dropout_rate = args.rnn_dropout_rate
self.action_dropout_rate = args.action_dropout_rate
self.xavier_initialization = args.xavier_initialization
self.relation_only_in_path = args.relation_only_in_path
self.path = None
self.define_modules()
self.initialize_modules()
self.fn = None
self.fn_kg = None
def transit(self, current_entity, obs: Observation, kg: KnowledgeGraph,
use_action_space_bucketing=True, merge_aspace_batching_outcome=False
) ->BucketActions:
"""
Compute the next action distribution based on
(a) the current node (entity) in KG and the query relation
(b) action history representation
:param current_entity: agent location (node) at step t.
:param obs: agent observation at step t.
e_s: source node
query_relation: query relation
last_step: If set, the agent is carrying out the last step.
last_r: label of edge traversed in the previous step
seen_nodes: notes seen on the paths
:param kg: Knowledge graph environment.
:param use_action_space_bucketing: If set, group the action space of different nodes
into buckets by their sizes.
:param merge_aspace_batch_outcome: If set, merge the transition probability distribution
generated of different action space bucket into a single batch.
:return
With aspace batching and without merging the outcomes:
db_outcomes: (Dynamic Batch) (action_space, action_dist)
action_space: (Batch) padded possible action indices
action_dist: (Batch) distribution over actions.
inv_offset: Indices to set the dynamic batching output back to the original order.
entropy: (Batch) entropy of action distribution.
Else:
action_dist: (Batch) distribution over actions.
entropy: (Batch) entropy of action distribution.
"""
X = self.encode_history(current_entity, obs.source_entity, kg, obs.
query_relation)
X = self.W1(X)
X = F.relu(X)
X = self.W1Dropout(X)
X = self.W2(X)
X2 = self.W2Dropout(X)
def policy_nn_fun(X2, acs: ActionSpace):
A = self.get_action_embedding(Action(acs.r_space, acs.e_space), kg)
action_dist = F.softmax(torch.squeeze(A @ torch.unsqueeze(X2, 2
), 2) - (1 - acs.action_mask) * ops.HUGE_INT, dim=-1)
return action_dist, ops.entropy(action_dist)
if use_action_space_bucketing:
action = self.do_it_with_bucketing(X2, current_entity, kg,
merge_aspace_batching_outcome, obs, policy_nn_fun)
else:
assert False
action = self.do_it_without_bucketing(X2, current_entity, kg,
obs, policy_nn_fun)
return action
def encode_history(self, current_entity, e_s, kg, query_relation):
embedded_q_rel = kg.get_relation_embeddings(query_relation)
encoded_history = self.path[-1][0][-1, :, :]
if self.relation_only:
X = torch.cat([encoded_history, embedded_q_rel], dim=-1)
elif self.relation_only_in_path:
E_s = kg.get_entity_embeddings(e_s)
E = kg.get_entity_embeddings(current_entity)
X = torch.cat([E, encoded_history, E_s, embedded_q_rel], dim=-1)
else:
E = kg.get_entity_embeddings(current_entity)
X = torch.cat([E, encoded_history, embedded_q_rel], dim=-1)
return X
def do_it_with_bucketing(self, X2, current_entity, kg,
merge_aspace_batching_outcome, obs: Observation, policy_nn_fun):
entropy_list = []
references = []
buckect_action_spaces, inthis_bucket_indizes = (self.
get_action_space_in_buckets(current_entity, obs, kg))
action_spaces = []
action_dists = []
for as_b, inthis_bucket in zip(buckect_action_spaces,
inthis_bucket_indizes):
X2_b = X2[inthis_bucket, :]
action_dist_b, entropy_b = policy_nn_fun(X2_b, as_b)
references.extend(inthis_bucket)
action_spaces.append(as_b)
action_dists.append(action_dist_b)
entropy_list.append(entropy_b)
inv_offset = [i for i, _ in sorted(enumerate(references), key=lambda
x: x[1])]
entropy = torch.cat(entropy_list, dim=0)[inv_offset]
action = BucketActions(action_spaces, action_dists, inv_offset, entropy
)
if merge_aspace_batching_outcome:
action_space = pad_and_cat_action_space(buckect_action_spaces,
inv_offset, kg)
action_dist = ops.pad_and_cat(action.action_dists, padding_value=0
)[inv_offset]
action = BucketActions([action_space], [action_dist], None, entropy
)
return action
def initialize_path(self, action: Action, kg: KnowledgeGraph):
if self.relation_only_in_path:
init_action_embedding = kg.get_relation_embeddings(action.rel)
else:
init_action_embedding = self.get_action_embedding(action, kg)
init_action_embedding.unsqueeze_(1)
init_h = zeros_var_cuda([self.history_num_layers, len(
init_action_embedding), self.history_dim])
init_c = zeros_var_cuda([self.history_num_layers, len(
init_action_embedding), self.history_dim])
self.path = [self.path_encoder(init_action_embedding, (init_h,
init_c))[1]]
def update_path(self, action: Action, kg: KnowledgeGraph, offset=None):
"""
Once an action was selected, update the action history.
:param action (r, e): (Variable:batch) indices of the most recent action
- r is the most recently traversed edge;
- e is the destination entity.
:param offset: (Variable:batch) if None, adjust path history with the given offset, used for search
:param KG: Knowledge graph environment.
"""
def offset_path_history(p, offset):
for i, x in enumerate(p):
if type(x) is tuple:
new_tuple = tuple([_x[:, offset, :] for _x in x])
p[i] = new_tuple
else:
p[i] = x[offset, :]
if self.relation_only_in_path:
action_embedding = kg.get_relation_embeddings(action.rel)
else:
action_embedding = self.get_action_embedding(action, kg)
if offset is not None:
offset_path_history(self.path, offset)
self.path.append(self.path_encoder(action_embedding.unsqueeze(1),
self.path[-1])[1])
def get_action_space_in_buckets(self, current_entity: torch.Tensor, obs:
Observation, kg: KnowledgeGraph, collapse_entities=False):
"""
To compute the search operation in batch, we group the action spaces of different states
(i.e. the set of outgoing edges of different nodes) into buckets based on their sizes to
save the memory consumption of paddings.
For example, in large knowledge graphs, certain nodes may have thousands of outgoing
edges while a long tail of nodes only have a small amount of outgoing edges. If a batch
contains a node with 1000 outgoing edges while the rest of the nodes have a maximum of
5 outgoing edges, we need to pad the action spaces of all nodes to 1000, which consumes
lots of memory.
With the bucketing approach, each bucket is padded separately. In this case the node
with 1000 outgoing edges will be in its own bucket and the rest of the nodes will suffer
little from padding the action space to 5.
Once we grouped the action spaces in buckets, the policy network computation is carried
out for every bucket iteratively. Once all the computation is done, we concatenate the
results of all buckets and restore their original order in the batch. The computation
outside the policy network module is thus unaffected.
:return db_action_spaces:
[((r_space_b0, r_space_b0), action_mask_b0),
((r_space_b1, r_space_b1), action_mask_b1),
...
((r_space_bn, r_space_bn), action_mask_bn)]
A list of action space tensor representations grouped in n buckets, s.t.
r_space_b0.size(0) + r_space_b1.size(0) + ... + r_space_bn.size(0) = e.size(0)
:return db_references:
[l_batch_refs0, l_batch_refs1, ..., l_batch_refsn]
l_batch_refsi stores the indices of the examples in bucket i in the current batch,
which is used later to restore the output results to the original order.
"""
db_action_spaces, db_references = [], []
assert not collapse_entities
bucket_ids, inbucket_ids = kg.get_bucket_and_inbucket_ids(
current_entity)
for b_key in set(bucket_ids.tolist()):
inthisbucket_indices = torch.nonzero(bucket_ids.eq(b_key)).squeeze(
).tolist()
if not isinstance(inthisbucket_indices, list):
inthisbucket_indices = [inthisbucket_indices]
inbucket_ids_of_entities_inthisbucket = inbucket_ids[
inthisbucket_indices].tolist()
bucket_action_space = kg.bucketid2ActionSpace[b_key]
e_b = current_entity[inthisbucket_indices]
obs_b = obs.get_slice(inthisbucket_indices)
as_bucket = bucket_action_space.get_slice(
inbucket_ids_of_entities_inthisbucket)
action_mask = self.apply_action_masks(as_bucket, e_b, obs_b, kg)
action_space_b = ActionSpace(as_bucket.forks, as_bucket.r_space,
as_bucket.e_space, action_mask)
db_action_spaces.append(action_space_b)
db_references.append(inthisbucket_indices)
return db_action_spaces, db_references
def apply_action_masks(self, acsp: ActionSpace, e, obs: Observation, kg:
KnowledgeGraph):
r_space, e_space, action_mask = (acsp.r_space, acsp.e_space, acsp.
action_mask)
e_s, q, e_t, last_step, last_r, seen_nodes = obs
ground_truth_edge_mask = self.get_ground_truth_edge_mask(e, r_space,
e_space, obs, kg)
action_mask -= ground_truth_edge_mask
self.validate_action_mask(action_mask)
if last_step:
false_negative_mask = self.get_false_negative_mask(e_space, e_s,
q, e_t, kg)
action_mask *= 1 - false_negative_mask
self.validate_action_mask(action_mask)
return action_mask
def get_ground_truth_edge_mask(self, current_nodes, r_space, e_space,
obs: Observation, kg: KnowledgeGraph):
s_e = obs.source_entity
t_e = obs.target_entity
q = obs.query_relation
def build_mask(source_nodes, target_nodes, relation):
return (current_nodes == source_nodes).unsqueeze(1) * (r_space ==
relation.unsqueeze(1)) * (e_space == target_nodes.unsqueeze(1))
mask = build_mask(s_e, t_e, q)
inv_q = kg.get_inv_relation_id(q)
inv_mask = build_mask(t_e, s_e, inv_q)
return ((mask + inv_mask) * (s_e.unsqueeze(1) != kg.dummy_e)).float()
def get_answer_mask(self, e_space, e_s, q, kg: KnowledgeGraph):
if kg.args.mask_test_false_negatives:
answer_vectors = kg.all_object_vectors
else:
answer_vectors = kg.train_object_vectors
answer_masks = []
for i in range(len(e_space)):
_e_s, _q = int(e_s[i]), int(q[i])
if not _e_s in answer_vectors or not _q in answer_vectors[_e_s]:
answer_vector = var_cuda(torch.LongTensor([[kg.num_entities]]))
else:
answer_vector = answer_vectors[_e_s][_q]
answer_mask = torch.sum(e_space[i].unsqueeze(0) ==
answer_vector, dim=0).long()
answer_masks.append(answer_mask)
answer_mask = torch.cat(answer_masks).view(len(e_space), -1)
return answer_mask
def get_false_negative_mask(self, e_space, e_s, q, e_t, kg: KnowledgeGraph
):
answer_mask = self.get_answer_mask(e_space, e_s, q, kg)
false_negative_mask = (answer_mask * (e_space != e_t.unsqueeze(1)).
long()).float()
return false_negative_mask
def validate_action_mask(self, action_mask):
action_mask_min = action_mask.min()
action_mask_max = action_mask.max()
assert action_mask_min == 0 or action_mask_min == 1
assert action_mask_max == 0 or action_mask_max == 1
def get_action_embedding(self, action: Action, kg: KnowledgeGraph):
"""
Return (batch) action embedding which is the concatenation of the embeddings of
the traversed edge and the target node.
:param action (r, e):
(Variable:batch) indices of the most recent action
- r is the most recently traversed edge
- e is the destination entity.
:param kg: Knowledge graph enviroment.
"""
relation_embedding = kg.get_relation_embeddings(action.rel)
if self.relation_only:
action_embedding = relation_embedding
else:
entity_embedding = kg.get_entity_embeddings(action.ent)
action_embedding = torch.cat([relation_embedding,
entity_embedding], dim=-1)
return action_embedding
def define_modules(self):
if self.relation_only:
input_dim = self.history_dim + self.relation_dim
elif self.relation_only_in_path:
input_dim = (self.history_dim + self.entity_dim * 2 + self.
relation_dim)
else:
input_dim = self.history_dim + self.entity_dim + self.relation_dim
self.W1 = nn.Linear(input_dim, self.action_dim)
self.W2 = nn.Linear(self.action_dim, self.action_dim)
self.W1Dropout = nn.Dropout(p=self.ff_dropout_rate)
self.W2Dropout = nn.Dropout(p=self.ff_dropout_rate)
if self.relation_only_in_path:
self.path_encoder = nn.LSTM(input_size=self.relation_dim,
hidden_size=self.history_dim, num_layers=self.
history_num_layers, batch_first=True)
else:
self.path_encoder = nn.LSTM(input_size=self.action_dim,
hidden_size=self.history_dim, num_layers=self.
history_num_layers, batch_first=True)
def initialize_modules(self):
if self.xavier_initialization:
nn.init.xavier_uniform_(self.W1.weight)
nn.init.xavier_uniform_(self.W2.weight)
for name, param in self.path_encoder.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.xavier_normal_(param)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BucketActions(NamedTuple):
action_spaces: List[ActionSpace]
action_dists: List[torch.Tensor]
inv_offset: Union[List[int], None]
entropy: torch.Tensor
def pad_and_cat_action_space(action_spaces: List[ActionSpace], inv_offset,
kg: KnowledgeGraph):
db_r_space, db_e_space, db_action_mask = [], [], []
forks = []
for acsp in action_spaces:
forks += acsp.forks
db_r_space.append(acsp.r_space)
db_e_space.append(acsp.e_space)
db_action_mask.append(acsp.action_mask)
r_space = ops.pad_and_cat(db_r_space, padding_value=kg.dummy_r)[inv_offset]
e_space = ops.pad_and_cat(db_e_space, padding_value=kg.dummy_e)[inv_offset]
action_mask = ops.pad_and_cat(db_action_mask, padding_value=0)[inv_offset]
action_space = ActionSpace(forks, r_space, e_space, action_mask)
return action_space
class GraphWalkAgent(nn.Module):
def __init__(self, args):
super(GraphWalkAgent, self).__init__()
self.model = args.model
self.relation_only = args.relation_only
self.history_dim = args.history_dim
self.history_num_layers = args.history_num_layers
self.entity_dim = args.entity_dim
self.relation_dim = args.relation_dim
if self.relation_only:
self.action_dim = args.relation_dim
else:
self.action_dim = args.entity_dim + args.relation_dim
self.ff_dropout_rate = args.ff_dropout_rate
self.rnn_dropout_rate = args.rnn_dropout_rate
self.action_dropout_rate = args.action_dropout_rate
self.xavier_initialization = args.xavier_initialization
self.relation_only_in_path = args.relation_only_in_path
self.path = None
self.define_modules()
self.initialize_modules()
self.fn = None
self.fn_kg = None
def transit(self, current_entity, obs: Observation, kg: KnowledgeGraph,
use_action_space_bucketing=True, merge_aspace_batching_outcome=False
) ->BucketActions:
"""
Compute the next action distribution based on
(a) the current node (entity) in KG and the query relation
(b) action history representation
:param current_entity: agent location (node) at step t.
:param obs: agent observation at step t.
e_s: source node
query_relation: query relation
last_step: If set, the agent is carrying out the last step.
last_r: label of edge traversed in the previous step
seen_nodes: notes seen on the paths
:param kg: Knowledge graph environment.
:param use_action_space_bucketing: If set, group the action space of different nodes
into buckets by their sizes.
:param merge_aspace_batch_outcome: If set, merge the transition probability distribution
generated of different action space bucket into a single batch.
:return
With aspace batching and without merging the outcomes:
db_outcomes: (Dynamic Batch) (action_space, action_dist)
action_space: (Batch) padded possible action indices
action_dist: (Batch) distribution over actions.
inv_offset: Indices to set the dynamic batching output back to the original order.
entropy: (Batch) entropy of action distribution.
Else:
action_dist: (Batch) distribution over actions.
entropy: (Batch) entropy of action distribution.
"""
X = self.encode_history(current_entity, obs.source_entity, kg, obs.
query_relation)
X = self.W1(X)
X = F.relu(X)
X = self.W1Dropout(X)
X = self.W2(X)
X2 = self.W2Dropout(X)
def policy_nn_fun(X2, acs: ActionSpace):
A = self.get_action_embedding(Action(acs.r_space, acs.e_space), kg)
action_dist = F.softmax(torch.squeeze(A @ torch.unsqueeze(X2, 2
), 2) - (1 - acs.action_mask) * ops.HUGE_INT, dim=-1)
return action_dist, ops.entropy(action_dist)
if use_action_space_bucketing:
action = self.do_it_with_bucketing(X2, current_entity, kg,
merge_aspace_batching_outcome, obs, policy_nn_fun)
else:
assert False
action = self.do_it_without_bucketing(X2, current_entity, kg,
obs, policy_nn_fun)
return action
def encode_history(self, current_entity, e_s, kg, query_relation):
embedded_q_rel = kg.get_relation_embeddings(query_relation)
encoded_history = self.path[-1][0][-1, :, :]
if self.relation_only:
X = torch.cat([encoded_history, embedded_q_rel], dim=-1)
elif self.relation_only_in_path:
E_s = kg.get_entity_embeddings(e_s)
E = kg.get_entity_embeddings(current_entity)
X = torch.cat([E, encoded_history, E_s, embedded_q_rel], dim=-1)
else:
E = kg.get_entity_embeddings(current_entity)
X = torch.cat([E, encoded_history, embedded_q_rel], dim=-1)
return X
def do_it_with_bucketing(self, X2, current_entity, kg,
merge_aspace_batching_outcome, obs: Observation, policy_nn_fun):
entropy_list = []
references = []
buckect_action_spaces, inthis_bucket_indizes = (self.
get_action_space_in_buckets(current_entity, obs, kg))
action_spaces = []
action_dists = []
for as_b, inthis_bucket in zip(buckect_action_spaces,
inthis_bucket_indizes):
X2_b = X2[inthis_bucket, :]
action_dist_b, entropy_b = policy_nn_fun(X2_b, as_b)
references.extend(inthis_bucket)
action_spaces.append(as_b)
action_dists.append(action_dist_b)
entropy_list.append(entropy_b)
inv_offset = [i for i, _ in sorted(enumerate(references), key=lambda
x: x[1])]
entropy = torch.cat(entropy_list, dim=0)[inv_offset]
action = BucketActions(action_spaces, action_dists, inv_offset, entropy
)
if merge_aspace_batching_outcome:
action_space = pad_and_cat_action_space(buckect_action_spaces,
inv_offset, kg)
action_dist = ops.pad_and_cat(action.action_dists, padding_value=0
)[inv_offset]
action = BucketActions([action_space], [action_dist], None, entropy
)
return action
def initialize_path(self, action: Action, kg: KnowledgeGraph):
if self.relation_only_in_path:
init_action_embedding = kg.get_relation_embeddings(action.rel)
else:
init_action_embedding = self.get_action_embedding(action, kg)
init_action_embedding.unsqueeze_(1)
init_h = zeros_var_cuda([self.history_num_layers, len(
init_action_embedding), self.history_dim])
init_c = zeros_var_cuda([self.history_num_layers, len(
init_action_embedding), self.history_dim])
self.path = [self.path_encoder(init_action_embedding, (init_h,
init_c))[1]]
def update_path(self, action: Action, kg: KnowledgeGraph, offset=None):
"""
Once an action was selected, update the action history.
:param action (r, e): (Variable:batch) indices of the most recent action
- r is the most recently traversed edge;
- e is the destination entity.
:param offset: (Variable:batch) if None, adjust path history with the given offset, used for search
:param KG: Knowledge graph environment.
"""
def offset_path_history(p, offset):
for i, x in enumerate(p):
if type(x) is tuple:
new_tuple = tuple([_x[:, offset, :] for _x in x])
p[i] = new_tuple
else:
p[i] = x[offset, :]
if self.relation_only_in_path:
action_embedding = kg.get_relation_embeddings(action.rel)
else:
action_embedding = self.get_action_embedding(action, kg)
if offset is not None:
offset_path_history(self.path, offset)
self.path.append(self.path_encoder(action_embedding.unsqueeze(1),
self.path[-1])[1])
def get_action_space_in_buckets(self, current_entity: torch.Tensor, obs:
Observation, kg: KnowledgeGraph, collapse_entities=False):
"""
To compute the search operation in batch, we group the action spaces of different states
(i.e. the set of outgoing edges of different nodes) into buckets based on their sizes to
save the memory consumption of paddings.
For example, in large knowledge graphs, certain nodes may have thousands of outgoing
edges while a long tail of nodes only have a small amount of outgoing edges. If a batch
contains a node with 1000 outgoing edges while the rest of the nodes have a maximum of
5 outgoing edges, we need to pad the action spaces of all nodes to 1000, which consumes
lots of memory.
With the bucketing approach, each bucket is padded separately. In this case the node
with 1000 outgoing edges will be in its own bucket and the rest of the nodes will suffer
little from padding the action space to 5.
Once we grouped the action spaces in buckets, the policy network computation is carried
out for every bucket iteratively. Once all the computation is done, we concatenate the
results of all buckets and restore their original order in the batch. The computation
outside the policy network module is thus unaffected.
:return db_action_spaces:
[((r_space_b0, r_space_b0), action_mask_b0),
((r_space_b1, r_space_b1), action_mask_b1),
...
((r_space_bn, r_space_bn), action_mask_bn)]
A list of action space tensor representations grouped in n buckets, s.t.
r_space_b0.size(0) + r_space_b1.size(0) + ... + r_space_bn.size(0) = e.size(0)
:return db_references:
[l_batch_refs0, l_batch_refs1, ..., l_batch_refsn]
l_batch_refsi stores the indices of the examples in bucket i in the current batch,
which is used later to restore the output results to the original order.
"""
db_action_spaces, db_references = [], []
assert not collapse_entities
bucket_ids, inbucket_ids = kg.get_bucket_and_inbucket_ids(
current_entity)
for b_key in set(bucket_ids.tolist()):
inthisbucket_indices = torch.nonzero(bucket_ids.eq(b_key)).squeeze(
).tolist()
if not isinstance(inthisbucket_indices, list):
inthisbucket_indices = [inthisbucket_indices]
inbucket_ids_of_entities_inthisbucket = inbucket_ids[
inthisbucket_indices].tolist()
bucket_action_space = kg.bucketid2ActionSpace[b_key]
e_b = current_entity[inthisbucket_indices]
obs_b = obs.get_slice(inthisbucket_indices)
as_bucket = bucket_action_space.get_slice(
inbucket_ids_of_entities_inthisbucket)
action_mask = self.apply_action_masks(as_bucket, e_b, obs_b, kg)
action_space_b = ActionSpace(as_bucket.forks, as_bucket.r_space,
as_bucket.e_space, action_mask)
db_action_spaces.append(action_space_b)
db_references.append(inthisbucket_indices)
return db_action_spaces, db_references
def apply_action_masks(self, acsp: ActionSpace, e, obs: Observation, kg:
KnowledgeGraph):
r_space, e_space, action_mask = (acsp.r_space, acsp.e_space, acsp.
action_mask)
e_s, q, e_t, last_step, last_r, seen_nodes = obs
ground_truth_edge_mask = self.get_ground_truth_edge_mask(e, r_space,
e_space, obs, kg)
action_mask -= ground_truth_edge_mask
self.validate_action_mask(action_mask)
if last_step:
false_negative_mask = self.get_false_negative_mask(e_space, e_s,
q, e_t, kg)
action_mask *= 1 - false_negative_mask
self.validate_action_mask(action_mask)
return action_mask
def get_ground_truth_edge_mask(self, current_nodes, r_space, e_space,
obs: Observation, kg: KnowledgeGraph):
s_e = obs.source_entity
t_e = obs.target_entity
q = obs.query_relation
def build_mask(source_nodes, target_nodes, relation):
return (current_nodes == source_nodes).unsqueeze(1) * (r_space ==
relation.unsqueeze(1)) * (e_space == target_nodes.unsqueeze(1))
mask = build_mask(s_e, t_e, q)
inv_q = kg.get_inv_relation_id(q)
inv_mask = build_mask(t_e, s_e, inv_q)
return ((mask + inv_mask) * (s_e.unsqueeze(1) != kg.dummy_e)).float()
def get_answer_mask(self, e_space, e_s, q, kg: KnowledgeGraph):
if kg.args.mask_test_false_negatives:
answer_vectors = kg.all_object_vectors
else:
answer_vectors = kg.train_object_vectors
answer_masks = []
for i in range(len(e_space)):
_e_s, _q = int(e_s[i]), int(q[i])
if not _e_s in answer_vectors or not _q in answer_vectors[_e_s]:
answer_vector = var_cuda(torch.LongTensor([[kg.num_entities]]))
else:
answer_vector = answer_vectors[_e_s][_q]
answer_mask = torch.sum(e_space[i].unsqueeze(0) ==
answer_vector, dim=0).long()
answer_masks.append(answer_mask)
answer_mask = torch.cat(answer_masks).view(len(e_space), -1)
return answer_mask
def get_false_negative_mask(self, e_space, e_s, q, e_t, kg: KnowledgeGraph
):
answer_mask = self.get_answer_mask(e_space, e_s, q, kg)
false_negative_mask = (answer_mask * (e_space != e_t.unsqueeze(1)).
long()).float()
return false_negative_mask
def validate_action_mask(self, action_mask):
action_mask_min = action_mask.min()
action_mask_max = action_mask.max()
assert action_mask_min == 0 or action_mask_min == 1
assert action_mask_max == 0 or action_mask_max == 1
def get_action_embedding(self, action: Action, kg: KnowledgeGraph):
"""
Return (batch) action embedding which is the concatenation of the embeddings of
the traversed edge and the target node.
:param action (r, e):
(Variable:batch) indices of the most recent action
- r is the most recently traversed edge
- e is the destination entity.
:param kg: Knowledge graph enviroment.
"""
relation_embedding = kg.get_relation_embeddings(action.rel)
if self.relation_only:
action_embedding = relation_embedding
else:
entity_embedding = kg.get_entity_embeddings(action.ent)
action_embedding = torch.cat([relation_embedding,
entity_embedding], dim=-1)
return action_embedding
def define_modules(self):
if self.relation_only:
input_dim = self.history_dim + self.relation_dim
elif self.relation_only_in_path:
input_dim = (self.history_dim + self.entity_dim * 2 + self.
relation_dim)
else:
input_dim = self.history_dim + self.entity_dim + self.relation_dim
self.W1 = nn.Linear(input_dim, self.action_dim)
self.W2 = nn.Linear(self.action_dim, self.action_dim)
self.W1Dropout = nn.Dropout(p=self.ff_dropout_rate)
self.W2Dropout = nn.Dropout(p=self.ff_dropout_rate)
if self.relation_only_in_path:
self.path_encoder = nn.LSTM(input_size=self.relation_dim,
hidden_size=self.history_dim, num_layers=self.
history_num_layers, batch_first=True)
else:
self.path_encoder = nn.LSTM(input_size=self.action_dim,
hidden_size=self.history_dim, num_layers=self.
history_num_layers, batch_first=True)
def initialize_modules(self):
if self.xavier_initialization:
nn.init.xavier_uniform_(self.W1.weight)
nn.init.xavier_uniform_(self.W2.weight)
for name, param in self.path_encoder.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.xavier_normal_(param)
<|reserved_special_token_1|>
"""
Copyright (c) 2018, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Graph Search Policy Network.
"""
from typing import List, NamedTuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
import src.utils.ops as ops
from src.knowledge_graph import KnowledgeGraph, ActionSpace, Observation, Action
from src.utils.ops import var_cuda, zeros_var_cuda
class BucketActions(NamedTuple):
action_spaces: List[ActionSpace]
action_dists: List[torch.Tensor]
inv_offset: Union[List[int], None]
entropy: torch.Tensor
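# BucketActions bundles the per-bucket policy outputs: one ActionSpace and one
# action distribution per bucket, the inverse offset that restores the original
# batch order (None once the buckets have been merged), and the per-example
# entropy of the action distribution.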
def pad_and_cat_action_space(
action_spaces: List[ActionSpace], inv_offset, kg: KnowledgeGraph
):
db_r_space, db_e_space, db_action_mask = [], [], []
forks = []
for acsp in action_spaces:
forks += acsp.forks
db_r_space.append(acsp.r_space)
db_e_space.append(acsp.e_space)
db_action_mask.append(acsp.action_mask)
r_space = ops.pad_and_cat(db_r_space, padding_value=kg.dummy_r)[inv_offset]
e_space = ops.pad_and_cat(db_e_space, padding_value=kg.dummy_e)[inv_offset]
action_mask = ops.pad_and_cat(db_action_mask, padding_value=0)[inv_offset]
action_space = ActionSpace(forks, r_space, e_space, action_mask)
return action_space
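# Illustrative sketch (not from the original source): ops.pad_and_cat, as used
# above, is assumed to right-pad a list of differently-sized bucket tensors to
# a common width and concatenate them along the batch axis, e.g.
#   b0 = torch.tensor([[1, 2]])        # bucket with action-space width 2
#   b1 = torch.tensor([[3, 4, 5, 6]])  # bucket with action-space width 4
#   ops.pad_and_cat([b0, b1], padding_value=0)
#   # -> tensor([[1, 2, 0, 0],
#   #            [3, 4, 5, 6]])  (then reindexed by inv_offset)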
class GraphWalkAgent(nn.Module):
def __init__(self, args):
super(GraphWalkAgent, self).__init__()
self.model = args.model
self.relation_only = args.relation_only
self.history_dim = args.history_dim
self.history_num_layers = args.history_num_layers
self.entity_dim = args.entity_dim
self.relation_dim = args.relation_dim
if self.relation_only:
self.action_dim = args.relation_dim
else:
self.action_dim = args.entity_dim + args.relation_dim
self.ff_dropout_rate = args.ff_dropout_rate
self.rnn_dropout_rate = args.rnn_dropout_rate
self.action_dropout_rate = args.action_dropout_rate
self.xavier_initialization = args.xavier_initialization
self.relation_only_in_path = args.relation_only_in_path
self.path = None
# Set policy network modules
self.define_modules()
self.initialize_modules()
# Fact network modules
self.fn = None
self.fn_kg = None
def transit(
self,
current_entity,
obs: Observation,
kg: KnowledgeGraph,
use_action_space_bucketing=True,
merge_aspace_batching_outcome=False,
) -> BucketActions:
"""
Compute the next action distribution based on
(a) the current node (entity) in KG and the query relation
(b) action history representation
:param current_entity: agent location (node) at step t.
:param obs: agent observation at step t.
e_s: source node
query_relation: query relation
last_step: If set, the agent is carrying out the last step.
last_r: label of edge traversed in the previous step
seen_nodes: nodes seen on the paths
:param kg: Knowledge graph environment.
:param use_action_space_bucketing: If set, group the action space of different nodes
into buckets by their sizes.
:param merge_aspace_batching_outcome: If set, merge the transition probability distributions
    generated for the different action-space buckets into a single batch.
:return
With aspace batching and without merging the outcomes:
db_outcomes: (Dynamic Batch) (action_space, action_dist)
action_space: (Batch) padded possible action indices
action_dist: (Batch) distribution over actions.
inv_offset: Indices to set the dynamic batching output back to the original order.
entropy: (Batch) entropy of action distribution.
Else:
action_dist: (Batch) distribution over actions.
entropy: (Batch) entropy of action distribution.
"""
# Representation of the current state (current node and other observations)
X = self.encode_history(
current_entity, obs.source_entity, kg, obs.query_relation
)
# MLP
X = self.W1(X)
X = F.relu(X)
X = self.W1Dropout(X)
X = self.W2(X)
X2 = self.W2Dropout(X)
def policy_nn_fun(X2, acs: ActionSpace):
A = self.get_action_embedding(Action(acs.r_space, acs.e_space), kg)
action_dist = F.softmax(
torch.squeeze(A @ torch.unsqueeze(X2, 2), 2)
- (1 - acs.action_mask) * ops.HUGE_INT,
dim=-1,
)
# action_dist = ops.weighted_softmax(torch.squeeze(A @ torch.unsqueeze(X2, 2), 2), action_mask)
return action_dist, ops.entropy(action_dist)
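        # In policy_nn_fun above, A @ X2.unsqueeze(2) scores every candidate
        # action against the state vector; subtracting (1 - action_mask) *
        # HUGE_INT drives the logits of invalid actions toward -inf, so the
        # softmax assigns them ~zero probability.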
if use_action_space_bucketing:
action = self.do_it_with_bucketing(
X2,
current_entity,
kg,
merge_aspace_batching_outcome,
obs,
policy_nn_fun,
)
else:
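            # The non-bucketed code path is currently disabled; a commented-out
            # sketch of do_it_without_bucketing is kept below for reference.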
assert False
action = self.do_it_without_bucketing(
X2, current_entity, kg, obs, policy_nn_fun
)
return action
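    # Hedged usage sketch (assumes a rollout loop that produces Observation
    # objects and samples from the returned distributions):
    #   agent.initialize_path(start_action, kg)
    #   for t in range(num_steps):
    #       acts = agent.transit(e_t, obs_t, kg, use_action_space_bucketing=True)
    #       ...sample an action per example from acts.action_dists...
    #       agent.update_path(sampled_action, kg)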
def encode_history(self, current_entity, e_s, kg, query_relation):
embedded_q_rel = kg.get_relation_embeddings(query_relation)
encoded_history = self.path[-1][0][-1, :, :]
if self.relation_only:
X = torch.cat([encoded_history, embedded_q_rel], dim=-1)
elif self.relation_only_in_path:
E_s = kg.get_entity_embeddings(e_s)
E = kg.get_entity_embeddings(current_entity)
X = torch.cat([E, encoded_history, E_s, embedded_q_rel], dim=-1)
else:
E = kg.get_entity_embeddings(current_entity)
X = torch.cat([E, encoded_history, embedded_q_rel], dim=-1)
return X
    # def do_it_without_bucketing(self, X2, current_entity, kg, obs, policy_nn_fun):
    #     def get_action_space(e, obs, kg):
    #         r_space = kg.action_space["relation-space"][e]
    #         e_space = kg.action_space["entity-space"][e]
    #         action_mask = kg.action_space["action-mask"][e]
    #         acsp = ActionSpace(None, r_space, e_space, action_mask)  # forks unavailable here
    #         action_mask = self.apply_action_masks(acsp, e, obs, kg)
    #         return ActionSpace(None, r_space, e_space, action_mask)
    #
    #     action_space = get_action_space(current_entity, obs, kg)
    #     action_dist, entropy = policy_nn_fun(X2, action_space)
    #     db_outcomes = [(action_space, action_dist)]
    #     inv_offset = None
    #     return db_outcomes, entropy, inv_offset
def do_it_with_bucketing(
self,
X2,
current_entity,
kg,
merge_aspace_batching_outcome,
obs: Observation,
policy_nn_fun,
):
entropy_list = []
references = []
        bucket_action_spaces, inthis_bucket_indices = self.get_action_space_in_buckets(
            current_entity, obs, kg
        )
action_spaces = []
action_dists = []
        for as_b, inthis_bucket in zip(bucket_action_spaces, inthis_bucket_indices):
X2_b = X2[inthis_bucket, :]
action_dist_b, entropy_b = policy_nn_fun(X2_b, as_b)
references.extend(inthis_bucket)
action_spaces.append(as_b)
action_dists.append(action_dist_b)
entropy_list.append(entropy_b)
inv_offset = [i for i, _ in sorted(enumerate(references), key=lambda x: x[1])]
entropy = torch.cat(entropy_list, dim=0)[inv_offset]
action = BucketActions(action_spaces, action_dists, inv_offset, entropy)
if merge_aspace_batching_outcome:
            action_space = pad_and_cat_action_space(
                bucket_action_spaces, inv_offset, kg
            )
action_dist = ops.pad_and_cat(action.action_dists, padding_value=0)[
inv_offset
]
action = BucketActions([action_space], [action_dist], None, entropy)
return action
def initialize_path(self, action: Action, kg: KnowledgeGraph):
# [batch_size, action_dim]
if self.relation_only_in_path:
init_action_embedding = kg.get_relation_embeddings(action.rel)
else:
init_action_embedding = self.get_action_embedding(action, kg)
init_action_embedding.unsqueeze_(1)
# [num_layers, batch_size, dim]
init_h = zeros_var_cuda(
[self.history_num_layers, len(init_action_embedding), self.history_dim]
)
init_c = zeros_var_cuda(
[self.history_num_layers, len(init_action_embedding), self.history_dim]
)
self.path = [self.path_encoder(init_action_embedding, (init_h, init_c))[1]]
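    # Note: self.path stores the LSTM (h_n, c_n) state tuples, one entry per
    # step taken; encode_history reads the top layer of the most recent h_n.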
def update_path(self, action: Action, kg: KnowledgeGraph, offset=None):
"""
Once an action was selected, update the action history.
:param action (r, e): (Variable:batch) indices of the most recent action
- r is the most recently traversed edge;
- e is the destination entity.
:param offset: (Variable:batch) if not None, adjust the path history with the given offset (used for search).
:param kg: Knowledge graph environment.
"""
def offset_path_history(p, offset):
for i, x in enumerate(p):
if type(x) is tuple:
new_tuple = tuple([_x[:, offset, :] for _x in x])
p[i] = new_tuple
else:
p[i] = x[offset, :]
# update action history
if self.relation_only_in_path:
action_embedding = kg.get_relation_embeddings(action.rel)
else:
action_embedding = self.get_action_embedding(action, kg)
if offset is not None:
offset_path_history(self.path, offset)
self.path.append(
self.path_encoder(action_embedding.unsqueeze(1), self.path[-1])[1]
)
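    # Offset mechanics (illustrative assumption): during beam search, `offset`
    # holds the indices of surviving beams, e.g. keeping beams [0, 0, 2] of a
    # batch of 3 duplicates example 0's history and drops example 1's, so the
    # stored (h, c) states stay aligned with the expanded beam batch.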
def get_action_space_in_buckets(
self,
current_entity: torch.Tensor,
obs: Observation,
kg: KnowledgeGraph,
collapse_entities=False,
):
"""
To compute the search operation in batch, we group the action spaces of different states
(i.e. the set of outgoing edges of different nodes) into buckets based on their sizes to
save the memory consumption of paddings.
For example, in large knowledge graphs, certain nodes may have thousands of outgoing
edges while a long tail of nodes only have a small amount of outgoing edges. If a batch
contains a node with 1000 outgoing edges while the rest of the nodes have a maximum of
5 outgoing edges, we need to pad the action spaces of all nodes to 1000, which consumes
lots of memory.
With the bucketing approach, each bucket is padded separately. In this case the node
with 1000 outgoing edges will be in its own bucket and the rest of the nodes will suffer
little from padding the action space to 5.
Once we grouped the action spaces in buckets, the policy network computation is carried
out for every bucket iteratively. Once all the computation is done, we concatenate the
results of all buckets and restore their original order in the batch. The computation
outside the policy network module is thus unaffected.
:return db_action_spaces:
[((r_space_b0, e_space_b0), action_mask_b0),
((r_space_b1, e_space_b1), action_mask_b1),
...
((r_space_bn, e_space_bn), action_mask_bn)]
A list of action space tensor representations grouped in n buckets, s.t.
r_space_b0.size(0) + r_space_b1.size(0) + ... + r_space_bn.size(0) = e.size(0)
:return db_references:
[l_batch_refs0, l_batch_refs1, ..., l_batch_refsn]
l_batch_refsi stores the indices of the examples in bucket i in the current batch,
which is used later to restore the output results to the original order.
"""
db_action_spaces, db_references = [], []
assert not collapse_entities # NotImplementedError
bucket_ids, inbucket_ids = kg.get_bucket_and_inbucket_ids(current_entity)
for b_key in set(bucket_ids.tolist()):
inthisbucket_indices = (
torch.nonzero(bucket_ids.eq(b_key)).squeeze().tolist()
)
            if not isinstance(inthisbucket_indices, list):  # single match: squeeze().tolist() returned a bare int
inthisbucket_indices = [inthisbucket_indices]
inbucket_ids_of_entities_inthisbucket = inbucket_ids[
inthisbucket_indices
].tolist()
bucket_action_space = kg.bucketid2ActionSpace[b_key]
e_b = current_entity[inthisbucket_indices]
obs_b = obs.get_slice(inthisbucket_indices)
as_bucket = bucket_action_space.get_slice(
inbucket_ids_of_entities_inthisbucket
)
action_mask = self.apply_action_masks(as_bucket, e_b, obs_b, kg)
action_space_b = ActionSpace(
as_bucket.forks, as_bucket.r_space, as_bucket.e_space, action_mask
)
db_action_spaces.append(action_space_b)
db_references.append(inthisbucket_indices)
return db_action_spaces, db_references
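    # Toy example (illustrative only): if current_entity = [a, b, c] with a in
    # bucket 0 and b, c in bucket 3, this returns
    #   db_action_spaces = [<bucket-0 aspace for a>, <bucket-3 aspace for b, c>]
    #   db_references    = [[0], [1, 2]]
    # and do_it_with_bucketing later restores batch order via inv_offset.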
def apply_action_masks(
self, acsp: ActionSpace, e, obs: Observation, kg: KnowledgeGraph
):
r_space, e_space, action_mask = acsp.r_space, acsp.e_space, acsp.action_mask
e_s, q, e_t, last_step, last_r, seen_nodes = obs
# Prevent the agent from selecting the ground truth edge
ground_truth_edge_mask = self.get_ground_truth_edge_mask(
e, r_space, e_space, obs, kg
)
action_mask -= ground_truth_edge_mask
self.validate_action_mask(action_mask)
# Mask out false negatives in the final step
if last_step:
false_negative_mask = self.get_false_negative_mask(e_space, e_s, q, e_t, kg)
action_mask *= 1 - false_negative_mask
self.validate_action_mask(action_mask)
# Prevent the agent from stopping in the middle of a path
# stop_mask = (last_r == NO_OP_RELATION_ID).unsqueeze(1).float()
# action_mask = (1 - stop_mask) * action_mask + stop_mask * (r_space == NO_OP_RELATION_ID).float()
# Prevent loops
# Note: avoid duplicate removal of self-loops
# seen_nodes_b = seen_nodes[l_batch_refs]
# loop_mask_b = (((seen_nodes_b.unsqueeze(1) == e_space.unsqueeze(2)).sum(2) > 0) *
# (r_space != NO_OP_RELATION_ID)).float()
# action_mask *= (1 - loop_mask_b)
return action_mask
def get_ground_truth_edge_mask(
self, current_nodes, r_space, e_space, obs: Observation, kg: KnowledgeGraph
):
s_e = obs.source_entity
t_e = obs.target_entity
q = obs.query_relation
def build_mask(source_nodes, target_nodes, relation):
return (
(current_nodes == source_nodes).unsqueeze(1)
* (r_space == relation.unsqueeze(1))
* (e_space == target_nodes.unsqueeze(1))
)
mask = build_mask(s_e, t_e, q)
inv_q = kg.get_inv_relation_id(q)
inv_mask = build_mask(t_e, s_e, inv_q)
return ((mask + inv_mask) * (s_e.unsqueeze(1) != kg.dummy_e)).float()
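    # Shape note: current_nodes and the observation entities are [batch] while
    # r_space/e_space are [batch, action_space]; the unsqueeze(1) calls
    # broadcast the comparisons across the action dimension, so the returned
    # mask is a [batch, action_space] 0/1 tensor marking the query edge and its
    # inverse (zeroed for dummy source entities).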
def get_answer_mask(self, e_space, e_s, q, kg: KnowledgeGraph):
if kg.args.mask_test_false_negatives:
answer_vectors = kg.all_object_vectors
else:
answer_vectors = kg.train_object_vectors
answer_masks = []
for i in range(len(e_space)):
_e_s, _q = int(e_s[i]), int(q[i])
            if _e_s not in answer_vectors or _q not in answer_vectors[_e_s]:
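                # No recorded answers for (e_s, q): fall back to an out-of-range
                # entity id so the equality check below never matches.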
answer_vector = var_cuda(torch.LongTensor([[kg.num_entities]]))
else:
answer_vector = answer_vectors[_e_s][_q]
answer_mask = torch.sum(
e_space[i].unsqueeze(0) == answer_vector, dim=0
).long()
answer_masks.append(answer_mask)
answer_mask = torch.cat(answer_masks).view(len(e_space), -1)
return answer_mask
def get_false_negative_mask(self, e_space, e_s, q, e_t, kg: KnowledgeGraph):
answer_mask = self.get_answer_mask(e_space, e_s, q, kg)
        # This is a trick applied during training where we convert a multi-answer prediction problem into several
# single-answer prediction problems. By masking out the other answers in the training set, we are forcing
# the agent to walk towards a particular answer.
# This trick does not affect inference on the test set: at inference time the ground truth answer will not
# appear in the answer mask. This can be checked by uncommenting the following assertion statement.
# Note that the assertion statement can trigger in the last batch if you're using a batch_size > 1 since
# we append dummy examples to the last batch to make it the required batch size.
# The assertion statement will also trigger in the dev set inference of NELL-995 since we randomly
# sampled the dev set from the training data.
# assert(float((answer_mask * (e_space == e_t.unsqueeze(1)).long()).sum()) == 0)
false_negative_mask = (
answer_mask * (e_space != e_t.unsqueeze(1)).long()
).float()
return false_negative_mask
def validate_action_mask(self, action_mask):
action_mask_min = action_mask.min()
action_mask_max = action_mask.max()
assert action_mask_min == 0 or action_mask_min == 1
assert action_mask_max == 0 or action_mask_max == 1
def get_action_embedding(self, action: Action, kg: KnowledgeGraph):
"""
Return (batch) action embedding which is the concatenation of the embeddings of
the traversed edge and the target node.
:param action (r, e):
(Variable:batch) indices of the most recent action
- r is the most recently traversed edge
- e is the destination entity.
:param kg: Knowledge graph environment.
"""
relation_embedding = kg.get_relation_embeddings(action.rel)
if self.relation_only:
action_embedding = relation_embedding
else:
entity_embedding = kg.get_entity_embeddings(action.ent)
action_embedding = torch.cat([relation_embedding, entity_embedding], dim=-1)
return action_embedding
def define_modules(self):
if self.relation_only:
input_dim = self.history_dim + self.relation_dim
elif self.relation_only_in_path:
input_dim = self.history_dim + self.entity_dim * 2 + self.relation_dim
else:
input_dim = self.history_dim + self.entity_dim + self.relation_dim
self.W1 = nn.Linear(input_dim, self.action_dim)
self.W2 = nn.Linear(self.action_dim, self.action_dim)
self.W1Dropout = nn.Dropout(p=self.ff_dropout_rate)
self.W2Dropout = nn.Dropout(p=self.ff_dropout_rate)
if self.relation_only_in_path:
self.path_encoder = nn.LSTM(
input_size=self.relation_dim,
hidden_size=self.history_dim,
num_layers=self.history_num_layers,
batch_first=True,
)
else:
self.path_encoder = nn.LSTM(
input_size=self.action_dim,
hidden_size=self.history_dim,
num_layers=self.history_num_layers,
batch_first=True,
)
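    # Dimension summary (derived from the branches above): the policy MLP maps
    # input_dim -> action_dim -> action_dim, while the path encoder consumes
    # relation-only embeddings (relation_dim) or full action embeddings
    # (entity_dim + relation_dim) and emits history_dim-sized hidden states.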
def initialize_modules(self):
if self.xavier_initialization:
nn.init.xavier_uniform_(self.W1.weight)
nn.init.xavier_uniform_(self.W2.weight)
for name, param in self.path_encoder.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0.0)
elif "weight" in name:
nn.init.xavier_normal_(param)
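

if __name__ == "__main__":
    # Minimal smoke test (illustrative, not part of the original module; it
    # assumes the src.* imports at the top of this file resolve). It builds the
    # policy modules with hypothetical hyperparameters and pushes a random
    # state encoding through the feed-forward stack.
    from types import SimpleNamespace

    args = SimpleNamespace(
        model="point",  # hypothetical model name
        relation_only=False,
        history_dim=64,
        history_num_layers=2,
        entity_dim=32,
        relation_dim=32,
        ff_dropout_rate=0.1,
        rnn_dropout_rate=0.0,
        action_dropout_rate=0.1,
        xavier_initialization=True,
        relation_only_in_path=False,
    )
    agent = GraphWalkAgent(args)
    # input_dim = history_dim + entity_dim + relation_dim = 128 for this config
    x = torch.randn(5, 128)
    h = agent.W2Dropout(agent.W2(agent.W1Dropout(F.relu(agent.W1(x)))))
    print(h.shape)  # expected: torch.Size([5, 64]), i.e. [batch, action_dim]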
torch.cat([E, encoded_history, embedded_q_rel], dim=-1)\n return X\n\n def do_it_with_bucketing(self, X2, current_entity, kg,\n merge_aspace_batching_outcome, obs: Observation, policy_nn_fun):\n entropy_list = []\n references = []\n buckect_action_spaces, inthis_bucket_indizes = (self.\n get_action_space_in_buckets(current_entity, obs, kg))\n action_spaces = []\n action_dists = []\n for as_b, inthis_bucket in zip(buckect_action_spaces,\n inthis_bucket_indizes):\n X2_b = X2[inthis_bucket, :]\n action_dist_b, entropy_b = policy_nn_fun(X2_b, as_b)\n references.extend(inthis_bucket)\n action_spaces.append(as_b)\n action_dists.append(action_dist_b)\n entropy_list.append(entropy_b)\n inv_offset = [i for i, _ in sorted(enumerate(references), key=lambda\n x: x[1])]\n entropy = torch.cat(entropy_list, dim=0)[inv_offset]\n action = BucketActions(action_spaces, action_dists, inv_offset, entropy\n )\n if merge_aspace_batching_outcome:\n action_space = pad_and_cat_action_space(buckect_action_spaces,\n inv_offset, kg)\n action_dist = ops.pad_and_cat(action.action_dists, padding_value=0\n )[inv_offset]\n action = BucketActions([action_space], [action_dist], None, entropy\n )\n return action\n\n def initialize_path(self, action: Action, kg: KnowledgeGraph):\n if self.relation_only_in_path:\n init_action_embedding = kg.get_relation_embeddings(action.rel)\n else:\n init_action_embedding = self.get_action_embedding(action, kg)\n init_action_embedding.unsqueeze_(1)\n init_h = zeros_var_cuda([self.history_num_layers, len(\n init_action_embedding), self.history_dim])\n init_c = zeros_var_cuda([self.history_num_layers, len(\n init_action_embedding), self.history_dim])\n self.path = [self.path_encoder(init_action_embedding, (init_h,\n init_c))[1]]\n\n def update_path(self, action: Action, kg: KnowledgeGraph, offset=None):\n \"\"\"\n Once an action was selected, update the action history.\n :param action (r, e): (Variable:batch) indices of the most recent action\n - r is the most recently traversed edge;\n - e is the destination entity.\n :param offset: (Variable:batch) if None, adjust path history with the given offset, used for search\n :param KG: Knowledge graph environment.\n \"\"\"\n\n def offset_path_history(p, offset):\n for i, x in enumerate(p):\n if type(x) is tuple:\n new_tuple = tuple([_x[:, offset, :] for _x in x])\n p[i] = new_tuple\n else:\n p[i] = x[offset, :]\n if self.relation_only_in_path:\n action_embedding = kg.get_relation_embeddings(action.rel)\n else:\n action_embedding = self.get_action_embedding(action, kg)\n if offset is not None:\n offset_path_history(self.path, offset)\n self.path.append(self.path_encoder(action_embedding.unsqueeze(1),\n self.path[-1])[1])\n\n def get_action_space_in_buckets(self, current_entity: torch.Tensor, obs:\n Observation, kg: KnowledgeGraph, collapse_entities=False):\n \"\"\"\n To compute the search operation in batch, we group the action spaces of different states\n (i.e. the set of outgoing edges of different nodes) into buckets based on their sizes to\n save the memory consumption of paddings.\n\n For example, in large knowledge graphs, certain nodes may have thousands of outgoing\n edges while a long tail of nodes only have a small amount of outgoing edges. If a batch\n contains a node with 1000 outgoing edges while the rest of the nodes have a maximum of\n 5 outgoing edges, we need to pad the action spaces of all nodes to 1000, which consumes\n lots of memory.\n\n With the bucketing approach, each bucket is padded separately. 
In this case the node\n with 1000 outgoing edges will be in its own bucket and the rest of the nodes will suffer\n little from padding the action space to 5.\n\n Once we grouped the action spaces in buckets, the policy network computation is carried\n out for every bucket iteratively. Once all the computation is done, we concatenate the\n results of all buckets and restore their original order in the batch. The computation\n outside the policy network module is thus unaffected.\n\n :return db_action_spaces:\n [((r_space_b0, r_space_b0), action_mask_b0),\n ((r_space_b1, r_space_b1), action_mask_b1),\n ...\n ((r_space_bn, r_space_bn), action_mask_bn)]\n\n A list of action space tensor representations grouped in n buckets, s.t.\n r_space_b0.size(0) + r_space_b1.size(0) + ... + r_space_bn.size(0) = e.size(0)\n\n :return db_references:\n [l_batch_refs0, l_batch_refs1, ..., l_batch_refsn]\n l_batch_refsi stores the indices of the examples in bucket i in the current batch,\n which is used later to restore the output results to the original order.\n \"\"\"\n db_action_spaces, db_references = [], []\n assert not collapse_entities\n bucket_ids, inbucket_ids = kg.get_bucket_and_inbucket_ids(\n current_entity)\n for b_key in set(bucket_ids.tolist()):\n inthisbucket_indices = torch.nonzero(bucket_ids.eq(b_key)).squeeze(\n ).tolist()\n if not isinstance(inthisbucket_indices, list):\n inthisbucket_indices = [inthisbucket_indices]\n inbucket_ids_of_entities_inthisbucket = inbucket_ids[\n inthisbucket_indices].tolist()\n bucket_action_space = kg.bucketid2ActionSpace[b_key]\n e_b = current_entity[inthisbucket_indices]\n obs_b = obs.get_slice(inthisbucket_indices)\n as_bucket = bucket_action_space.get_slice(\n inbucket_ids_of_entities_inthisbucket)\n action_mask = self.apply_action_masks(as_bucket, e_b, obs_b, kg)\n action_space_b = ActionSpace(as_bucket.forks, as_bucket.r_space,\n as_bucket.e_space, action_mask)\n db_action_spaces.append(action_space_b)\n db_references.append(inthisbucket_indices)\n return db_action_spaces, db_references\n\n def apply_action_masks(self, acsp: ActionSpace, e, obs: Observation, kg:\n KnowledgeGraph):\n r_space, e_space, action_mask = (acsp.r_space, acsp.e_space, acsp.\n action_mask)\n e_s, q, e_t, last_step, last_r, seen_nodes = obs\n ground_truth_edge_mask = self.get_ground_truth_edge_mask(e, r_space,\n e_space, obs, kg)\n action_mask -= ground_truth_edge_mask\n self.validate_action_mask(action_mask)\n if last_step:\n false_negative_mask = self.get_false_negative_mask(e_space, e_s,\n q, e_t, kg)\n action_mask *= 1 - false_negative_mask\n self.validate_action_mask(action_mask)\n return action_mask\n\n def get_ground_truth_edge_mask(self, current_nodes, r_space, e_space,\n obs: Observation, kg: KnowledgeGraph):\n s_e = obs.source_entity\n t_e = obs.target_entity\n q = obs.query_relation\n\n def build_mask(source_nodes, target_nodes, relation):\n return (current_nodes == source_nodes).unsqueeze(1) * (r_space ==\n relation.unsqueeze(1)) * (e_space == target_nodes.unsqueeze(1))\n mask = build_mask(s_e, t_e, q)\n inv_q = kg.get_inv_relation_id(q)\n inv_mask = build_mask(t_e, s_e, inv_q)\n return ((mask + inv_mask) * (s_e.unsqueeze(1) != kg.dummy_e)).float()\n\n def get_answer_mask(self, e_space, e_s, q, kg: KnowledgeGraph):\n if kg.args.mask_test_false_negatives:\n answer_vectors = kg.all_object_vectors\n else:\n answer_vectors = kg.train_object_vectors\n answer_masks = []\n for i in range(len(e_space)):\n _e_s, _q = int(e_s[i]), int(q[i])\n if not _e_s in answer_vectors or not _q 
in answer_vectors[_e_s]:\n answer_vector = var_cuda(torch.LongTensor([[kg.num_entities]]))\n else:\n answer_vector = answer_vectors[_e_s][_q]\n answer_mask = torch.sum(e_space[i].unsqueeze(0) ==\n answer_vector, dim=0).long()\n answer_masks.append(answer_mask)\n answer_mask = torch.cat(answer_masks).view(len(e_space), -1)\n return answer_mask\n\n def get_false_negative_mask(self, e_space, e_s, q, e_t, kg: KnowledgeGraph\n ):\n answer_mask = self.get_answer_mask(e_space, e_s, q, kg)\n false_negative_mask = (answer_mask * (e_space != e_t.unsqueeze(1)).\n long()).float()\n return false_negative_mask\n\n def validate_action_mask(self, action_mask):\n action_mask_min = action_mask.min()\n action_mask_max = action_mask.max()\n assert action_mask_min == 0 or action_mask_min == 1\n assert action_mask_max == 0 or action_mask_max == 1\n\n def get_action_embedding(self, action: Action, kg: KnowledgeGraph):\n \"\"\"\n Return (batch) action embedding which is the concatenation of the embeddings of\n the traversed edge and the target node.\n\n :param action (r, e):\n (Variable:batch) indices of the most recent action\n - r is the most recently traversed edge\n - e is the destination entity.\n :param kg: Knowledge graph enviroment.\n \"\"\"\n relation_embedding = kg.get_relation_embeddings(action.rel)\n if self.relation_only:\n action_embedding = relation_embedding\n else:\n entity_embedding = kg.get_entity_embeddings(action.ent)\n action_embedding = torch.cat([relation_embedding,\n entity_embedding], dim=-1)\n return action_embedding\n\n def define_modules(self):\n if self.relation_only:\n input_dim = self.history_dim + self.relation_dim\n elif self.relation_only_in_path:\n input_dim = (self.history_dim + self.entity_dim * 2 + self.\n relation_dim)\n else:\n input_dim = self.history_dim + self.entity_dim + self.relation_dim\n self.W1 = nn.Linear(input_dim, self.action_dim)\n self.W2 = nn.Linear(self.action_dim, self.action_dim)\n self.W1Dropout = nn.Dropout(p=self.ff_dropout_rate)\n self.W2Dropout = nn.Dropout(p=self.ff_dropout_rate)\n if self.relation_only_in_path:\n self.path_encoder = nn.LSTM(input_size=self.relation_dim,\n hidden_size=self.history_dim, num_layers=self.\n history_num_layers, batch_first=True)\n else:\n self.path_encoder = nn.LSTM(input_size=self.action_dim,\n hidden_size=self.history_dim, num_layers=self.\n history_num_layers, batch_first=True)\n\n def initialize_modules(self):\n if self.xavier_initialization:\n nn.init.xavier_uniform_(self.W1.weight)\n nn.init.xavier_uniform_(self.W2.weight)\n for name, param in self.path_encoder.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.xavier_normal_(param)\n",
"step-4": "<mask token>\n\n\nclass BucketActions(NamedTuple):\n action_spaces: List[ActionSpace]\n action_dists: List[torch.Tensor]\n inv_offset: Union[List[int], None]\n entropy: torch.Tensor\n\n\ndef pad_and_cat_action_space(action_spaces: List[ActionSpace], inv_offset,\n kg: KnowledgeGraph):\n db_r_space, db_e_space, db_action_mask = [], [], []\n forks = []\n for acsp in action_spaces:\n forks += acsp.forks\n db_r_space.append(acsp.r_space)\n db_e_space.append(acsp.e_space)\n db_action_mask.append(acsp.action_mask)\n r_space = ops.pad_and_cat(db_r_space, padding_value=kg.dummy_r)[inv_offset]\n e_space = ops.pad_and_cat(db_e_space, padding_value=kg.dummy_e)[inv_offset]\n action_mask = ops.pad_and_cat(db_action_mask, padding_value=0)[inv_offset]\n action_space = ActionSpace(forks, r_space, e_space, action_mask)\n return action_space\n\n\nclass GraphWalkAgent(nn.Module):\n\n def __init__(self, args):\n super(GraphWalkAgent, self).__init__()\n self.model = args.model\n self.relation_only = args.relation_only\n self.history_dim = args.history_dim\n self.history_num_layers = args.history_num_layers\n self.entity_dim = args.entity_dim\n self.relation_dim = args.relation_dim\n if self.relation_only:\n self.action_dim = args.relation_dim\n else:\n self.action_dim = args.entity_dim + args.relation_dim\n self.ff_dropout_rate = args.ff_dropout_rate\n self.rnn_dropout_rate = args.rnn_dropout_rate\n self.action_dropout_rate = args.action_dropout_rate\n self.xavier_initialization = args.xavier_initialization\n self.relation_only_in_path = args.relation_only_in_path\n self.path = None\n self.define_modules()\n self.initialize_modules()\n self.fn = None\n self.fn_kg = None\n\n def transit(self, current_entity, obs: Observation, kg: KnowledgeGraph,\n use_action_space_bucketing=True, merge_aspace_batching_outcome=False\n ) ->BucketActions:\n \"\"\"\n Compute the next action distribution based on\n (a) the current node (entity) in KG and the query relation\n (b) action history representation\n :param current_entity: agent location (node) at step t.\n :param obs: agent observation at step t.\n e_s: source node\n query_relation: query relation\n last_step: If set, the agent is carrying out the last step.\n last_r: label of edge traversed in the previous step\n seen_nodes: notes seen on the paths\n :param kg: Knowledge graph environment.\n :param use_action_space_bucketing: If set, group the action space of different nodes \n into buckets by their sizes.\n :param merge_aspace_batch_outcome: If set, merge the transition probability distribution\n generated of different action space bucket into a single batch.\n :return\n With aspace batching and without merging the outcomes:\n db_outcomes: (Dynamic Batch) (action_space, action_dist)\n action_space: (Batch) padded possible action indices\n action_dist: (Batch) distribution over actions.\n inv_offset: Indices to set the dynamic batching output back to the original order.\n entropy: (Batch) entropy of action distribution.\n Else:\n action_dist: (Batch) distribution over actions.\n entropy: (Batch) entropy of action distribution.\n \"\"\"\n X = self.encode_history(current_entity, obs.source_entity, kg, obs.\n query_relation)\n X = self.W1(X)\n X = F.relu(X)\n X = self.W1Dropout(X)\n X = self.W2(X)\n X2 = self.W2Dropout(X)\n\n def policy_nn_fun(X2, acs: ActionSpace):\n A = self.get_action_embedding(Action(acs.r_space, acs.e_space), kg)\n action_dist = F.softmax(torch.squeeze(A @ torch.unsqueeze(X2, 2\n ), 2) - (1 - acs.action_mask) * ops.HUGE_INT, dim=-1)\n return 
action_dist, ops.entropy(action_dist)\n if use_action_space_bucketing:\n action = self.do_it_with_bucketing(X2, current_entity, kg,\n merge_aspace_batching_outcome, obs, policy_nn_fun)\n else:\n assert False\n action = self.do_it_without_bucketing(X2, current_entity, kg,\n obs, policy_nn_fun)\n return action\n\n def encode_history(self, current_entity, e_s, kg, query_relation):\n embedded_q_rel = kg.get_relation_embeddings(query_relation)\n encoded_history = self.path[-1][0][-1, :, :]\n if self.relation_only:\n X = torch.cat([encoded_history, embedded_q_rel], dim=-1)\n elif self.relation_only_in_path:\n E_s = kg.get_entity_embeddings(e_s)\n E = kg.get_entity_embeddings(current_entity)\n X = torch.cat([E, encoded_history, E_s, embedded_q_rel], dim=-1)\n else:\n E = kg.get_entity_embeddings(current_entity)\n X = torch.cat([E, encoded_history, embedded_q_rel], dim=-1)\n return X\n\n def do_it_with_bucketing(self, X2, current_entity, kg,\n merge_aspace_batching_outcome, obs: Observation, policy_nn_fun):\n entropy_list = []\n references = []\n buckect_action_spaces, inthis_bucket_indizes = (self.\n get_action_space_in_buckets(current_entity, obs, kg))\n action_spaces = []\n action_dists = []\n for as_b, inthis_bucket in zip(buckect_action_spaces,\n inthis_bucket_indizes):\n X2_b = X2[inthis_bucket, :]\n action_dist_b, entropy_b = policy_nn_fun(X2_b, as_b)\n references.extend(inthis_bucket)\n action_spaces.append(as_b)\n action_dists.append(action_dist_b)\n entropy_list.append(entropy_b)\n inv_offset = [i for i, _ in sorted(enumerate(references), key=lambda\n x: x[1])]\n entropy = torch.cat(entropy_list, dim=0)[inv_offset]\n action = BucketActions(action_spaces, action_dists, inv_offset, entropy\n )\n if merge_aspace_batching_outcome:\n action_space = pad_and_cat_action_space(buckect_action_spaces,\n inv_offset, kg)\n action_dist = ops.pad_and_cat(action.action_dists, padding_value=0\n )[inv_offset]\n action = BucketActions([action_space], [action_dist], None, entropy\n )\n return action\n\n def initialize_path(self, action: Action, kg: KnowledgeGraph):\n if self.relation_only_in_path:\n init_action_embedding = kg.get_relation_embeddings(action.rel)\n else:\n init_action_embedding = self.get_action_embedding(action, kg)\n init_action_embedding.unsqueeze_(1)\n init_h = zeros_var_cuda([self.history_num_layers, len(\n init_action_embedding), self.history_dim])\n init_c = zeros_var_cuda([self.history_num_layers, len(\n init_action_embedding), self.history_dim])\n self.path = [self.path_encoder(init_action_embedding, (init_h,\n init_c))[1]]\n\n def update_path(self, action: Action, kg: KnowledgeGraph, offset=None):\n \"\"\"\n Once an action was selected, update the action history.\n :param action (r, e): (Variable:batch) indices of the most recent action\n - r is the most recently traversed edge;\n - e is the destination entity.\n :param offset: (Variable:batch) if None, adjust path history with the given offset, used for search\n :param KG: Knowledge graph environment.\n \"\"\"\n\n def offset_path_history(p, offset):\n for i, x in enumerate(p):\n if type(x) is tuple:\n new_tuple = tuple([_x[:, offset, :] for _x in x])\n p[i] = new_tuple\n else:\n p[i] = x[offset, :]\n if self.relation_only_in_path:\n action_embedding = kg.get_relation_embeddings(action.rel)\n else:\n action_embedding = self.get_action_embedding(action, kg)\n if offset is not None:\n offset_path_history(self.path, offset)\n self.path.append(self.path_encoder(action_embedding.unsqueeze(1),\n self.path[-1])[1])\n\n def 
get_action_space_in_buckets(self, current_entity: torch.Tensor, obs:\n Observation, kg: KnowledgeGraph, collapse_entities=False):\n \"\"\"\n To compute the search operation in batch, we group the action spaces of different states\n (i.e. the set of outgoing edges of different nodes) into buckets based on their sizes to\n save the memory consumption of paddings.\n\n For example, in large knowledge graphs, certain nodes may have thousands of outgoing\n edges while a long tail of nodes only have a small amount of outgoing edges. If a batch\n contains a node with 1000 outgoing edges while the rest of the nodes have a maximum of\n 5 outgoing edges, we need to pad the action spaces of all nodes to 1000, which consumes\n lots of memory.\n\n With the bucketing approach, each bucket is padded separately. In this case the node\n with 1000 outgoing edges will be in its own bucket and the rest of the nodes will suffer\n little from padding the action space to 5.\n\n Once we grouped the action spaces in buckets, the policy network computation is carried\n out for every bucket iteratively. Once all the computation is done, we concatenate the\n results of all buckets and restore their original order in the batch. The computation\n outside the policy network module is thus unaffected.\n\n :return db_action_spaces:\n [((r_space_b0, r_space_b0), action_mask_b0),\n ((r_space_b1, r_space_b1), action_mask_b1),\n ...\n ((r_space_bn, r_space_bn), action_mask_bn)]\n\n A list of action space tensor representations grouped in n buckets, s.t.\n r_space_b0.size(0) + r_space_b1.size(0) + ... + r_space_bn.size(0) = e.size(0)\n\n :return db_references:\n [l_batch_refs0, l_batch_refs1, ..., l_batch_refsn]\n l_batch_refsi stores the indices of the examples in bucket i in the current batch,\n which is used later to restore the output results to the original order.\n \"\"\"\n db_action_spaces, db_references = [], []\n assert not collapse_entities\n bucket_ids, inbucket_ids = kg.get_bucket_and_inbucket_ids(\n current_entity)\n for b_key in set(bucket_ids.tolist()):\n inthisbucket_indices = torch.nonzero(bucket_ids.eq(b_key)).squeeze(\n ).tolist()\n if not isinstance(inthisbucket_indices, list):\n inthisbucket_indices = [inthisbucket_indices]\n inbucket_ids_of_entities_inthisbucket = inbucket_ids[\n inthisbucket_indices].tolist()\n bucket_action_space = kg.bucketid2ActionSpace[b_key]\n e_b = current_entity[inthisbucket_indices]\n obs_b = obs.get_slice(inthisbucket_indices)\n as_bucket = bucket_action_space.get_slice(\n inbucket_ids_of_entities_inthisbucket)\n action_mask = self.apply_action_masks(as_bucket, e_b, obs_b, kg)\n action_space_b = ActionSpace(as_bucket.forks, as_bucket.r_space,\n as_bucket.e_space, action_mask)\n db_action_spaces.append(action_space_b)\n db_references.append(inthisbucket_indices)\n return db_action_spaces, db_references\n\n def apply_action_masks(self, acsp: ActionSpace, e, obs: Observation, kg:\n KnowledgeGraph):\n r_space, e_space, action_mask = (acsp.r_space, acsp.e_space, acsp.\n action_mask)\n e_s, q, e_t, last_step, last_r, seen_nodes = obs\n ground_truth_edge_mask = self.get_ground_truth_edge_mask(e, r_space,\n e_space, obs, kg)\n action_mask -= ground_truth_edge_mask\n self.validate_action_mask(action_mask)\n if last_step:\n false_negative_mask = self.get_false_negative_mask(e_space, e_s,\n q, e_t, kg)\n action_mask *= 1 - false_negative_mask\n self.validate_action_mask(action_mask)\n return action_mask\n\n def get_ground_truth_edge_mask(self, current_nodes, r_space, e_space,\n obs: 
Observation, kg: KnowledgeGraph):\n s_e = obs.source_entity\n t_e = obs.target_entity\n q = obs.query_relation\n\n def build_mask(source_nodes, target_nodes, relation):\n return (current_nodes == source_nodes).unsqueeze(1) * (r_space ==\n relation.unsqueeze(1)) * (e_space == target_nodes.unsqueeze(1))\n mask = build_mask(s_e, t_e, q)\n inv_q = kg.get_inv_relation_id(q)\n inv_mask = build_mask(t_e, s_e, inv_q)\n return ((mask + inv_mask) * (s_e.unsqueeze(1) != kg.dummy_e)).float()\n\n def get_answer_mask(self, e_space, e_s, q, kg: KnowledgeGraph):\n if kg.args.mask_test_false_negatives:\n answer_vectors = kg.all_object_vectors\n else:\n answer_vectors = kg.train_object_vectors\n answer_masks = []\n for i in range(len(e_space)):\n _e_s, _q = int(e_s[i]), int(q[i])\n if not _e_s in answer_vectors or not _q in answer_vectors[_e_s]:\n answer_vector = var_cuda(torch.LongTensor([[kg.num_entities]]))\n else:\n answer_vector = answer_vectors[_e_s][_q]\n answer_mask = torch.sum(e_space[i].unsqueeze(0) ==\n answer_vector, dim=0).long()\n answer_masks.append(answer_mask)\n answer_mask = torch.cat(answer_masks).view(len(e_space), -1)\n return answer_mask\n\n def get_false_negative_mask(self, e_space, e_s, q, e_t, kg: KnowledgeGraph\n ):\n answer_mask = self.get_answer_mask(e_space, e_s, q, kg)\n false_negative_mask = (answer_mask * (e_space != e_t.unsqueeze(1)).\n long()).float()\n return false_negative_mask\n\n def validate_action_mask(self, action_mask):\n action_mask_min = action_mask.min()\n action_mask_max = action_mask.max()\n assert action_mask_min == 0 or action_mask_min == 1\n assert action_mask_max == 0 or action_mask_max == 1\n\n def get_action_embedding(self, action: Action, kg: KnowledgeGraph):\n \"\"\"\n Return (batch) action embedding which is the concatenation of the embeddings of\n the traversed edge and the target node.\n\n :param action (r, e):\n (Variable:batch) indices of the most recent action\n - r is the most recently traversed edge\n - e is the destination entity.\n :param kg: Knowledge graph enviroment.\n \"\"\"\n relation_embedding = kg.get_relation_embeddings(action.rel)\n if self.relation_only:\n action_embedding = relation_embedding\n else:\n entity_embedding = kg.get_entity_embeddings(action.ent)\n action_embedding = torch.cat([relation_embedding,\n entity_embedding], dim=-1)\n return action_embedding\n\n def define_modules(self):\n if self.relation_only:\n input_dim = self.history_dim + self.relation_dim\n elif self.relation_only_in_path:\n input_dim = (self.history_dim + self.entity_dim * 2 + self.\n relation_dim)\n else:\n input_dim = self.history_dim + self.entity_dim + self.relation_dim\n self.W1 = nn.Linear(input_dim, self.action_dim)\n self.W2 = nn.Linear(self.action_dim, self.action_dim)\n self.W1Dropout = nn.Dropout(p=self.ff_dropout_rate)\n self.W2Dropout = nn.Dropout(p=self.ff_dropout_rate)\n if self.relation_only_in_path:\n self.path_encoder = nn.LSTM(input_size=self.relation_dim,\n hidden_size=self.history_dim, num_layers=self.\n history_num_layers, batch_first=True)\n else:\n self.path_encoder = nn.LSTM(input_size=self.action_dim,\n hidden_size=self.history_dim, num_layers=self.\n history_num_layers, batch_first=True)\n\n def initialize_modules(self):\n if self.xavier_initialization:\n nn.init.xavier_uniform_(self.W1.weight)\n nn.init.xavier_uniform_(self.W2.weight)\n for name, param in self.path_encoder.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.xavier_normal_(param)\n",
"step-5": "\"\"\"\n Copyright (c) 2018, salesforce.com, inc.\n All rights reserved.\n SPDX-License-Identifier: BSD-3-Clause\n For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause\n \n Graph Search Policy Network.\n\"\"\"\nfrom typing import List, NamedTuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport src.utils.ops as ops\nfrom src.knowledge_graph import KnowledgeGraph, ActionSpace, Observation, Action\nfrom src.utils.ops import var_cuda, zeros_var_cuda\n\n\nclass BucketActions(NamedTuple):\n action_spaces: List[ActionSpace]\n action_dists: List[torch.Tensor]\n inv_offset: Union[List[int], None]\n entropy: torch.Tensor\n\n\ndef pad_and_cat_action_space(\n action_spaces: List[ActionSpace], inv_offset, kg: KnowledgeGraph\n):\n db_r_space, db_e_space, db_action_mask = [], [], []\n forks = []\n for acsp in action_spaces:\n forks += acsp.forks\n db_r_space.append(acsp.r_space)\n db_e_space.append(acsp.e_space)\n db_action_mask.append(acsp.action_mask)\n r_space = ops.pad_and_cat(db_r_space, padding_value=kg.dummy_r)[inv_offset]\n e_space = ops.pad_and_cat(db_e_space, padding_value=kg.dummy_e)[inv_offset]\n action_mask = ops.pad_and_cat(db_action_mask, padding_value=0)[inv_offset]\n action_space = ActionSpace(forks, r_space, e_space, action_mask)\n return action_space\n\n\nclass GraphWalkAgent(nn.Module):\n def __init__(self, args):\n super(GraphWalkAgent, self).__init__()\n self.model = args.model\n self.relation_only = args.relation_only\n\n self.history_dim = args.history_dim\n self.history_num_layers = args.history_num_layers\n self.entity_dim = args.entity_dim\n self.relation_dim = args.relation_dim\n if self.relation_only:\n self.action_dim = args.relation_dim\n else:\n self.action_dim = args.entity_dim + args.relation_dim\n self.ff_dropout_rate = args.ff_dropout_rate\n self.rnn_dropout_rate = args.rnn_dropout_rate\n self.action_dropout_rate = args.action_dropout_rate\n\n self.xavier_initialization = args.xavier_initialization\n\n self.relation_only_in_path = args.relation_only_in_path\n self.path = None\n\n # Set policy network modules\n self.define_modules()\n self.initialize_modules()\n\n # Fact network modules\n self.fn = None\n self.fn_kg = None\n\n def transit(\n self,\n current_entity,\n obs: Observation,\n kg: KnowledgeGraph,\n use_action_space_bucketing=True,\n merge_aspace_batching_outcome=False,\n ) -> BucketActions:\n \"\"\"\n Compute the next action distribution based on\n (a) the current node (entity) in KG and the query relation\n (b) action history representation\n :param current_entity: agent location (node) at step t.\n :param obs: agent observation at step t.\n e_s: source node\n query_relation: query relation\n last_step: If set, the agent is carrying out the last step.\n last_r: label of edge traversed in the previous step\n seen_nodes: notes seen on the paths\n :param kg: Knowledge graph environment.\n :param use_action_space_bucketing: If set, group the action space of different nodes \n into buckets by their sizes.\n :param merge_aspace_batch_outcome: If set, merge the transition probability distribution\n generated of different action space bucket into a single batch.\n :return\n With aspace batching and without merging the outcomes:\n db_outcomes: (Dynamic Batch) (action_space, action_dist)\n action_space: (Batch) padded possible action indices\n action_dist: (Batch) distribution over actions.\n inv_offset: Indices to set the dynamic batching output back to the 
original order.\n entropy: (Batch) entropy of action distribution.\n Else:\n action_dist: (Batch) distribution over actions.\n entropy: (Batch) entropy of action distribution.\n \"\"\"\n\n # Representation of the current state (current node and other observations)\n X = self.encode_history(\n current_entity, obs.source_entity, kg, obs.query_relation\n )\n\n # MLP\n X = self.W1(X)\n X = F.relu(X)\n X = self.W1Dropout(X)\n X = self.W2(X)\n X2 = self.W2Dropout(X)\n\n def policy_nn_fun(X2, acs: ActionSpace):\n A = self.get_action_embedding(Action(acs.r_space, acs.e_space), kg)\n action_dist = F.softmax(\n torch.squeeze(A @ torch.unsqueeze(X2, 2), 2)\n - (1 - acs.action_mask) * ops.HUGE_INT,\n dim=-1,\n )\n # action_dist = ops.weighted_softmax(torch.squeeze(A @ torch.unsqueeze(X2, 2), 2), action_mask)\n return action_dist, ops.entropy(action_dist)\n\n if use_action_space_bucketing:\n action = self.do_it_with_bucketing(\n X2,\n current_entity,\n kg,\n merge_aspace_batching_outcome,\n obs,\n policy_nn_fun,\n )\n else:\n assert False\n action = self.do_it_without_bucketing(\n X2, current_entity, kg, obs, policy_nn_fun\n )\n\n return action\n\n def encode_history(self, current_entity, e_s, kg, query_relation):\n embedded_q_rel = kg.get_relation_embeddings(query_relation)\n encoded_history = self.path[-1][0][-1, :, :]\n if self.relation_only:\n X = torch.cat([encoded_history, embedded_q_rel], dim=-1)\n elif self.relation_only_in_path:\n E_s = kg.get_entity_embeddings(e_s)\n E = kg.get_entity_embeddings(current_entity)\n X = torch.cat([E, encoded_history, E_s, embedded_q_rel], dim=-1)\n else:\n E = kg.get_entity_embeddings(current_entity)\n X = torch.cat([E, encoded_history, embedded_q_rel], dim=-1)\n return X\n\n # def do_it_without_bucketing(self, X2, current_entity, kg, obs, policy_nn_fun):\n # def get_action_space(e, obs, kg):\n # r_space = kg.action_space[\"relation-space\"][e]\n # e_space = kg.action_space[\"entity-space\"][e]\n # action_mask = kg.action_space[\"action-mask\"][e]\n # return self.apply_action_masks(acsp, e, obs, kg)\n #\n # action_space = get_action_space(current_entity, obs, kg)\n # action_dist, entropy = policy_nn_fun(X2, action_space)\n # db_outcomes = [(action_space, action_dist)]\n # inv_offset = None\n # return db_outcomes, entropy, inv_offset\n\n def do_it_with_bucketing(\n self,\n X2,\n current_entity,\n kg,\n merge_aspace_batching_outcome,\n obs: Observation,\n policy_nn_fun,\n ):\n entropy_list = []\n references = []\n buckect_action_spaces, inthis_bucket_indizes = self.get_action_space_in_buckets(\n current_entity, obs, kg\n )\n action_spaces = []\n action_dists = []\n\n for as_b, inthis_bucket in zip(buckect_action_spaces, inthis_bucket_indizes):\n X2_b = X2[inthis_bucket, :]\n action_dist_b, entropy_b = policy_nn_fun(X2_b, as_b)\n references.extend(inthis_bucket)\n action_spaces.append(as_b)\n action_dists.append(action_dist_b)\n entropy_list.append(entropy_b)\n inv_offset = [i for i, _ in sorted(enumerate(references), key=lambda x: x[1])]\n entropy = torch.cat(entropy_list, dim=0)[inv_offset]\n action = BucketActions(action_spaces, action_dists, inv_offset, entropy)\n\n if merge_aspace_batching_outcome:\n action_space = pad_and_cat_action_space(\n buckect_action_spaces, inv_offset, kg\n )\n action_dist = ops.pad_and_cat(action.action_dists, padding_value=0)[\n inv_offset\n ]\n action = BucketActions([action_space], [action_dist], None, entropy)\n return action\n\n def initialize_path(self, action: Action, kg: KnowledgeGraph):\n # [batch_size, action_dim]\n if 
self.relation_only_in_path:\n init_action_embedding = kg.get_relation_embeddings(action.rel)\n else:\n init_action_embedding = self.get_action_embedding(action, kg)\n init_action_embedding.unsqueeze_(1)\n # [num_layers, batch_size, dim]\n init_h = zeros_var_cuda(\n [self.history_num_layers, len(init_action_embedding), self.history_dim]\n )\n init_c = zeros_var_cuda(\n [self.history_num_layers, len(init_action_embedding), self.history_dim]\n )\n self.path = [self.path_encoder(init_action_embedding, (init_h, init_c))[1]]\n\n def update_path(self, action: Action, kg: KnowledgeGraph, offset=None):\n \"\"\"\n Once an action was selected, update the action history.\n :param action (r, e): (Variable:batch) indices of the most recent action\n - r is the most recently traversed edge;\n - e is the destination entity.\n :param offset: (Variable:batch) if None, adjust path history with the given offset, used for search\n :param KG: Knowledge graph environment.\n \"\"\"\n\n def offset_path_history(p, offset):\n for i, x in enumerate(p):\n if type(x) is tuple:\n new_tuple = tuple([_x[:, offset, :] for _x in x])\n p[i] = new_tuple\n else:\n p[i] = x[offset, :]\n\n # update action history\n if self.relation_only_in_path:\n action_embedding = kg.get_relation_embeddings(action.rel)\n else:\n action_embedding = self.get_action_embedding(action, kg)\n if offset is not None:\n offset_path_history(self.path, offset)\n\n self.path.append(\n self.path_encoder(action_embedding.unsqueeze(1), self.path[-1])[1]\n )\n\n def get_action_space_in_buckets(\n self,\n current_entity: torch.Tensor,\n obs: Observation,\n kg: KnowledgeGraph,\n collapse_entities=False,\n ):\n \"\"\"\n To compute the search operation in batch, we group the action spaces of different states\n (i.e. the set of outgoing edges of different nodes) into buckets based on their sizes to\n save the memory consumption of paddings.\n\n For example, in large knowledge graphs, certain nodes may have thousands of outgoing\n edges while a long tail of nodes only have a small amount of outgoing edges. If a batch\n contains a node with 1000 outgoing edges while the rest of the nodes have a maximum of\n 5 outgoing edges, we need to pad the action spaces of all nodes to 1000, which consumes\n lots of memory.\n\n With the bucketing approach, each bucket is padded separately. In this case the node\n with 1000 outgoing edges will be in its own bucket and the rest of the nodes will suffer\n little from padding the action space to 5.\n\n Once we grouped the action spaces in buckets, the policy network computation is carried\n out for every bucket iteratively. Once all the computation is done, we concatenate the\n results of all buckets and restore their original order in the batch. The computation\n outside the policy network module is thus unaffected.\n\n :return db_action_spaces:\n [((r_space_b0, r_space_b0), action_mask_b0),\n ((r_space_b1, r_space_b1), action_mask_b1),\n ...\n ((r_space_bn, r_space_bn), action_mask_bn)]\n\n A list of action space tensor representations grouped in n buckets, s.t.\n r_space_b0.size(0) + r_space_b1.size(0) + ... 
+ r_space_bn.size(0) = e.size(0)\n\n :return db_references:\n [l_batch_refs0, l_batch_refs1, ..., l_batch_refsn]\n l_batch_refsi stores the indices of the examples in bucket i in the current batch,\n which is used later to restore the output results to the original order.\n \"\"\"\n db_action_spaces, db_references = [], []\n assert not collapse_entities # NotImplementedError\n bucket_ids, inbucket_ids = kg.get_bucket_and_inbucket_ids(current_entity)\n\n for b_key in set(bucket_ids.tolist()):\n inthisbucket_indices = (\n torch.nonzero(bucket_ids.eq(b_key)).squeeze().tolist()\n )\n if not isinstance(inthisbucket_indices, list): # TODO(tilo) wtf!\n inthisbucket_indices = [inthisbucket_indices]\n\n inbucket_ids_of_entities_inthisbucket = inbucket_ids[\n inthisbucket_indices\n ].tolist()\n\n bucket_action_space = kg.bucketid2ActionSpace[b_key]\n\n e_b = current_entity[inthisbucket_indices]\n obs_b = obs.get_slice(inthisbucket_indices)\n\n as_bucket = bucket_action_space.get_slice(\n inbucket_ids_of_entities_inthisbucket\n )\n action_mask = self.apply_action_masks(as_bucket, e_b, obs_b, kg)\n action_space_b = ActionSpace(\n as_bucket.forks, as_bucket.r_space, as_bucket.e_space, action_mask\n )\n db_action_spaces.append(action_space_b)\n db_references.append(inthisbucket_indices)\n\n return db_action_spaces, db_references\n\n def apply_action_masks(\n self, acsp: ActionSpace, e, obs: Observation, kg: KnowledgeGraph\n ):\n r_space, e_space, action_mask = acsp.r_space, acsp.e_space, acsp.action_mask\n e_s, q, e_t, last_step, last_r, seen_nodes = obs\n\n # Prevent the agent from selecting the ground truth edge\n ground_truth_edge_mask = self.get_ground_truth_edge_mask(\n e, r_space, e_space, obs, kg\n )\n action_mask -= ground_truth_edge_mask\n self.validate_action_mask(action_mask)\n\n # Mask out false negatives in the final step\n if last_step:\n false_negative_mask = self.get_false_negative_mask(e_space, e_s, q, e_t, kg)\n action_mask *= 1 - false_negative_mask\n self.validate_action_mask(action_mask)\n\n # Prevent the agent from stopping in the middle of a path\n # stop_mask = (last_r == NO_OP_RELATION_ID).unsqueeze(1).float()\n # action_mask = (1 - stop_mask) * action_mask + stop_mask * (r_space == NO_OP_RELATION_ID).float()\n # Prevent loops\n # Note: avoid duplicate removal of self-loops\n # seen_nodes_b = seen_nodes[l_batch_refs]\n # loop_mask_b = (((seen_nodes_b.unsqueeze(1) == e_space.unsqueeze(2)).sum(2) > 0) *\n # (r_space != NO_OP_RELATION_ID)).float()\n # action_mask *= (1 - loop_mask_b)\n return action_mask\n\n def get_ground_truth_edge_mask(\n self, current_nodes, r_space, e_space, obs: Observation, kg: KnowledgeGraph\n ):\n s_e = obs.source_entity\n t_e = obs.target_entity\n q = obs.query_relation\n\n def build_mask(source_nodes, target_nodes, relation):\n return (\n (current_nodes == source_nodes).unsqueeze(1)\n * (r_space == relation.unsqueeze(1))\n * (e_space == target_nodes.unsqueeze(1))\n )\n\n mask = build_mask(s_e, t_e, q)\n inv_q = kg.get_inv_relation_id(q)\n inv_mask = build_mask(t_e, s_e, inv_q)\n return ((mask + inv_mask) * (s_e.unsqueeze(1) != kg.dummy_e)).float()\n\n def get_answer_mask(self, e_space, e_s, q, kg: KnowledgeGraph):\n if kg.args.mask_test_false_negatives:\n answer_vectors = kg.all_object_vectors\n else:\n answer_vectors = kg.train_object_vectors\n answer_masks = []\n for i in range(len(e_space)):\n _e_s, _q = int(e_s[i]), int(q[i])\n if not _e_s in answer_vectors or not _q in answer_vectors[_e_s]:\n answer_vector = 
var_cuda(torch.LongTensor([[kg.num_entities]]))\n else:\n answer_vector = answer_vectors[_e_s][_q]\n answer_mask = torch.sum(\n e_space[i].unsqueeze(0) == answer_vector, dim=0\n ).long()\n answer_masks.append(answer_mask)\n answer_mask = torch.cat(answer_masks).view(len(e_space), -1)\n return answer_mask\n\n def get_false_negative_mask(self, e_space, e_s, q, e_t, kg: KnowledgeGraph):\n answer_mask = self.get_answer_mask(e_space, e_s, q, kg)\n # This is a trick applied during training where we convert a multi-answer predction problem into several\n # single-answer prediction problems. By masking out the other answers in the training set, we are forcing\n # the agent to walk towards a particular answer.\n # This trick does not affect inference on the test set: at inference time the ground truth answer will not\n # appear in the answer mask. This can be checked by uncommenting the following assertion statement.\n # Note that the assertion statement can trigger in the last batch if you're using a batch_size > 1 since\n # we append dummy examples to the last batch to make it the required batch size.\n # The assertion statement will also trigger in the dev set inference of NELL-995 since we randomly\n # sampled the dev set from the training data.\n # assert(float((answer_mask * (e_space == e_t.unsqueeze(1)).long()).sum()) == 0)\n false_negative_mask = (\n answer_mask * (e_space != e_t.unsqueeze(1)).long()\n ).float()\n return false_negative_mask\n\n def validate_action_mask(self, action_mask):\n action_mask_min = action_mask.min()\n action_mask_max = action_mask.max()\n assert action_mask_min == 0 or action_mask_min == 1\n assert action_mask_max == 0 or action_mask_max == 1\n\n def get_action_embedding(self, action: Action, kg: KnowledgeGraph):\n \"\"\"\n Return (batch) action embedding which is the concatenation of the embeddings of\n the traversed edge and the target node.\n\n :param action (r, e):\n (Variable:batch) indices of the most recent action\n - r is the most recently traversed edge\n - e is the destination entity.\n :param kg: Knowledge graph enviroment.\n \"\"\"\n relation_embedding = kg.get_relation_embeddings(action.rel)\n if self.relation_only:\n action_embedding = relation_embedding\n else:\n entity_embedding = kg.get_entity_embeddings(action.ent)\n action_embedding = torch.cat([relation_embedding, entity_embedding], dim=-1)\n return action_embedding\n\n def define_modules(self):\n if self.relation_only:\n input_dim = self.history_dim + self.relation_dim\n elif self.relation_only_in_path:\n input_dim = self.history_dim + self.entity_dim * 2 + self.relation_dim\n else:\n input_dim = self.history_dim + self.entity_dim + self.relation_dim\n self.W1 = nn.Linear(input_dim, self.action_dim)\n self.W2 = nn.Linear(self.action_dim, self.action_dim)\n self.W1Dropout = nn.Dropout(p=self.ff_dropout_rate)\n self.W2Dropout = nn.Dropout(p=self.ff_dropout_rate)\n if self.relation_only_in_path:\n self.path_encoder = nn.LSTM(\n input_size=self.relation_dim,\n hidden_size=self.history_dim,\n num_layers=self.history_num_layers,\n batch_first=True,\n )\n else:\n self.path_encoder = nn.LSTM(\n input_size=self.action_dim,\n hidden_size=self.history_dim,\n num_layers=self.history_num_layers,\n batch_first=True,\n )\n\n def initialize_modules(self):\n if self.xavier_initialization:\n nn.init.xavier_uniform_(self.W1.weight)\n nn.init.xavier_uniform_(self.W2.weight)\n for name, param in self.path_encoder.named_parameters():\n if \"bias\" in name:\n nn.init.constant_(param, 0.0)\n elif \"weight\" in 
name:\n nn.init.xavier_normal_(param)\n",
"step-ids": [
10,
13,
16,
18,
20
]
}
|
[
10,
13,
16,
18,
20
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_data_dir_register():
register = register_path.DataDirRegister(namespace_to_data_dirs={'ns1':
[epath.Path('/path/ns1')]})
assert {'ns1'} == register.namespaces
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from etils import epath
from tensorflow_datasets.core.community import register_path
def test_data_dir_register():
register = register_path.DataDirRegister(namespace_to_data_dirs={'ns1':
[epath.Path('/path/ns1')]})
assert {'ns1'} == register.namespaces
<|reserved_special_token_1|>
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.community.register_path."""
from etils import epath
from tensorflow_datasets.core.community import register_path
def test_data_dir_register():
register = register_path.DataDirRegister(
namespace_to_data_dirs={'ns1': [epath.Path('/path/ns1')]})
assert {'ns1'} == register.namespaces
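# A minimal extension of the same check (a sketch assuming only the
# constructor semantics shown above): registering two namespaces
# should surface both.
#   register = register_path.DataDirRegister(
#       namespace_to_data_dirs={'ns1': [epath.Path('/p1')],
#                               'ns2': [epath.Path('/p2')]})
#   assert {'ns1', 'ns2'} == register.namespaces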
|
flexible
|
{
"blob_id": "ed65d7e0de3fc792753e34b77254bccc8cee6d66",
"index": 3657,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_data_dir_register():\n register = register_path.DataDirRegister(namespace_to_data_dirs={'ns1':\n [epath.Path('/path/ns1')]})\n assert {'ns1'} == register.namespaces\n",
"step-3": "<mask token>\nfrom etils import epath\nfrom tensorflow_datasets.core.community import register_path\n\n\ndef test_data_dir_register():\n register = register_path.DataDirRegister(namespace_to_data_dirs={'ns1':\n [epath.Path('/path/ns1')]})\n assert {'ns1'} == register.namespaces\n",
"step-4": "# coding=utf-8\n# Copyright 2022 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tensorflow_datasets.core.community.register_path.\"\"\"\n\nfrom etils import epath\nfrom tensorflow_datasets.core.community import register_path\n\n\ndef test_data_dir_register():\n register = register_path.DataDirRegister(\n namespace_to_data_dirs={'ns1': [epath.Path('/path/ns1')]})\n assert {'ns1'} == register.namespaces\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(TSA, V)
<|reserved_special_token_1|>
# read box dimensions l, w, h, then report total surface area and volume
l, w, h = map(int, input().split())
TSA = 2 * (l * w + w * h + h * l)  # total surface area of a cuboid
V = l * w * h  # volume
print(TSA, V)
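# Worked example (hypothetical input): "1 2 3" gives
# TSA = 2 * (1*2 + 2*3 + 3*1) = 22 and V = 1*2*3 = 6, printing "22 6".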
|
flexible
|
{
"blob_id": "d3382ead1d98ba2fb15fe3ea277430f1bb07131c",
"index": 2544,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(TSA, V)\n",
"step-3": "l, w, h = map(int, input().split())\nTSA = 2 * (l * w + w * h + h * l)\nV = l * w * h\nprint(TSA, V)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
'''
Created on 4 Oct 2016
@author: MetalInvest
'''
def isHammerHangman(high, low, open, close):
    # hammer / hanging man: small real body near the high, long lower shadow
    body = abs(open - close)
    leg = min(open, close) - low
    if body == 0:  # doji has no real body; avoid division by zero
        return False
    return leg / body >= 2.0 and high / max(open, close) <= 1.08
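# Worked example (hypothetical OHLC): open=10.0, close=10.1, high=10.2, low=9.0
# -> body = 0.1, leg = 1.0, so leg/body = 10 >= 2 and high/max(open, close) is
# about 1.01 <= 1.08; the bar qualifies (read as a hanging man in an uptrend).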
def isEngulfing(df, bottom=True):
    # engulfing: the latest candle's real body fully covers the previous body
open_0 = df['open'][-1]
close_0 = df['close'][-1]
open_1 = df['open'][-2]
close_1 = df['close'][-2]
body_0 = close_0 - open_0
body_1 = close_1 - open_1
if bottom:
return body_0 > 0 and body_1 < 0 and body_0 > abs(body_1)
else:
return body_0 < 0 and body_1 > 0 and abs(body_0) > body_1
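# Worked example (hypothetical bars): previous candle open=10.5, close=10.0
# (body_1 = -0.5, bearish); latest candle open=9.9, close=10.6 (body_0 = 0.7).
# body_0 > 0, body_1 < 0 and 0.7 > abs(-0.5), so this is a bullish engulfing.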
def isDarkCloud():
pass
def isPiercing():
pass
def jap_candle_reversal(df, context):
    # check for strong trend-reversal patterns; g appears to be the strategy's
    # global namespace (a JoinQuant-style convention) holding reversal_index
    index = 0.0
    # hammer & hangman
if isHammerHangman(df['high'][-1], df['low'][-1], df['open'][-1], df['close'][-1]):
index += g.reversal_index
if isEngulfing(df):
index += g.reversal_index
return index
|
normal
|
{
"blob_id": "6e739c30b3e7c15bd90b74cfd5a1d6827e863a44",
"index": 4413,
"step-1": "<mask token>\n\n\ndef isHammerHangman(high, low, open, close):\n body = abs(open - close)\n leg = min(open, close) - low\n return leg / body >= 2.0 and high / max(open, close) <= 1.08\n\n\ndef isEngulfing(df, bottom=True):\n open_0 = df['open'][-1]\n close_0 = df['close'][-1]\n open_1 = df['open'][-2]\n close_1 = df['close'][-2]\n body_0 = close_0 - open_0\n body_1 = close_1 - open_1\n if bottom:\n return body_0 > 0 and body_1 < 0 and body_0 > abs(body_1)\n else:\n return body_0 < 0 and body_1 > 0 and abs(body_0) > body_1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef isHammerHangman(high, low, open, close):\n body = abs(open - close)\n leg = min(open, close) - low\n return leg / body >= 2.0 and high / max(open, close) <= 1.08\n\n\ndef isEngulfing(df, bottom=True):\n open_0 = df['open'][-1]\n close_0 = df['close'][-1]\n open_1 = df['open'][-2]\n close_1 = df['close'][-2]\n body_0 = close_0 - open_0\n body_1 = close_1 - open_1\n if bottom:\n return body_0 > 0 and body_1 < 0 and body_0 > abs(body_1)\n else:\n return body_0 < 0 and body_1 > 0 and abs(body_0) > body_1\n\n\n<mask token>\n\n\ndef jap_candle_reversal(df, context):\n index = 0.0\n if isHammerHangman(df['high'][-1], df['low'][-1], df['open'][-1], df[\n 'close'][-1]):\n index += g.reversal_index\n if isEngulfing(df):\n index += g.reversal_index\n return index\n",
"step-3": "<mask token>\n\n\ndef isHammerHangman(high, low, open, close):\n body = abs(open - close)\n leg = min(open, close) - low\n return leg / body >= 2.0 and high / max(open, close) <= 1.08\n\n\ndef isEngulfing(df, bottom=True):\n open_0 = df['open'][-1]\n close_0 = df['close'][-1]\n open_1 = df['open'][-2]\n close_1 = df['close'][-2]\n body_0 = close_0 - open_0\n body_1 = close_1 - open_1\n if bottom:\n return body_0 > 0 and body_1 < 0 and body_0 > abs(body_1)\n else:\n return body_0 < 0 and body_1 > 0 and abs(body_0) > body_1\n\n\n<mask token>\n\n\ndef isPiercing():\n pass\n\n\ndef jap_candle_reversal(df, context):\n index = 0.0\n if isHammerHangman(df['high'][-1], df['low'][-1], df['open'][-1], df[\n 'close'][-1]):\n index += g.reversal_index\n if isEngulfing(df):\n index += g.reversal_index\n return index\n",
"step-4": "<mask token>\n\n\ndef isHammerHangman(high, low, open, close):\n body = abs(open - close)\n leg = min(open, close) - low\n return leg / body >= 2.0 and high / max(open, close) <= 1.08\n\n\ndef isEngulfing(df, bottom=True):\n open_0 = df['open'][-1]\n close_0 = df['close'][-1]\n open_1 = df['open'][-2]\n close_1 = df['close'][-2]\n body_0 = close_0 - open_0\n body_1 = close_1 - open_1\n if bottom:\n return body_0 > 0 and body_1 < 0 and body_0 > abs(body_1)\n else:\n return body_0 < 0 and body_1 > 0 and abs(body_0) > body_1\n\n\ndef isDarkCloud():\n pass\n\n\ndef isPiercing():\n pass\n\n\ndef jap_candle_reversal(df, context):\n index = 0.0\n if isHammerHangman(df['high'][-1], df['low'][-1], df['open'][-1], df[\n 'close'][-1]):\n index += g.reversal_index\n if isEngulfing(df):\n index += g.reversal_index\n return index\n",
"step-5": "'''\r\nCreated on 4 Oct 2016\r\n\r\n@author: MetalInvest\r\n'''\r\n\r\ndef isHammerHangman(high, low, open, close):\r\n body = abs(open - close)\r\n leg = min(open, close) - low\r\n return leg / body >= 2.0 and high/max(open, close) <= 1.08\r\n \r\ndef isEngulfing(df, bottom = True):\r\n open_0 = df['open'][-1]\r\n close_0 = df['close'][-1]\r\n open_1 = df['open'][-2]\r\n close_1 = df['close'][-2]\r\n body_0 = close_0 - open_0\r\n body_1 = close_1 - open_1\r\n if bottom: \r\n return body_0 > 0 and body_1 < 0 and body_0 > abs(body_1)\r\n else:\r\n return body_0 < 0 and body_1 > 0 and abs(body_0) > body_1\r\n\r\ndef isDarkCloud():\r\n pass\r\n\r\ndef isPiercing():\r\n pass\r\n\r\ndef jap_candle_reversal(df, context):\r\n # we check strong trend reversal reversal_pattern\r\n index = 0.0\r\n # hammer & hangman\r\n if isHammerHangman(df['high'][-1], df['low'][-1], df['open'][-1], df['close'][-1]):\r\n index += g.reversal_index\r\n if isEngulfing(df):\r\n index += g.reversal_index\r\n return index",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/7/14 6:06 PM
# @Author : Huang HUi
# @Site :
# @File : Longest Common Prefix.py
# @Software: PyCharm
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
        if len(strs) == 0:
            return ''
        if len(strs) == 1:
            return strs[0]  # return the lone string itself, not the list
        res = []
        # reference string: the lexicographically smallest element; the common
        # prefix is a prefix of it, and the scan below never over-indexes a
        # shorter string (a shorter prefix of min_ would itself be the minimum)
        min_ = strs[0]
        for i in range(len(strs)):
            if min_ > strs[i]:
                min_ = strs[i]
        for i in range(len(min_)):
            count = 0
            for j in range(len(strs)):
                if strs[j][i] == min_[i]:
                    count += 1
            if count == len(strs):
                res.append(min_[i])
            else:
                break
        return ''.join(res)
if __name__ == '__main__':
a=["abc","abcc","asc","abcd"]
b=["c","c"]
print(Solution().longestCommonPrefix(b))
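# Quick trace: for b = ["c", "c"], min_ = "c" and both strings match at i=0,
# so the call prints "c". For a = ["abc", "abcc", "asc", "abcd"], min_ = "abc"
# and the scan stops at i=1 ("b" vs "s" in "asc"), leaving the prefix "a".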
|
normal
|
{
"blob_id": "1aed8e92a31ee42a3a609123af927f7074598ec1",
"index": 1820,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Solution(object):\n\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if len(strs) == 0:\n return ''\n if len(strs) == 1:\n return strs\n res = []\n min_ = strs[0]\n for i in range(len(strs)):\n if min_ > strs[i]:\n min_ = strs[i]\n for i in range(len(min_)):\n count = 0\n for j in range(len(strs)):\n if min_[i] in strs[j][i]:\n count += 1\n if count == len(strs):\n res.append(min_[i])\n else:\n break\n return ''.join(res)\n\n\n<mask token>\n",
"step-4": "class Solution(object):\n\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if len(strs) == 0:\n return ''\n if len(strs) == 1:\n return strs\n res = []\n min_ = strs[0]\n for i in range(len(strs)):\n if min_ > strs[i]:\n min_ = strs[i]\n for i in range(len(min_)):\n count = 0\n for j in range(len(strs)):\n if min_[i] in strs[j][i]:\n count += 1\n if count == len(strs):\n res.append(min_[i])\n else:\n break\n return ''.join(res)\n\n\nif __name__ == '__main__':\n a = ['abc', 'abcc', 'asc', 'abcd']\n b = ['c', 'c']\n print(Solution().longestCommonPrefix(b))\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/7/14 下午6:06\n# @Author : Huang HUi\n# @Site : \n# @File : Longest Common Prefix.py\n# @Software: PyCharm\n\nclass Solution(object):\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if len(strs)==0:\n return ''\n if len(strs)==1 :\n return strs\n res=[]\n min_=strs[0]\n for i in range(len(strs)):\n if min_>strs[i]:\n min_=strs[i]\n for i in range(len(min_)):\n count=0\n for j in range(len(strs)):\n if min_[i] in strs[j][i]:\n count+=1\n if count==len(strs):\n res.append(min_[i])\n else:\n break\n return ''.join(res)\n\n\n\n\n\n\n\nif __name__ == '__main__':\n a=[\"abc\",\"abcc\",\"asc\",\"abcd\"]\n b=[\"c\",\"c\"]\n print(Solution().longestCommonPrefix(b))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 2.1.5 on 2019-01-21 22:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('phone_number', models.CharField(max_length=232, primary_key=True, serialize=False)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customer', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Dish',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=232)),
('category', models.CharField(max_length=232)),
('picture', models.ImageField(upload_to='uploads/')),
('description', models.TextField(null=True)),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
],
),
migrations.CreateModel(
name='DishCount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.IntegerField(default=1)),
('dish', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Dish')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('meal_date_time', models.DateTimeField()),
('comment', models.TextField(max_length=232, null=True)),
('person_count', models.IntegerField(default=1)),
                ('status', models.IntegerField(choices=[(1, 'NEW'), (2, 'IN PROGRESS'), (3, 'READY TO MEAL'), (4, 'FINISHED')], default=1)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='main.Customer')),
('dishes', models.ManyToManyField(through='main.DishCount', to='main.Dish')),
],
),
migrations.CreateModel(
name='Restaurant',
fields=[
('name', models.CharField(max_length=232)),
('description', models.TextField(max_length=232)),
('picture', models.ImageField(upload_to='uploads/')),
('phone_number', models.CharField(max_length=232, primary_key=True, serialize=False)),
('coord_x', models.DecimalField(decimal_places=10, max_digits=40)),
('coord_y', models.DecimalField(decimal_places=10, max_digits=40)),
('dishes', models.ManyToManyField(to='main.Dish')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='order',
name='restaurant',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='main.Restaurant'),
),
migrations.AddField(
model_name='dishcount',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Order'),
),
]
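
# A minimal sketch (an assumption, not generated by Django): IntegerField
# choices are (value, label) pairs, so on the Order model the status field
# would read roughly like:
#
#     STATUS_CHOICES = [(1, 'NEW'), (2, 'IN PROGRESS'),
#                       (3, 'READY TO MEAL'), (4, 'FINISHED')]
#     status = models.IntegerField(choices=STATUS_CHOICES, default=1)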
|
normal
|
{
"blob_id": "a6cb7a134fb8480d344743bcb7bc8766146d256f",
"index": 8238,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Customer', fields=[(\n 'phone_number', models.CharField(max_length=232, primary_key=True,\n serialize=False)), ('user', models.OneToOneField(on_delete=django.\n db.models.deletion.CASCADE, related_name='customer', to=settings.\n AUTH_USER_MODEL))]), migrations.CreateModel(name='Dish', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('name', models.CharField(\n max_length=232)), ('category', models.CharField(max_length=232)), (\n 'picture', models.ImageField(upload_to='uploads/')), ('description',\n models.TextField(null=True)), ('price', models.DecimalField(\n decimal_places=2, max_digits=10))]), migrations.CreateModel(name=\n 'DishCount', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('count',\n models.IntegerField(default=1)), ('dish', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='main.Dish'))]),\n migrations.CreateModel(name='Order', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('meal_date_time', models.DateTimeField()), (\n 'comment', models.TextField(max_length=232, null=True)), (\n 'person_count', models.IntegerField(default=1)), ('status', models.\n IntegerField(choices=[('NEW', 1), ('IN PROGRESS', 2), (\n 'READY TO MEAL', 3), ('FINISHED', 4)], default=1)), ('customer',\n models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING,\n to='main.Customer')), ('dishes', models.ManyToManyField(through=\n 'main.DishCount', to='main.Dish'))]), migrations.CreateModel(name=\n 'Restaurant', fields=[('name', models.CharField(max_length=232)), (\n 'description', models.TextField(max_length=232)), ('picture',\n models.ImageField(upload_to='uploads/')), ('phone_number', models.\n CharField(max_length=232, primary_key=True, serialize=False)), (\n 'coord_x', models.DecimalField(decimal_places=10, max_digits=40)),\n ('coord_y', models.DecimalField(decimal_places=10, max_digits=40)),\n ('dishes', models.ManyToManyField(to='main.Dish')), ('user', models\n .OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))]), migrations.AddField(model_name=\n 'order', name='restaurant', field=models.ForeignKey(on_delete=\n django.db.models.deletion.DO_NOTHING, to='main.Restaurant')),\n migrations.AddField(model_name='dishcount', name='order', field=\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'main.Order'))]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Customer', fields=[(\n 'phone_number', models.CharField(max_length=232, primary_key=True,\n serialize=False)), ('user', models.OneToOneField(on_delete=django.\n db.models.deletion.CASCADE, related_name='customer', to=settings.\n AUTH_USER_MODEL))]), migrations.CreateModel(name='Dish', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('name', models.CharField(\n max_length=232)), ('category', models.CharField(max_length=232)), (\n 'picture', models.ImageField(upload_to='uploads/')), ('description',\n models.TextField(null=True)), ('price', models.DecimalField(\n decimal_places=2, max_digits=10))]), migrations.CreateModel(name=\n 'DishCount', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('count',\n models.IntegerField(default=1)), ('dish', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='main.Dish'))]),\n migrations.CreateModel(name='Order', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('meal_date_time', models.DateTimeField()), (\n 'comment', models.TextField(max_length=232, null=True)), (\n 'person_count', models.IntegerField(default=1)), ('status', models.\n IntegerField(choices=[('NEW', 1), ('IN PROGRESS', 2), (\n 'READY TO MEAL', 3), ('FINISHED', 4)], default=1)), ('customer',\n models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING,\n to='main.Customer')), ('dishes', models.ManyToManyField(through=\n 'main.DishCount', to='main.Dish'))]), migrations.CreateModel(name=\n 'Restaurant', fields=[('name', models.CharField(max_length=232)), (\n 'description', models.TextField(max_length=232)), ('picture',\n models.ImageField(upload_to='uploads/')), ('phone_number', models.\n CharField(max_length=232, primary_key=True, serialize=False)), (\n 'coord_x', models.DecimalField(decimal_places=10, max_digits=40)),\n ('coord_y', models.DecimalField(decimal_places=10, max_digits=40)),\n ('dishes', models.ManyToManyField(to='main.Dish')), ('user', models\n .OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))]), migrations.AddField(model_name=\n 'order', name='restaurant', field=models.ForeignKey(on_delete=\n django.db.models.deletion.DO_NOTHING, to='main.Restaurant')),\n migrations.AddField(model_name='dishcount', name='order', field=\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'main.Order'))]\n",
"step-5": "# Generated by Django 2.1.5 on 2019-01-21 22:51\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('phone_number', models.CharField(max_length=232, primary_key=True, serialize=False)),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customer', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Dish',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=232)),\n ('category', models.CharField(max_length=232)),\n ('picture', models.ImageField(upload_to='uploads/')),\n ('description', models.TextField(null=True)),\n ('price', models.DecimalField(decimal_places=2, max_digits=10)),\n ],\n ),\n migrations.CreateModel(\n name='DishCount',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('count', models.IntegerField(default=1)),\n ('dish', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Dish')),\n ],\n ),\n migrations.CreateModel(\n name='Order',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('meal_date_time', models.DateTimeField()),\n ('comment', models.TextField(max_length=232, null=True)),\n ('person_count', models.IntegerField(default=1)),\n ('status', models.IntegerField(choices=[('NEW', 1), ('IN PROGRESS', 2), ('READY TO MEAL', 3), ('FINISHED', 4)], default=1)),\n ('customer', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='main.Customer')),\n ('dishes', models.ManyToManyField(through='main.DishCount', to='main.Dish')),\n ],\n ),\n migrations.CreateModel(\n name='Restaurant',\n fields=[\n ('name', models.CharField(max_length=232)),\n ('description', models.TextField(max_length=232)),\n ('picture', models.ImageField(upload_to='uploads/')),\n ('phone_number', models.CharField(max_length=232, primary_key=True, serialize=False)),\n ('coord_x', models.DecimalField(decimal_places=10, max_digits=40)),\n ('coord_y', models.DecimalField(decimal_places=10, max_digits=40)),\n ('dishes', models.ManyToManyField(to='main.Dish')),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='order',\n name='restaurant',\n field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='main.Restaurant'),\n ),\n migrations.AddField(\n model_name='dishcount',\n name='order',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Order'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def test_read_from_file():
"""
Test of function of reading data from file.
:return:
"""
reading_file = d.read_code_from_file()
assert type(reading_file) == list
assert len(reading_file) == 7
assert '\n' not in d.read_code_from_file()
def test_decode_from_base64():
"""
Test of function of decoding messages from base64 to utf-8.
:return:
"""
list_to_be_checked = []
list_of_truth = [')-.7)-\x06\x06AOO', '-57)-0\x06\x06JASJAOOASJ',
')07)2\x06\x06AJSAJAJOAJJAAO', '.7)/\x06\x06AJSSAJSJOOSSOOOS',
'-,70\x06\x06', ',7)-,\x06\x06OAASSOSOAAASAAAAA',
'.7).\x06\x06SOSAOJAOOO']
for x in d.read_code_from_file():
list_to_be_checked.append(d.decode_from_base64(x))
assert list_to_be_checked == list_of_truth
<|reserved_special_token_0|>
def test_decode():
"""
Test of function of decoding.
:return:
"""
decoding = d.decode()
assert type(decoding) == list
assert len(decoding) == 7
assert decoding[0] == '-12;-1\n\nESS'
assert decoding[-1] == '2;-2\n\nWSWESNESSS'
for x in decoding:
assert '\n' in x
def test_decode_messages():
"""
Test of function of decoding messages in SecretGarden class.
:return:
"""
decoding1 = d.decode()
decoding2 = s.decode_messages()
assert decoding1 == decoding2
decoding3 = SecretGarden(filename, 'HELLO, STUDENTS.').decode_messages()
assert decoding1 != decoding3
def test_find_secret_locations():
"""
Test of function of finding secret locations in SecretGarden class.
:return:
"""
list_of_random = [(random.Random(), random.Random()), (random.Random(),
random.Random()), (random.Random(), random.Random()), (random.
Random(), random.Random()), (random.Random(), random.Random()), (
random.Random(), random.Random()), (random.Random(), random.Random())]
list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6,
-13), (2, -6)]
secrets = s.find_secret_locations()
assert type(secrets) == list
for x in secrets:
assert type(x) == tuple
assert secrets == list_of_truth
assert list_of_random != secrets
assert len(list_of_random) == len(secrets)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_read_from_file():
"""
Test of function of reading data from file.
:return:
"""
reading_file = d.read_code_from_file()
assert type(reading_file) == list
assert len(reading_file) == 7
assert '\n' not in d.read_code_from_file()
def test_decode_from_base64():
"""
Test of function of decoding messages from base64 to utf-8.
:return:
"""
list_to_be_checked = []
list_of_truth = [')-.7)-\x06\x06AOO', '-57)-0\x06\x06JASJAOOASJ',
')07)2\x06\x06AJSAJAJOAJJAAO', '.7)/\x06\x06AJSSAJSJOOSSOOOS',
'-,70\x06\x06', ',7)-,\x06\x06OAASSOSOAAASAAAAA',
'.7).\x06\x06SOSAOJAOOO']
for x in d.read_code_from_file():
list_to_be_checked.append(d.decode_from_base64(x))
assert list_to_be_checked == list_of_truth
def test_calculate_cipher_step():
"""
Test of function of calculating the cipher step.
:return:
"""
given_value = d.calculate_cipher_step()
assert type(given_value) == int
assert given_value == 1016
new_decoder = Decoder(filename, 'HELLO THERE!')
new_value = new_decoder.calculate_cipher_step()
assert new_value != given_value
random_number = random.Random()
assert given_value != random_number
def test_decode():
"""
Test of function of decoding.
:return:
"""
decoding = d.decode()
assert type(decoding) == list
assert len(decoding) == 7
assert decoding[0] == '-12;-1\n\nESS'
assert decoding[-1] == '2;-2\n\nWSWESNESSS'
for x in decoding:
assert '\n' in x
def test_decode_messages():
"""
Test of function of decoding messages in SecretGarden class.
:return:
"""
decoding1 = d.decode()
decoding2 = s.decode_messages()
assert decoding1 == decoding2
decoding3 = SecretGarden(filename, 'HELLO, STUDENTS.').decode_messages()
assert decoding1 != decoding3
def test_find_secret_locations():
"""
Test of function of finding secret locations in SecretGarden class.
:return:
"""
list_of_random = [(random.Random(), random.Random()), (random.Random(),
random.Random()), (random.Random(), random.Random()), (random.
Random(), random.Random()), (random.Random(), random.Random()), (
random.Random(), random.Random()), (random.Random(), random.Random())]
list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6,
-13), (2, -6)]
secrets = s.find_secret_locations()
assert type(secrets) == list
for x in secrets:
assert type(x) == tuple
assert secrets == list_of_truth
assert list_of_random != secrets
assert len(list_of_random) == len(secrets)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
filename = 'pr08_example_data.txt'
key = 'Fat Chocobo'
d = Decoder(filename, key)
s = SecretGarden(filename, key)
def test_read_from_file():
"""
Test of function of reading data from file.
:return:
"""
reading_file = d.read_code_from_file()
assert type(reading_file) == list
assert len(reading_file) == 7
assert '\n' not in d.read_code_from_file()
def test_decode_from_base64():
"""
Test of function of decoding messages from base64 to utf-8.
:return:
"""
list_to_be_checked = []
list_of_truth = [')-.7)-\x06\x06AOO', '-57)-0\x06\x06JASJAOOASJ',
')07)2\x06\x06AJSAJAJOAJJAAO', '.7)/\x06\x06AJSSAJSJOOSSOOOS',
'-,70\x06\x06', ',7)-,\x06\x06OAASSOSOAAASAAAAA',
'.7).\x06\x06SOSAOJAOOO']
for x in d.read_code_from_file():
list_to_be_checked.append(d.decode_from_base64(x))
assert list_to_be_checked == list_of_truth
def test_calculate_cipher_step():
"""
Test of function of calculating the cipher step.
:return:
"""
given_value = d.calculate_cipher_step()
assert type(given_value) == int
assert given_value == 1016
new_decoder = Decoder(filename, 'HELLO THERE!')
new_value = new_decoder.calculate_cipher_step()
assert new_value != given_value
random_number = random.Random()
assert given_value != random_number
def test_decode():
"""
Test of function of decoding.
:return:
"""
decoding = d.decode()
assert type(decoding) == list
assert len(decoding) == 7
assert decoding[0] == '-12;-1\n\nESS'
assert decoding[-1] == '2;-2\n\nWSWESNESSS'
for x in decoding:
assert '\n' in x
def test_decode_messages():
"""
Test of function of decoding messages in SecretGarden class.
:return:
"""
decoding1 = d.decode()
decoding2 = s.decode_messages()
assert decoding1 == decoding2
decoding3 = SecretGarden(filename, 'HELLO, STUDENTS.').decode_messages()
assert decoding1 != decoding3
def test_find_secret_locations():
"""
Test of function of finding secret locations in SecretGarden class.
:return:
"""
list_of_random = [(random.Random(), random.Random()), (random.Random(),
random.Random()), (random.Random(), random.Random()), (random.
Random(), random.Random()), (random.Random(), random.Random()), (
random.Random(), random.Random()), (random.Random(), random.Random())]
list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6,
-13), (2, -6)]
secrets = s.find_secret_locations()
assert type(secrets) == list
for x in secrets:
assert type(x) == tuple
assert secrets == list_of_truth
assert list_of_random != secrets
assert len(list_of_random) == len(secrets)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from secret_garden import Decoder, SecretGarden
import random
filename = 'pr08_example_data.txt'
key = 'Fat Chocobo'
d = Decoder(filename, key)
s = SecretGarden(filename, key)
def test_read_from_file():
"""
Test of function of reading data from file.
:return:
"""
reading_file = d.read_code_from_file()
assert type(reading_file) == list
assert len(reading_file) == 7
assert '\n' not in d.read_code_from_file()
def test_decode_from_base64():
"""
Test of function of decoding messages from base64 to utf-8.
:return:
"""
list_to_be_checked = []
list_of_truth = [')-.7)-\x06\x06AOO', '-57)-0\x06\x06JASJAOOASJ',
')07)2\x06\x06AJSAJAJOAJJAAO', '.7)/\x06\x06AJSSAJSJOOSSOOOS',
'-,70\x06\x06', ',7)-,\x06\x06OAASSOSOAAASAAAAA',
'.7).\x06\x06SOSAOJAOOO']
for x in d.read_code_from_file():
list_to_be_checked.append(d.decode_from_base64(x))
assert list_to_be_checked == list_of_truth
def test_calculate_cipher_step():
"""
Test of function of calculating the cipher step.
:return:
"""
given_value = d.calculate_cipher_step()
assert type(given_value) == int
assert given_value == 1016
new_decoder = Decoder(filename, 'HELLO THERE!')
new_value = new_decoder.calculate_cipher_step()
assert new_value != given_value
random_number = random.Random()
assert given_value != random_number
def test_decode():
"""
Test of function of decoding.
:return:
"""
decoding = d.decode()
assert type(decoding) == list
assert len(decoding) == 7
assert decoding[0] == '-12;-1\n\nESS'
assert decoding[-1] == '2;-2\n\nWSWESNESSS'
for x in decoding:
assert '\n' in x
def test_decode_messages():
"""
Test of function of decoding messages in SecretGarden class.
:return:
"""
decoding1 = d.decode()
decoding2 = s.decode_messages()
assert decoding1 == decoding2
decoding3 = SecretGarden(filename, 'HELLO, STUDENTS.').decode_messages()
assert decoding1 != decoding3
def test_find_secret_locations():
"""
Test of function of finding secret locations in SecretGarden class.
:return:
"""
list_of_random = [(random.Random(), random.Random()), (random.Random(),
random.Random()), (random.Random(), random.Random()), (random.
Random(), random.Random()), (random.Random(), random.Random()), (
random.Random(), random.Random()), (random.Random(), random.Random())]
list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6,
-13), (2, -6)]
secrets = s.find_secret_locations()
assert type(secrets) == list
for x in secrets:
assert type(x) == tuple
assert secrets == list_of_truth
assert list_of_random != secrets
assert len(list_of_random) == len(secrets)
<|reserved_special_token_1|>
"""Secret Garden tests."""
from secret_garden import Decoder, SecretGarden
import random
filename = "pr08_example_data.txt"
key = "Fat Chocobo"
d = Decoder(filename, key)
s = SecretGarden(filename, key)
def test_read_from_file():
"""
Test of function of reading data from file.
:return:
"""
reading_file = d.read_code_from_file()
assert type(reading_file) == list
assert len(reading_file) == 7
assert "\n" not in d.read_code_from_file()
def test_decode_from_base64():
"""
Test of function of decoding messages from base64 to utf-8.
:return:
"""
list_to_be_checked = []
list_of_truth = [")-.7)-AOO", "-57)-0JASJAOOASJ", ")07)2AJSAJAJOAJJAAO", ".7)/AJSSAJSJOOSSOOOS",
"-,70", ",7)-,OAASSOSOAAASAAAAA", ".7).SOSAOJAOOO"]
for x in d.read_code_from_file():
list_to_be_checked.append(d.decode_from_base64(x))
assert list_to_be_checked == list_of_truth
def test_calculate_cipher_step():
"""
Test of function of calculating the cipher step.
:return:
"""
given_value = d.calculate_cipher_step()
assert type(given_value) == int
assert given_value == 1016
new_decoder = Decoder(filename, "HELLO THERE!")
new_value = new_decoder.calculate_cipher_step()
assert new_value != given_value
random_number = random.Random()
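    # random.Random() is an RNG object, not an int, so the comparison below
    # can only catch an accidental type mix-up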
assert given_value != random_number
def test_decode():
"""
Test of function of decoding.
:return:
"""
decoding = d.decode()
assert type(decoding) == list
assert len(decoding) == 7
assert decoding[0] == '-12;-1\n\nESS'
assert decoding[-1] == '2;-2\n\nWSWESNESSS'
for x in decoding:
assert "\n" in x
def test_decode_messages():
"""
Test of function of decoding messages in SecretGarden class.
:return:
"""
decoding1 = d.decode()
decoding2 = s.decode_messages()
assert decoding1 == decoding2
decoding3 = SecretGarden(filename, "HELLO, STUDENTS.").decode_messages()
assert decoding1 != decoding3
def test_find_secret_locations():
"""
Test of function of finding secret locations in SecretGarden class.
:return:
"""
list_of_random = [(random.Random(), random.Random()), (random.Random(), random.Random()), (random.Random(),
random.Random()),
(random.Random(), random.Random()), (random.Random(), random.Random()),
(random.Random(), random.Random()), (random.Random(), random.Random())]
list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, -13), (2, -6)]
secrets = s.find_secret_locations()
assert type(secrets) == list
for x in secrets:
assert type(x) == tuple
assert secrets == list_of_truth
assert list_of_random != secrets
assert len(list_of_random) == len(secrets)
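
# To run these tests (assuming this file sits next to secret_garden.py and
# pr08_example_data.txt, with pytest installed):
#
#     python -m pytest -v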
|
flexible
|
{
"blob_id": "8cfab525ab3a86dd6964475d5621fdc7c6413e38",
"index": 8019,
"step-1": "<mask token>\n\n\ndef test_read_from_file():\n \"\"\"\n Test of function of reading data from file.\n\n :return:\n \"\"\"\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert '\\n' not in d.read_code_from_file()\n\n\ndef test_decode_from_base64():\n \"\"\"\n Test of function of decoding messages from base64 to utf-8.\n\n :return:\n \"\"\"\n list_to_be_checked = []\n list_of_truth = [')-.7)-\\x06\\x06AOO', '-57)-0\\x06\\x06JASJAOOASJ',\n ')07)2\\x06\\x06AJSAJAJOAJJAAO', '.7)/\\x06\\x06AJSSAJSJOOSSOOOS',\n '-,70\\x06\\x06', ',7)-,\\x06\\x06OAASSOSOAAASAAAAA',\n '.7).\\x06\\x06SOSAOJAOOO']\n for x in d.read_code_from_file():\n list_to_be_checked.append(d.decode_from_base64(x))\n assert list_to_be_checked == list_of_truth\n\n\n<mask token>\n\n\ndef test_decode():\n \"\"\"\n Test of function of decoding.\n\n :return:\n \"\"\"\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert '\\n' in x\n\n\ndef test_decode_messages():\n \"\"\"\n Test of function of decoding messages in SecretGarden class.\n\n :return:\n \"\"\"\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, 'HELLO, STUDENTS.').decode_messages()\n assert decoding1 != decoding3\n\n\ndef test_find_secret_locations():\n \"\"\"\n Test of function of finding secret locations in SecretGarden class.\n\n :return:\n \"\"\"\n list_of_random = [(random.Random(), random.Random()), (random.Random(),\n random.Random()), (random.Random(), random.Random()), (random.\n Random(), random.Random()), (random.Random(), random.Random()), (\n random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, \n -13), (2, -6)]\n secrets = s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)\n",
"step-2": "<mask token>\n\n\ndef test_read_from_file():\n \"\"\"\n Test of function of reading data from file.\n\n :return:\n \"\"\"\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert '\\n' not in d.read_code_from_file()\n\n\ndef test_decode_from_base64():\n \"\"\"\n Test of function of decoding messages from base64 to utf-8.\n\n :return:\n \"\"\"\n list_to_be_checked = []\n list_of_truth = [')-.7)-\\x06\\x06AOO', '-57)-0\\x06\\x06JASJAOOASJ',\n ')07)2\\x06\\x06AJSAJAJOAJJAAO', '.7)/\\x06\\x06AJSSAJSJOOSSOOOS',\n '-,70\\x06\\x06', ',7)-,\\x06\\x06OAASSOSOAAASAAAAA',\n '.7).\\x06\\x06SOSAOJAOOO']\n for x in d.read_code_from_file():\n list_to_be_checked.append(d.decode_from_base64(x))\n assert list_to_be_checked == list_of_truth\n\n\ndef test_calculate_cipher_step():\n \"\"\"\n Test of function of calculating the cipher step.\n\n :return:\n \"\"\"\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, 'HELLO THERE!')\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number\n\n\ndef test_decode():\n \"\"\"\n Test of function of decoding.\n\n :return:\n \"\"\"\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert '\\n' in x\n\n\ndef test_decode_messages():\n \"\"\"\n Test of function of decoding messages in SecretGarden class.\n\n :return:\n \"\"\"\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, 'HELLO, STUDENTS.').decode_messages()\n assert decoding1 != decoding3\n\n\ndef test_find_secret_locations():\n \"\"\"\n Test of function of finding secret locations in SecretGarden class.\n\n :return:\n \"\"\"\n list_of_random = [(random.Random(), random.Random()), (random.Random(),\n random.Random()), (random.Random(), random.Random()), (random.\n Random(), random.Random()), (random.Random(), random.Random()), (\n random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, \n -13), (2, -6)]\n secrets = s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)\n",
"step-3": "<mask token>\nfilename = 'pr08_example_data.txt'\nkey = 'Fat Chocobo'\nd = Decoder(filename, key)\ns = SecretGarden(filename, key)\n\n\ndef test_read_from_file():\n \"\"\"\n Test of function of reading data from file.\n\n :return:\n \"\"\"\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert '\\n' not in d.read_code_from_file()\n\n\ndef test_decode_from_base64():\n \"\"\"\n Test of function of decoding messages from base64 to utf-8.\n\n :return:\n \"\"\"\n list_to_be_checked = []\n list_of_truth = [')-.7)-\\x06\\x06AOO', '-57)-0\\x06\\x06JASJAOOASJ',\n ')07)2\\x06\\x06AJSAJAJOAJJAAO', '.7)/\\x06\\x06AJSSAJSJOOSSOOOS',\n '-,70\\x06\\x06', ',7)-,\\x06\\x06OAASSOSOAAASAAAAA',\n '.7).\\x06\\x06SOSAOJAOOO']\n for x in d.read_code_from_file():\n list_to_be_checked.append(d.decode_from_base64(x))\n assert list_to_be_checked == list_of_truth\n\n\ndef test_calculate_cipher_step():\n \"\"\"\n Test of function of calculating the cipher step.\n\n :return:\n \"\"\"\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, 'HELLO THERE!')\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number\n\n\ndef test_decode():\n \"\"\"\n Test of function of decoding.\n\n :return:\n \"\"\"\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert '\\n' in x\n\n\ndef test_decode_messages():\n \"\"\"\n Test of function of decoding messages in SecretGarden class.\n\n :return:\n \"\"\"\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, 'HELLO, STUDENTS.').decode_messages()\n assert decoding1 != decoding3\n\n\ndef test_find_secret_locations():\n \"\"\"\n Test of function of finding secret locations in SecretGarden class.\n\n :return:\n \"\"\"\n list_of_random = [(random.Random(), random.Random()), (random.Random(),\n random.Random()), (random.Random(), random.Random()), (random.\n Random(), random.Random()), (random.Random(), random.Random()), (\n random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, \n -13), (2, -6)]\n secrets = s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)\n",
"step-4": "<mask token>\nfrom secret_garden import Decoder, SecretGarden\nimport random\nfilename = 'pr08_example_data.txt'\nkey = 'Fat Chocobo'\nd = Decoder(filename, key)\ns = SecretGarden(filename, key)\n\n\ndef test_read_from_file():\n \"\"\"\n Test of function of reading data from file.\n\n :return:\n \"\"\"\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert '\\n' not in d.read_code_from_file()\n\n\ndef test_decode_from_base64():\n \"\"\"\n Test of function of decoding messages from base64 to utf-8.\n\n :return:\n \"\"\"\n list_to_be_checked = []\n list_of_truth = [')-.7)-\\x06\\x06AOO', '-57)-0\\x06\\x06JASJAOOASJ',\n ')07)2\\x06\\x06AJSAJAJOAJJAAO', '.7)/\\x06\\x06AJSSAJSJOOSSOOOS',\n '-,70\\x06\\x06', ',7)-,\\x06\\x06OAASSOSOAAASAAAAA',\n '.7).\\x06\\x06SOSAOJAOOO']\n for x in d.read_code_from_file():\n list_to_be_checked.append(d.decode_from_base64(x))\n assert list_to_be_checked == list_of_truth\n\n\ndef test_calculate_cipher_step():\n \"\"\"\n Test of function of calculating the cipher step.\n\n :return:\n \"\"\"\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, 'HELLO THERE!')\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number\n\n\ndef test_decode():\n \"\"\"\n Test of function of decoding.\n\n :return:\n \"\"\"\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert '\\n' in x\n\n\ndef test_decode_messages():\n \"\"\"\n Test of function of decoding messages in SecretGarden class.\n\n :return:\n \"\"\"\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, 'HELLO, STUDENTS.').decode_messages()\n assert decoding1 != decoding3\n\n\ndef test_find_secret_locations():\n \"\"\"\n Test of function of finding secret locations in SecretGarden class.\n\n :return:\n \"\"\"\n list_of_random = [(random.Random(), random.Random()), (random.Random(),\n random.Random()), (random.Random(), random.Random()), (random.\n Random(), random.Random()), (random.Random(), random.Random()), (\n random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, \n -13), (2, -6)]\n secrets = s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)\n",
"step-5": "\"\"\"Secret Garden tests.\"\"\"\nfrom secret_garden import Decoder, SecretGarden\nimport random\n\nfilename = \"pr08_example_data.txt\"\nkey = \"Fat Chocobo\"\nd = Decoder(filename, key)\ns = SecretGarden(filename, key)\n\n\ndef test_read_from_file():\n \"\"\"\n Test of function of reading data from file.\n\n :return:\n \"\"\"\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert \"\\n\" not in d.read_code_from_file()\n\n\ndef test_decode_from_base64():\n \"\"\"\n Test of function of decoding messages from base64 to utf-8.\n\n :return:\n \"\"\"\n list_to_be_checked = []\n list_of_truth = [\")-.7)-\u0006\u0006AOO\", \"-57)-0\u0006\u0006JASJAOOASJ\", \")07)2\u0006\u0006AJSAJAJOAJJAAO\", \".7)/\u0006\u0006AJSSAJSJOOSSOOOS\",\n \"-,70\u0006\u0006\", \",7)-,\u0006\u0006OAASSOSOAAASAAAAA\", \".7).\u0006\u0006SOSAOJAOOO\"]\n for x in d.read_code_from_file():\n list_to_be_checked.append(d.decode_from_base64(x))\n assert list_to_be_checked == list_of_truth\n\n\ndef test_calculate_cipher_step():\n \"\"\"\n Test of function of calculating the cipher step.\n\n :return:\n \"\"\"\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, \"HELLO THERE!\")\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number\n\n\ndef test_decode():\n \"\"\"\n Test of function of decoding.\n\n :return:\n \"\"\"\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert \"\\n\" in x\n\n\ndef test_decode_messages():\n \"\"\"\n Test of function of decoding messages in SecretGarden class.\n\n :return:\n \"\"\"\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, \"HELLO, STUDENTS.\").decode_messages()\n assert decoding1 != decoding3\n\n\ndef test_find_secret_locations():\n \"\"\"\n Test of function of finding secret locations in SecretGarden class.\n\n :return:\n \"\"\"\n list_of_random = [(random.Random(), random.Random()), (random.Random(), random.Random()), (random.Random(),\n random.Random()),\n (random.Random(), random.Random()), (random.Random(), random.Random()),\n (random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, -13), (2, -6)]\n secrets = s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |