code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
def fonct(valeur, a=None):
    """Add *valeur* to *a*, using whichever operation fits a's type.

    Lists and sets are mutated in place (and returned); tuples, strings and
    numbers are combined into a new object.  When *a* is omitted, *valeur*
    itself is returned (the original crashed with TypeError in that case).

    Args:
        valeur: the element/value to add.
        a: a list, tuple, str, set, number, or None.

    Returns:
        The container/value with *valeur* added.
    """
    if a is None:
        return valeur
    # isinstance() (rather than `type(a) is ...`) also accepts subclasses.
    if isinstance(a, list):
        a.append(valeur)
    elif isinstance(a, tuple):
        a += (valeur,)
    elif isinstance(a, str):
        a += str(valeur)
    elif isinstance(a, set):
        a.add(valeur)
    else:
        # Fallback: plain addition (numbers, or any type supporting +=).
        a += valeur
    return a


print(fonct(4, [1, 2, 3]))  # [1, 2, 3, 4]
print(fonct(4, 'eg'))       # eg4
print(fonct(4, (1, 2, 3)))  # (1, 2, 3, 4)
print(fonct(4, {1, 2, 3}))  # {1, 2, 3, 4}
|
normal
|
{
"blob_id": "2a13fffa105a5dd546c30c892e59888eb6ead996",
"index": 4645,
"step-1": "<mask token>\n",
"step-2": "def fonct(valeur, a=None):\n if type(a) is list:\n a.append(valeur)\n elif type(a) is tuple:\n a += tuple((valeur,))\n elif type(a) is str:\n a += str(valeur)\n elif type(a) is set:\n a.add(valeur)\n else:\n a += valeur\n return a\n\n\n<mask token>\n",
"step-3": "def fonct(valeur, a=None):\n if type(a) is list:\n a.append(valeur)\n elif type(a) is tuple:\n a += tuple((valeur,))\n elif type(a) is str:\n a += str(valeur)\n elif type(a) is set:\n a.add(valeur)\n else:\n a += valeur\n return a\n\n\nprint(fonct(4, [1, 2, 3]))\nprint(fonct(4, 'eg'))\nprint(fonct(4, (1, 2, 3)))\nprint(fonct(4, {1, 2, 3}))\n",
"step-4": "def fonct(valeur, a= None):\n if type(a) is list:\n a.append(valeur)\n # a+= valeur\n elif type(a) is tuple: \n a += tuple((valeur,)) \n elif type(a) is str: \n a += str(valeur) \n elif type(a) is set: \n a.add(valeur) \n else:\n a+= valeur\n return(a)\n\nprint(fonct(4, [1, 2, 3])) # [1, 2, 3, 4]\nprint(fonct(4, 'eg' )) # eg4\nprint(fonct(4, (1,2,3))) # (1, 2, 3, 4)\nprint(fonct(4, {1, 2, 3})) # (1, 2, 3, 4)\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def main(batch_size, crop_size, learning_rate, segmentation_task_ratio,
    weight_decay, save_folder, epochs, alpha):
    """Train a DenseSharp model jointly on a classification ('clf') head
    and a segmentation ('seg') head.

    Args:
        batch_size: samples per batch for the training and validation loaders.
        crop_size: crop size forwarded to the validation dataset.
        learning_rate: Adam learning rate.
        segmentation_task_ratio: loss weight of the 'seg' head ('clf' is 1.0).
        weight_decay: weight decay forwarded to densesharp.get_compiled.
        save_folder: subdirectory of 'tmp/' that receives per-epoch
            checkpoints, the CSV training log, the best checkpoint and the
            TensorBoard logs.
        epochs: number of training epochs.
        alpha: forwarded to get_mixup_loader (presumably the mixup
            Beta-distribution alpha -- confirm in dataloader).
    """
    # Echo the key hyper-parameters into the run log.
    print(learning_rate)
    print(alpha)
    print(weight_decay)
    # Training data: subsets 0 and 1, served through a mixup loader.
    train_dataset = ClfSegDataset(subset=[0, 1])
    train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,
        alpha=alpha)
    # Validation data: subset 2 (move=None -- presumably no random shift;
    # confirm in dataloader).
    val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])
    val_loader = get_loader(val_dataset, batch_size=batch_size)
    # Compile the multi-task model; warm-started from a hard-coded
    # pre-trained weights file.
    model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=
        learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.
        DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,
        metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.
        precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':
        1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,
        weights='tmp/test/weights42_222639.h5')
    # Save the weights after every epoch.
    checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %
        save_folder, verbose=1, period=1, save_weights_only=True)
    csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)
    tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)
    # Additionally keep the single best checkpoint, judged by validation
    # classification accuracy.
    best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,
        verbose=1, save_weights_only=True, monitor='val_clf_acc',
        save_best_only=True, period=1, mode='max')
    # Stop once val_clf_acc has not improved for 20 consecutive epochs.
    early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode
        ='max', patience=20, verbose=1)
    # Cut the learning rate to about a third after 10 stagnant epochs of
    # val_loss.  NOTE(review): 'epsilon' was renamed 'min_delta' in newer
    # Keras releases -- confirm the pinned Keras version.
    lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,
        patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, min_lr=0
        )
    # 50 training and 50 validation steps per epoch.
    model.fit_generator(generator=train_loader, steps_per_epoch=50,
        max_queue_size=10, workers=1, validation_data=val_loader, epochs=
        epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,
        best_keeper, early_stopping, lr_reducer, tensorboard])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
set_gpu_usage()
<|reserved_special_token_0|>
def main(batch_size, crop_size, learning_rate, segmentation_task_ratio,
    weight_decay, save_folder, epochs, alpha):
    """Train a DenseSharp model jointly on a classification ('clf') head
    and a segmentation ('seg') head.

    Args:
        batch_size: samples per batch for the training and validation loaders.
        crop_size: crop size forwarded to the validation dataset.
        learning_rate: Adam learning rate.
        segmentation_task_ratio: loss weight of the 'seg' head ('clf' is 1.0).
        weight_decay: weight decay forwarded to densesharp.get_compiled.
        save_folder: subdirectory of 'tmp/' that receives per-epoch
            checkpoints, the CSV training log, the best checkpoint and the
            TensorBoard logs.
        epochs: number of training epochs.
        alpha: forwarded to get_mixup_loader (presumably the mixup
            Beta-distribution alpha -- confirm in dataloader).
    """
    # Echo the key hyper-parameters into the run log.
    print(learning_rate)
    print(alpha)
    print(weight_decay)
    # Training data: subsets 0 and 1, served through a mixup loader.
    train_dataset = ClfSegDataset(subset=[0, 1])
    train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,
        alpha=alpha)
    # Validation data: subset 2 (move=None -- presumably no random shift;
    # confirm in dataloader).
    val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])
    val_loader = get_loader(val_dataset, batch_size=batch_size)
    # Compile the multi-task model; warm-started from a hard-coded
    # pre-trained weights file.
    model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=
        learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.
        DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,
        metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.
        precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':
        1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,
        weights='tmp/test/weights42_222639.h5')
    # Save the weights after every epoch.
    checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %
        save_folder, verbose=1, period=1, save_weights_only=True)
    csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)
    tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)
    # Additionally keep the single best checkpoint, judged by validation
    # classification accuracy.
    best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,
        verbose=1, save_weights_only=True, monitor='val_clf_acc',
        save_best_only=True, period=1, mode='max')
    # Stop once val_clf_acc has not improved for 20 consecutive epochs.
    early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode
        ='max', patience=20, verbose=1)
    # Cut the learning rate to about a third after 10 stagnant epochs of
    # val_loss.  NOTE(review): 'epsilon' was renamed 'min_delta' in newer
    # Keras releases -- confirm the pinned Keras version.
    lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,
        patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, min_lr=0
        )
    # 50 training and 50 validation steps per epoch.
    model.fit_generator(generator=train_loader, steps_per_epoch=50,
        max_queue_size=10, workers=1, validation_data=val_loader, epochs=
        epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,
        best_keeper, early_stopping, lr_reducer, tensorboard])
if __name__ == '__main__':
    # Default configuration for a quick 10-epoch test run.
    main(batch_size=32, crop_size=[32, 32, 32], learning_rate=1e-05,
        segmentation_task_ratio=0.2, weight_decay=0.0, save_folder='test',
        epochs=10, alpha=1.0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
set_gpu_usage()
<|reserved_special_token_0|>
# CUDA_VISIBLE_DEVICES takes comma-separated device indices (e.g. "0");
# the TensorFlow device string "/gpu:0" is not a valid entry and would
# hide every GPU from CUDA instead of selecting the first one.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def main(batch_size, crop_size, learning_rate, segmentation_task_ratio,
    weight_decay, save_folder, epochs, alpha):
    """Train a DenseSharp model jointly on a classification ('clf') head
    and a segmentation ('seg') head.

    Args:
        batch_size: samples per batch for the training and validation loaders.
        crop_size: crop size forwarded to the validation dataset.
        learning_rate: Adam learning rate.
        segmentation_task_ratio: loss weight of the 'seg' head ('clf' is 1.0).
        weight_decay: weight decay forwarded to densesharp.get_compiled.
        save_folder: subdirectory of 'tmp/' that receives per-epoch
            checkpoints, the CSV training log, the best checkpoint and the
            TensorBoard logs.
        epochs: number of training epochs.
        alpha: forwarded to get_mixup_loader (presumably the mixup
            Beta-distribution alpha -- confirm in dataloader).
    """
    # Echo the key hyper-parameters into the run log.
    print(learning_rate)
    print(alpha)
    print(weight_decay)
    # Training data: subsets 0 and 1, served through a mixup loader.
    train_dataset = ClfSegDataset(subset=[0, 1])
    train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,
        alpha=alpha)
    # Validation data: subset 2 (move=None -- presumably no random shift;
    # confirm in dataloader).
    val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])
    val_loader = get_loader(val_dataset, batch_size=batch_size)
    # Compile the multi-task model; warm-started from a hard-coded
    # pre-trained weights file.
    model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=
        learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.
        DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,
        metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.
        precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':
        1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,
        weights='tmp/test/weights42_222639.h5')
    # Save the weights after every epoch.
    checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %
        save_folder, verbose=1, period=1, save_weights_only=True)
    csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)
    tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)
    # Additionally keep the single best checkpoint, judged by validation
    # classification accuracy.
    best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,
        verbose=1, save_weights_only=True, monitor='val_clf_acc',
        save_best_only=True, period=1, mode='max')
    # Stop once val_clf_acc has not improved for 20 consecutive epochs.
    early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode
        ='max', patience=20, verbose=1)
    # Cut the learning rate to about a third after 10 stagnant epochs of
    # val_loss.  NOTE(review): 'epsilon' was renamed 'min_delta' in newer
    # Keras releases -- confirm the pinned Keras version.
    lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,
        patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, min_lr=0
        )
    # 50 training and 50 validation steps per epoch.
    model.fit_generator(generator=train_loader, steps_per_epoch=50,
        max_queue_size=10, workers=1, validation_data=val_loader, epochs=
        epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,
        best_keeper, early_stopping, lr_reducer, tensorboard])
if __name__ == '__main__':
    # Default configuration for a quick 10-epoch test run.
    main(batch_size=32, crop_size=[32, 32, 32], learning_rate=1e-05,
        segmentation_task_ratio=0.2, weight_decay=0.0, save_folder='test',
        epochs=10, alpha=1.0)
<|reserved_special_token_1|>
import os
import pandas as pd
import numpy as np
from dataloader import *
from keras.optimizers import Adam, SGD
from mylib.models.misc import set_gpu_usage
set_gpu_usage()
from mylib.models import densesharp, metrics, losses
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping, ReduceLROnPlateau, LearningRateScheduler
# CUDA_VISIBLE_DEVICES takes comma-separated device indices (e.g. "0");
# the TensorFlow device string "/gpu:0" is not a valid entry and would
# hide every GPU from CUDA instead of selecting the first one.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def main(batch_size, crop_size, learning_rate, segmentation_task_ratio,
    weight_decay, save_folder, epochs, alpha):
    """Train a DenseSharp model jointly on a classification ('clf') head
    and a segmentation ('seg') head.

    Args:
        batch_size: samples per batch for the training and validation loaders.
        crop_size: crop size forwarded to the validation dataset.
        learning_rate: Adam learning rate.
        segmentation_task_ratio: loss weight of the 'seg' head ('clf' is 1.0).
        weight_decay: weight decay forwarded to densesharp.get_compiled.
        save_folder: subdirectory of 'tmp/' that receives per-epoch
            checkpoints, the CSV training log, the best checkpoint and the
            TensorBoard logs.
        epochs: number of training epochs.
        alpha: forwarded to get_mixup_loader (presumably the mixup
            Beta-distribution alpha -- confirm in dataloader).
    """
    # Echo the key hyper-parameters into the run log.
    print(learning_rate)
    print(alpha)
    print(weight_decay)
    # Training data: subsets 0 and 1, served through a mixup loader.
    train_dataset = ClfSegDataset(subset=[0, 1])
    train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,
        alpha=alpha)
    # Validation data: subset 2 (move=None -- presumably no random shift;
    # confirm in dataloader).
    val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])
    val_loader = get_loader(val_dataset, batch_size=batch_size)
    # Compile the multi-task model; warm-started from a hard-coded
    # pre-trained weights file.
    model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=
        learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.
        DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,
        metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.
        precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':
        1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,
        weights='tmp/test/weights42_222639.h5')
    # Save the weights after every epoch.
    checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %
        save_folder, verbose=1, period=1, save_weights_only=True)
    csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)
    tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)
    # Additionally keep the single best checkpoint, judged by validation
    # classification accuracy.
    best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,
        verbose=1, save_weights_only=True, monitor='val_clf_acc',
        save_best_only=True, period=1, mode='max')
    # Stop once val_clf_acc has not improved for 20 consecutive epochs.
    early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode
        ='max', patience=20, verbose=1)
    # Cut the learning rate to about a third after 10 stagnant epochs of
    # val_loss.  NOTE(review): 'epsilon' was renamed 'min_delta' in newer
    # Keras releases -- confirm the pinned Keras version.
    lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,
        patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, min_lr=0
        )
    # 50 training and 50 validation steps per epoch.
    model.fit_generator(generator=train_loader, steps_per_epoch=50,
        max_queue_size=10, workers=1, validation_data=val_loader, epochs=
        epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,
        best_keeper, early_stopping, lr_reducer, tensorboard])
if __name__ == '__main__':
    # Default configuration for a quick 10-epoch test run.
    main(batch_size=32, crop_size=[32, 32, 32], learning_rate=1e-05,
        segmentation_task_ratio=0.2, weight_decay=0.0, save_folder='test',
        epochs=10, alpha=1.0)
<|reserved_special_token_1|>
import os
import pandas as pd
import numpy as np
from dataloader import *
from keras.optimizers import Adam, SGD
from mylib.models.misc import set_gpu_usage
set_gpu_usage()
from mylib.models import densesharp, metrics, losses
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping, ReduceLROnPlateau, \
LearningRateScheduler
# CUDA_VISIBLE_DEVICES takes comma-separated device indices (e.g. "0");
# the TensorFlow device string "/gpu:0" is not a valid entry and would
# hide every GPU from CUDA instead of selecting the first one.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def main(batch_size, crop_size, learning_rate, segmentation_task_ratio, weight_decay, save_folder, epochs,
         alpha):
    """Train a DenseSharp model jointly on a classification ('clf') head
    and a segmentation ('seg') head.

    Args:
        batch_size: samples per batch for the training and validation loaders.
        crop_size: crop size forwarded to the validation dataset.
        learning_rate: Adam learning rate.
        segmentation_task_ratio: loss weight of the 'seg' head ('clf' is 1.0).
        weight_decay: weight decay forwarded to densesharp.get_compiled.
        save_folder: subdirectory of 'tmp/' that receives per-epoch
            checkpoints, the CSV training log, the best checkpoint and the
            TensorBoard logs.
        epochs: number of training epochs.
        alpha: forwarded to get_mixup_loader (presumably the mixup
            Beta-distribution alpha -- confirm in dataloader).
    """
    # Echo the key hyper-parameters into the run log.
    print(learning_rate)
    print(alpha)
    print(weight_decay)
    # Training data: subsets 0 and 1, served through a mixup loader.
    train_dataset = ClfSegDataset(subset=[0, 1])
    train_loader = get_mixup_loader(train_dataset, batch_size=batch_size, alpha=alpha)
    # Validation data: subset 2 (move=None -- presumably no random shift; confirm in dataloader).
    val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])
    val_loader = get_loader(val_dataset, batch_size=batch_size)
    # Compile the multi-task model; warm-started from a hard-coded pre-trained weights file.
    model = densesharp.get_compiled(output_size=1,
                                    optimizer=Adam(lr=learning_rate),
                                    loss={"clf": 'binary_crossentropy',
                                          "seg": losses.DiceLoss()},
                                    metrics={'clf': ['accuracy', metrics.precision, metrics.recall, metrics.fmeasure,
                                                     metrics.auc],
                                             'seg': [metrics.precision, metrics.recall, metrics.fmeasure]},
                                    loss_weights={"clf": 1., "seg": segmentation_task_ratio},
                                    weight_decay=weight_decay, weights='tmp/test/weights42_222639.h5')
    # Save the weights after every epoch.
    checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' % save_folder, verbose=1,
                                   period=1, save_weights_only=True)
    csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)
    tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)
    # Additionally keep the single best checkpoint, judged by validation classification accuracy.
    best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder, verbose=1, save_weights_only=True,
                                  monitor='val_clf_acc', save_best_only=True, period=1, mode='max')
    # Stop once val_clf_acc has not improved for 20 consecutive epochs.
    early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode='max',
                                   patience=20, verbose=1)
    # Cut the learning rate to about a third after 10 stagnant epochs of val_loss.
    # NOTE(review): 'epsilon' was renamed 'min_delta' in newer Keras releases -- confirm version.
    lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334, patience=10,
                                   verbose=1, mode='min', epsilon=1.e-5, cooldown=2, min_lr=0)
    # 50 training and 50 validation steps per epoch.
    model.fit_generator(generator=train_loader, steps_per_epoch=50, max_queue_size=10, workers=1,
                        validation_data=val_loader, epochs=epochs, validation_steps=50,
                        callbacks=[checkpointer, csv_logger, best_keeper, early_stopping, lr_reducer, tensorboard])
if __name__ == '__main__':
    # Default configuration for a quick 10-epoch test run.
    main(batch_size=32,
         crop_size=[32, 32, 32],
         learning_rate=1.e-5,
         segmentation_task_ratio=0.2,
         weight_decay=0.0,
         save_folder='test',
         epochs=10,
         alpha=1.0)
|
flexible
|
{
"blob_id": "94b3fa700d7da0ca913adeb0ad5324d1fec0be50",
"index": 7104,
"step-1": "<mask token>\n\n\ndef main(batch_size, crop_size, learning_rate, segmentation_task_ratio,\n weight_decay, save_folder, epochs, alpha):\n print(learning_rate)\n print(alpha)\n print(weight_decay)\n train_dataset = ClfSegDataset(subset=[0, 1])\n train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,\n alpha=alpha)\n val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])\n val_loader = get_loader(val_dataset, batch_size=batch_size)\n model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=\n learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.\n DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,\n metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.\n precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':\n 1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,\n weights='tmp/test/weights42_222639.h5')\n checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %\n save_folder, verbose=1, period=1, save_weights_only=True)\n csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)\n tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)\n best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,\n verbose=1, save_weights_only=True, monitor='val_clf_acc',\n save_best_only=True, period=1, mode='max')\n early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode\n ='max', patience=20, verbose=1)\n lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,\n patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, min_lr=0\n )\n model.fit_generator(generator=train_loader, steps_per_epoch=50,\n max_queue_size=10, workers=1, validation_data=val_loader, epochs=\n epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,\n best_keeper, early_stopping, lr_reducer, tensorboard])\n\n\n<mask token>\n",
"step-2": "<mask token>\nset_gpu_usage()\n<mask token>\n\n\ndef main(batch_size, crop_size, learning_rate, segmentation_task_ratio,\n weight_decay, save_folder, epochs, alpha):\n print(learning_rate)\n print(alpha)\n print(weight_decay)\n train_dataset = ClfSegDataset(subset=[0, 1])\n train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,\n alpha=alpha)\n val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])\n val_loader = get_loader(val_dataset, batch_size=batch_size)\n model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=\n learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.\n DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,\n metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.\n precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':\n 1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,\n weights='tmp/test/weights42_222639.h5')\n checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %\n save_folder, verbose=1, period=1, save_weights_only=True)\n csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)\n tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)\n best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,\n verbose=1, save_weights_only=True, monitor='val_clf_acc',\n save_best_only=True, period=1, mode='max')\n early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode\n ='max', patience=20, verbose=1)\n lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,\n patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, min_lr=0\n )\n model.fit_generator(generator=train_loader, steps_per_epoch=50,\n max_queue_size=10, workers=1, validation_data=val_loader, epochs=\n epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,\n best_keeper, early_stopping, lr_reducer, tensorboard])\n\n\nif __name__ == '__main__':\n main(batch_size=32, crop_size=[32, 32, 32], 
learning_rate=1e-05,\n segmentation_task_ratio=0.2, weight_decay=0.0, save_folder='test',\n epochs=10, alpha=1.0)\n",
"step-3": "<mask token>\nset_gpu_usage()\n<mask token>\nos.environ['CUDA_VISIBLE_DEVICES'] = '/gpu:0'\n\n\ndef main(batch_size, crop_size, learning_rate, segmentation_task_ratio,\n weight_decay, save_folder, epochs, alpha):\n print(learning_rate)\n print(alpha)\n print(weight_decay)\n train_dataset = ClfSegDataset(subset=[0, 1])\n train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,\n alpha=alpha)\n val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])\n val_loader = get_loader(val_dataset, batch_size=batch_size)\n model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=\n learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.\n DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,\n metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.\n precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':\n 1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,\n weights='tmp/test/weights42_222639.h5')\n checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %\n save_folder, verbose=1, period=1, save_weights_only=True)\n csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)\n tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)\n best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,\n verbose=1, save_weights_only=True, monitor='val_clf_acc',\n save_best_only=True, period=1, mode='max')\n early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode\n ='max', patience=20, verbose=1)\n lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,\n patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, min_lr=0\n )\n model.fit_generator(generator=train_loader, steps_per_epoch=50,\n max_queue_size=10, workers=1, validation_data=val_loader, epochs=\n epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,\n best_keeper, early_stopping, lr_reducer, tensorboard])\n\n\nif __name__ == '__main__':\n 
main(batch_size=32, crop_size=[32, 32, 32], learning_rate=1e-05,\n segmentation_task_ratio=0.2, weight_decay=0.0, save_folder='test',\n epochs=10, alpha=1.0)\n",
"step-4": "import os\nimport pandas as pd\nimport numpy as np\nfrom dataloader import *\nfrom keras.optimizers import Adam, SGD\nfrom mylib.models.misc import set_gpu_usage\nset_gpu_usage()\nfrom mylib.models import densesharp, metrics, losses\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping, ReduceLROnPlateau, LearningRateScheduler\nos.environ['CUDA_VISIBLE_DEVICES'] = '/gpu:0'\n\n\ndef main(batch_size, crop_size, learning_rate, segmentation_task_ratio,\n weight_decay, save_folder, epochs, alpha):\n print(learning_rate)\n print(alpha)\n print(weight_decay)\n train_dataset = ClfSegDataset(subset=[0, 1])\n train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,\n alpha=alpha)\n val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])\n val_loader = get_loader(val_dataset, batch_size=batch_size)\n model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=\n learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.\n DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,\n metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.\n precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':\n 1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,\n weights='tmp/test/weights42_222639.h5')\n checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %\n save_folder, verbose=1, period=1, save_weights_only=True)\n csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)\n tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)\n best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,\n verbose=1, save_weights_only=True, monitor='val_clf_acc',\n save_best_only=True, period=1, mode='max')\n early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode\n ='max', patience=20, verbose=1)\n lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,\n patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, 
min_lr=0\n )\n model.fit_generator(generator=train_loader, steps_per_epoch=50,\n max_queue_size=10, workers=1, validation_data=val_loader, epochs=\n epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,\n best_keeper, early_stopping, lr_reducer, tensorboard])\n\n\nif __name__ == '__main__':\n main(batch_size=32, crop_size=[32, 32, 32], learning_rate=1e-05,\n segmentation_task_ratio=0.2, weight_decay=0.0, save_folder='test',\n epochs=10, alpha=1.0)\n",
"step-5": "import os\nimport pandas as pd\nimport numpy as np\n\nfrom dataloader import *\nfrom keras.optimizers import Adam, SGD\nfrom mylib.models.misc import set_gpu_usage\n\nset_gpu_usage()\n\nfrom mylib.models import densesharp, metrics, losses\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping, ReduceLROnPlateau, \\\n LearningRateScheduler\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '/gpu:0'\n\n\ndef main(batch_size, crop_size, learning_rate, segmentation_task_ratio, weight_decay, save_folder, epochs,\n alpha):\n\n print(learning_rate)\n print(alpha)\n print(weight_decay)\n\n train_dataset = ClfSegDataset(subset=[0, 1])\n train_loader = get_mixup_loader(train_dataset, batch_size=batch_size, alpha=alpha)\n\n val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])\n val_loader = get_loader(val_dataset, batch_size=batch_size)\n\n model = densesharp.get_compiled(output_size=1,\n optimizer=Adam(lr=learning_rate),\n loss={\"clf\": 'binary_crossentropy',\n \"seg\": losses.DiceLoss()},\n metrics={'clf': ['accuracy', metrics.precision, metrics.recall, metrics.fmeasure,\n metrics.auc],\n 'seg': [metrics.precision, metrics.recall, metrics.fmeasure]},\n loss_weights={\"clf\": 1., \"seg\": segmentation_task_ratio},\n weight_decay=weight_decay, weights='tmp/test/weights42_222639.h5')\n\n checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' % save_folder, verbose=1,\n period=1, save_weights_only=True)\n csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)\n tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)\n\n best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder, verbose=1, save_weights_only=True,\n monitor='val_clf_acc', save_best_only=True, period=1, mode='max')\n\n early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode='max',\n patience=20, verbose=1)\n\n lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334, patience=10,\n verbose=1, 
mode='min', epsilon=1.e-5, cooldown=2, min_lr=0)\n\n model.fit_generator(generator=train_loader, steps_per_epoch=50, max_queue_size=10, workers=1,\n validation_data=val_loader, epochs=epochs, validation_steps=50,\n callbacks=[checkpointer, csv_logger, best_keeper, early_stopping, lr_reducer, tensorboard])\n\n\nif __name__ == '__main__':\n main(batch_size=32,\n crop_size=[32, 32, 32],\n learning_rate=1.e-5,\n segmentation_task_ratio=0.2,\n weight_decay=0.0,\n save_folder='test',\n epochs=10,\n alpha=1.0)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import html
import logging
import re
import pyarabic.araby as araby
# Model identifiers (HuggingFace names without the "aubmindlab/" organization
# prefix) that this preprocessor supports.
ACCEPTED_MODELS = [
    "bert-base-arabertv01",
    "bert-base-arabert",
    "bert-base-arabertv02",
    "bert-base-arabertv2",
    "bert-large-arabertv02",
    "bert-large-arabertv2",
    "araelectra-base",
    "araelectra-base-discriminator",
    "araelectra-base-generator",
    "aragpt2-base",
    "aragpt2-medium",
    "aragpt2-large",
    "aragpt2-mega",
]
# Subset of ACCEPTED_MODELS that needs Farasa segmentation during
# preprocessing (a FarasaSegmenter is initialized for these in __init__).
SEGMENTED_MODELS = [
    "bert-base-arabert",
    "bert-base-arabertv2",
    "bert-large-arabertv2",
]
class ArbertmoPreprocessor:
"""
A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.
    It can also reverse the preprocessing on the text output of generated text
Args:
model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to "bert-base-arabertv02". Current accepted models are:
- :obj:`"bert-base-arabertv01"`: No farasa segmentation.
- :obj:`"bert-base-arabert"`: with farasa segmentation.
        - :obj:`"bert-base-arabertv02"`: No farasa segmentation.
- :obj:`"bert-base-arabertv2"`: with farasa segmentation.
        - :obj:`"bert-large-arabertv02"`: No farasa segmentation.
- :obj:`"bert-large-arabertv2"`: with farasa segmentation.
- :obj:`"araelectra-base"`: No farasa segmentation.
- :obj:`"araelectra-base-discriminator"`: No farasa segmentation.
- :obj:`"araelectra-base-generator"`: No farasa segmentation.
- :obj:`"aragpt2-base"`: No farasa segmentation.
- :obj:`"aragpt2-medium"`: No farasa segmentation.
- :obj:`"aragpt2-large"`: No farasa segmentation.
- :obj:`"aragpt2-mega"`: No farasa segmentation.
keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False
        remove_html_markup(:obj: `bool`): Whether to remove html artifacts, should be set to False when preprocessing TyDi QA. Defaults to True
replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True
strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)
strip_tatweel(:obj: `bool`): remove tatweel '\\u0640'
insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words
remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character
Returns:
ArBERTMoPreprocessor: the preprocessor class
Example:
from preprocess import ArBERTMoPreprocessor
arabert_prep = ArBERTMoPreprocessor("aubmindlab/bert-base-arabertv2")
arabert_prep.preprocess("SOME ARABIC TEXT")
"""
def __init__(
self,
model_name,
keep_emojis=False,
remove_html_markup=True,
replace_urls_emails_mentions=True,
strip_tashkeel=True,
strip_tatweel=True,
insert_white_spaces=True,
remove_elongation=True,
):
"""
model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to "bert-base-arabertv02". Current accepted models are:
- :obj:`"bert-base-arabertv01"`: No farasa segmentation.
- :obj:`"bert-base-arabert"`: with farasa segmentation.
- :obj:`"bert-base-arabertv02"`: No farasas egmentation.
- :obj:`"bert-base-arabertv2"`: with farasa segmentation.
- :obj:`"bert-large-arabertv02"`: No farasas egmentation.
- :obj:`"bert-large-arabertv2"`: with farasa segmentation.
- :obj:`"araelectra-base"`: No farasa segmentation.
- :obj:`"araelectra-base-discriminator"`: No farasa segmentation.
- :obj:`"araelectra-base-generator"`: No farasa segmentation.
- :obj:`"aragpt2-base"`: No farasa segmentation.
- :obj:`"aragpt2-medium"`: No farasa segmentation.
- :obj:`"aragpt2-large"`: No farasa segmentation.
- :obj:`"aragpt2-mega"`: No farasa segmentation.
keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False
remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True
replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True
strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)
strip_tatweel(:obj: `bool`): remove tatweel '\\u0640'
insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words
remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character
"""
model_name = model_name.replace("aubmindlab/", "")
if model_name not in ACCEPTED_MODELS:
logging.warning(
"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation"
)
self.model_name = "bert-base-arabertv02"
else:
self.model_name = model_name
if self.model_name in SEGMENTED_MODELS:
logging.info(
"Selected Model requires pre-segmentation, Initializing FarasaSegmenter"
)
try:
from farasa.segmenter import FarasaSegmenter
self.farasa_segmenter = FarasaSegmenter(interactive=True)
except:
logging.warning(
"farasapy is not installed, you want be able to process text for AraBERTv1 and v2. Install it using: pip install farasapy"
)
else:
logging.info(
"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization"
)
self.keep_emojis = keep_emojis
if self.keep_emojis:
import emoji
self.emoji = emoji
if self.model_name in SEGMENTED_MODELS:
logging.warning(
"Keeping tweets with Farasa Segmentation is 10 times slower"
)
self.remove_html_markup = remove_html_markup
self.replace_urls_emails_mentions = replace_urls_emails_mentions
self.strip_tashkeel = strip_tashkeel
self.strip_tatweel = strip_tatweel
self.insert_white_spaces = insert_white_spaces
self.remove_elongation = remove_elongation
    def preprocess(self, text):
        """
        Preprocess takes an input text line and applies the same preprocessing
        used in AraBERT pretraining.
        Args:
            text (:obj:`str`): input text string
        Returns:
            string: A preprocessed string depending on which model was selected
        """
        # v1 models are handled by the legacy preprocessing path.
        if self.model_name == "bert-base-arabert":
            return self._old_preprocess(
                text,
                do_farasa_tokenization=True,
            )
        if self.model_name == "bert-base-arabertv01":
            return self._old_preprocess(text, do_farasa_tokenization=False)
        text = str(text)
        # Decode HTML entities (e.g. "&amp;" -> "&") before any other step.
        text = html.unescape(text)
        if self.strip_tashkeel:
            text = araby.strip_tashkeel(text)
        if self.strip_tatweel:
            text = araby.strip_tatweel(text)
        # NOTE(review): url_regexes, email_regexes, user_mention_regex,
        # chars_regex and rejected_chars_regex are module-level patterns
        # defined elsewhere in this file.
        if self.replace_urls_emails_mentions:
            # replace all possible URLs with the [رابط] token
            for reg in url_regexes:
                text = re.sub(reg, " [رابط] ", text)
            # replace emails with the [بريد] token
            for reg in email_regexes:
                text = re.sub(reg, " [بريد] ", text)
            # replace user mentions with the [مستخدم] token
            text = re.sub(user_mention_regex, " [مستخدم] ", text)
        if self.remove_html_markup:
            # remove html line breaks
            text = re.sub("<br />", " ", text)
            # remove html markup
            text = re.sub("</?[^>]+>", " ", text)
        # remove repeated characters >2
        if self.remove_elongation:
            text = self._remove_elongation(text)
        # insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets
        if self.insert_white_spaces:
            text = re.sub(
                "([^0-9\u0621-\u063A\u0641-\u064A\u0660-\u0669a-zA-Z\[\]])",
                r" \1 ",
                text,
            )
            # insert whitespace between words and numbers or numbers and words
            text = re.sub(
                "(\d+)([\u0621-\u063A\u0641-\u064A\u0660-\u066C]+)", r" \1 \2 ", text
            )
            text = re.sub(
                "([\u0621-\u063A\u0641-\u064A\u0660-\u066C]+)(\d+)", r" \1 \2 ", text
            )
        # remove unwanted characters
        if self.keep_emojis:
            # NOTE(review): emoji.UNICODE_EMOJI was removed in emoji>=2.0;
            # this code requires emoji<2.0 -- confirm the pinned version.
            emoji_regex = "".join(list(self.emoji.UNICODE_EMOJI["en"].keys()))
            rejected_chars_regex2 = "[^%s%s]" % (chars_regex, emoji_regex)
            text = re.sub(rejected_chars_regex2, " ", text)
        else:
            text = re.sub(rejected_chars_regex, " ", text)
        # collapse whitespace and drop the emoji variation selector (U+FE0F)
        text = " ".join(text.replace("\uFE0F", "").split())
        if (
            self.model_name == "bert-base-arabertv2"
            or self.model_name == "bert-large-arabertv2"
        ):
            # v2 models need Farasa segmentation; when emojis are kept they
            # are passed through unsegmented, word by word (slow path).
            if self.keep_emojis:
                new_text = []
                for word in text.split():
                    if word in list(self.emoji.UNICODE_EMOJI["en"].keys()):
                        new_text.append(word)
                    else:
                        new_text.append(self.farasa_segmenter.segment(word))
                text = " ".join(new_text)
            else:
                text = self.farasa_segmenter.segment(text)
            return self._farasa_segment(text)
        # All the other models don't require Farasa Segmentation
        return text
def unpreprocess(self, text, desegment=True):
"""Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.
The objective is to make the generated text of any model appear natural and not preprocessed.
Args:
text (str): input text to be un-preprocessed
desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. Defaults to True.
Returns:
str: The unpreprocessed (and possibly Farasa-desegmented) text.
"""
if self.model_name in SEGMENTED_MODELS and desegment:
text = self.desegment(text)
# removes the spaces around quotation marks ex: i " ate " an apple --> i "ate" an apple
# https://stackoverflow.com/a/53436792/5381220
text = re.sub(white_spaced_double_quotation_regex, '"' + r"\1" + '"', text)
text = re.sub(white_spaced_single_quotation_regex, "'" + r"\1" + "'", text)
text = re.sub(white_spaced_back_quotation_regex, "\`" + r"\1" + "\`", text)
text = re.sub(white_spaced_back_quotation_regex, "\—" + r"\1" + "\—", text)
# during generation, sometimes the models don't put a space after the dot, this handles it
text = text.replace(".", " . ")
text = " ".join(text.split())
# handle decimals
text = re.sub(r"(\d+) \. (\d+)", r"\1.\2", text)
text = re.sub(r"(\d+) \, (\d+)", r"\1,\2", text)
text = re.sub(left_and_right_spaced_chars, r"\1", text)
text = re.sub(left_spaced_chars, r"\1", text)
text = re.sub(right_spaced_chars, r"\1", text)
return text
def desegment(self, text):
"""
Use this function if sentence tokenization was done using
`from arabert.preprocess_arabert import preprocess` with Farasa enabled
AraBERT segmentation using Farasa adds a space after the '+' for prefixes,
and after before the '+' for suffixes
Example:
>>> desegment('ال+ دراس +ات')
الدراسات
"""
text = text.replace("+ ", "+")
text = text.replace(" +", "+")
text = " ".join([self._desegmentword(word) for word in text.split(" ")])
return text
def _desegmentword(self, orig_word: str) -> str:
"""
Word segmentor that takes a Farasa Segmented Word and removes the '+' signs
Example:
>>> _desegmentword("ال+يومي+ة")
اليومية
"""
word = orig_word.replace("ل+ال+", "لل")
if "ال+ال" not in orig_word:
word = word.replace("ل+ال", "لل")
word = word.replace("+", "")
word = word.replace("للل", "لل")
return word
    def _old_preprocess(self, text, do_farasa_tokenization):
        """
        AraBERTv1 preprocessing Function

        Legacy cleaning pipeline kept for the v0.1/v1 models: strips
        diacritics, drops tatweel and date-like citations, replaces
        URLs/emails/mentions with special tokens, collapses redundant
        punctuation, and optionally applies Farasa tokenization.

        Args:
            text (str): raw input text.
            do_farasa_tokenization (bool): apply Farasa word segmentation at
                the end (used by "bert-base-arabert").

        Returns:
            str: the cleaned (and possibly Farasa-tokenized) text.
        """
        text = str(text)
        if self.strip_tashkeel:
            text = araby.strip_tashkeel(text)
        # drop patterns like "12/رمضان/1440]" — presumably hijri-date
        # citations; note the trailing ']' is required for a match
        text = re.sub(r"\d+\/[ء-ي]+\/\d+\]", "", text)
        # remove the tatweel (kashida) elongation character
        text = re.sub("ـ", "", text)
        # normalize French quotes to plain double quotes
        text = re.sub("[«»]", ' " ', text)
        if self.replace_urls_emails_mentions:
            # replace the [رابط] token with space if you want to clean links
            text = re.sub(regex_url_step1, "[رابط]", text)
            text = re.sub(regex_url_step2, "[رابط]", text)
            text = re.sub(regex_url, "[رابط]", text)
            text = re.sub(regex_email, "[بريد]", text)
            text = re.sub(regex_mention, "[مستخدم]", text)
        # replace ellipsis with a single dot before punctuation cleanup
        text = re.sub("…", r"\.", text).strip()
        text = self._remove_redundant_punct(text)
        if self.replace_urls_emails_mentions:
            # re-normalize the special tokens after punctuation cleanup
            text = re.sub(r"\[ رابط \]|\[ رابط\]|\[رابط \]", " [رابط] ", text)
            text = re.sub(r"\[ بريد \]|\[ بريد\]|\[بريد \]", " [بريد] ", text)
            text = re.sub(r"\[ مستخدم \]|\[ مستخدم\]|\[مستخدم \]", " [مستخدم] ", text)
        if self.remove_elongation:
            text = self._remove_elongation(text)
        if self.insert_white_spaces:
            # pad every char that is not a digit, Arabic letter, Latin letter
            # or square bracket with surrounding spaces
            text = re.sub(
                "([^0-9\u0621-\u063A\u0641-\u0669\u0671-\u0673a-zA-Z\[\]])",
                r" \1 ",
                text,
            )
        if do_farasa_tokenization:
            text = self._tokenize_arabic_words_farasa(text)
        return text.strip()
def _farasa_segment(self, text):
line_farasa = text.split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ["[", "]"]:
continue
if word in ["رابط", "بريد", "مستخدم"] and line_farasa[index - 1] in [
"[",
"]",
]:
segmented_line.append("[" + word + "]")
continue
if "+" not in word:
segmented_line.append(word)
continue
segmented_word = self._split_farasa_output(word)
segmented_line.extend(segmented_word)
return " ".join(segmented_line)
    def _split_farasa_output(self, word):
        """Split one Farasa-segmented word (affixes joined with '+') into a
        list of sub-tokens, marking prefixes as 'x+' and suffixes as '+x'.

        The KAF token ('ك') is ambiguous — it can be either a prefix or a
        suffix — so its position inside the word decides how it is marked.
        """
        segmented_word = []
        temp_token = ""
        for i, c in enumerate(word):
            if c == "+":
                # if the token is KAF, it could be a suffix or prefix
                if temp_token == "ك":
                    # if we are at the second token, then KAF is surely a prefix
                    if i == 1:
                        segmented_word.append(temp_token + "+")
                        temp_token = ""
                    # If the KAF token is between 2 tokens
                    elif word[i - 2] == "+":
                        # if the previous token is prefix, then this KAF must be a prefix
                        if segmented_word[-1][-1] == "+":
                            segmented_word.append(temp_token + "+")
                            temp_token = ""
                        # else it is a suffix, this KAF could not be a second suffix
                        else:
                            segmented_word.append("+" + temp_token)
                            temp_token = ""
                    # if Kaf is at the end, this is handled with the statement after the loop
                elif temp_token in prefix_list:
                    segmented_word.append(temp_token + "+")
                    temp_token = ""
                elif temp_token in suffix_list:
                    segmented_word.append("+" + temp_token)
                    temp_token = ""
                else:
                    segmented_word.append(temp_token)
                    temp_token = ""
                continue
            temp_token += c
        # flush the trailing token; at word end only suffixes get a '+' mark
        if temp_token != "":
            if temp_token in suffix_list:
                segmented_word.append("+" + temp_token)
            else:
                segmented_word.append(temp_token)
        return segmented_word
def _tokenize_arabic_words_farasa(self, line_input):
if self.keep_emojis:
# insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets
line_farasa = []
for word in line_input.split():
if word in list(self.emoji.UNICODE_EMOJI["en"].keys()):
line_farasa.append(word)
else:
line_farasa.append(self.farasa_segmenter.segment(word))
else:
line_farasa = self.farasa_segmenter.segment(line_input).split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ["[", "]"]:
continue
if word in ["رابط", "بريد", "مستخدم"] and line_farasa[index - 1] in [
"[",
"]",
]:
segmented_line.append("[" + word + "]")
continue
segmented_word = []
for token in word.split("+"):
if token in prefix_list:
segmented_word.append(token + "+")
elif token in suffix_list:
segmented_word.append("+" + token)
else:
segmented_word.append(token)
segmented_line.extend(segmented_word)
return " ".join(segmented_line)
def _remove_elongation(self, text):
"""
:param text: the input text to remove elongation
:return: delongated text
"""
# loop over the number of times the regex matched the text
for index_ in range(len(re.findall(regex_tatweel, text))):
elongation = re.search(regex_tatweel, text)
if elongation:
elongation_pattern = elongation.group()
elongation_replacement = elongation_pattern[0]
elongation_pattern = re.escape(elongation_pattern)
text = re.sub(
elongation_pattern, elongation_replacement, text, flags=re.MULTILINE
)
else:
break
return text
    def _remove_redundant_punct(self, text):
        """Collapse runs of 2+ punctuation characters into a single
        deduplicated group (first-seen order), padded with spaces.

        `text_` tracks a copy with the matched runs removed entirely so the
        next search does not re-match the just-inserted (shorter) group;
        `dif` realigns the match spans between the two strings.
        """
        text_ = text
        result = re.search(redundant_punct_pattern, text)
        dif = 0
        while result:
            sub = result.group()
            # keep one instance of each punctuation char, in original order
            sub = sorted(set(sub), key=sub.index)
            sub = " " + "".join(list(sub)) + " "
            text = "".join(
                (text[: result.span()[0] + dif], sub, text[result.span()[1] + dif :])
            )
            text_ = "".join(
                (text_[: result.span()[0]], text_[result.span()[1] :])
            ).strip()
            dif = abs(len(text) - len(text_))
            result = re.search(redundant_punct_pattern, text_)
        # normalize the whitespace introduced by the padding
        text = re.sub(r"\s+", " ", text)
        return text.strip()
# Farasa affix inventory: prefixes that may be split off with a trailing '+'.
# NOTE(review): the "\u...."-escaped entries duplicate the literal Arabic
# entries above them (e.g. "\u0627\u0644" == "ال"); harmless for membership
# tests, kept as-is for compatibility.
prefix_list = [
    "ال",
    "و",
    "ف",
    "ب",
    "ك",
    "ل",
    "لل",
    "\u0627\u0644",
    "\u0648",
    "\u0641",
    "\u0628",
    "\u0643",
    "\u0644",
    "\u0644\u0644",
    "س",
]
# Farasa affix inventory: suffixes that may be split off with a leading '+'.
# NOTE(review): like prefix_list, the escaped entries duplicate the literal
# Arabic ones; kept as-is for compatibility.
suffix_list = [
    "ه",
    "ها",
    "ك",
    "ي",
    "هما",
    "كما",
    "نا",
    "كم",
    "هم",
    "هن",
    "كن",
    "ا",
    "ان",
    "ين",
    "ون",
    "وا",
    "ات",
    "ت",
    "ن",
    "ة",
    "\u0647",
    "\u0647\u0627",
    "\u0643",
    "\u064a",
    "\u0647\u0645\u0627",
    "\u0643\u0645\u0627",
    "\u0646\u0627",
    "\u0643\u0645",
    "\u0647\u0645",
    "\u0647\u0646",
    "\u0643\u0646",
    "\u0627",
    "\u0627\u0646",
    "\u064a\u0646",
    "\u0648\u0646",
    "\u0648\u0627",
    "\u0627\u062a",
    "\u062a",
    "\u0646",
    "\u0629",
]
# Special placeholder tokens inserted by preprocessing (link/user/email).
other_tokens = ["[رابط]", "[مستخدم]", "[بريد]"]

# the never_split list is used with the transformers library tokenizer so
# affix markers and placeholders are never broken apart.
prefix_symbols = [x + "+" for x in prefix_list]
# NOTE(review): "suffix_symblos" is a typo but is a public module name;
# kept for backward compatibility.
suffix_symblos = ["+" + x for x in suffix_list]
never_split_tokens = list(set(prefix_symbols + suffix_symblos + other_tokens))
# URL patterns, from a broad domain match down to bare "www"/".com" fragments.
url_regexes = [
    r"(http(s)?:\/\/.)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)",
    r"@(https?|ftp)://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?$@iS",
    r"http[s]?://[a-zA-Z0-9_\-./~\?=%&]+",
    r"www[a-zA-Z0-9_\-?=%&/.~]+",
    r"[a-zA-Z]+\.com",
    r"(?=http)[^\s]+",
    r"(?=www)[^\s]+",
    r"://",
]
# @handle style mentions
user_mention_regex = r"@[\w\d]+"
# email patterns: a strict form first, then a permissive fallback
email_regexes = [r"[\w-]+@([\w-]+\.)+[\w-]+", r"\S+@\S+"]
# a run of 2+ punctuation characters (collapsed by _remove_redundant_punct)
redundant_punct_pattern = (
    r"([!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ【»؛\s+«–…‘]{2,})"
)
# 3+ repetitions of the same non-digit character (elongation)
regex_tatweel = r"(\D)\1{2,}"
# any character outside the accepted Arabic/Latin/digit/punctuation set
rejected_chars_regex = r"[^0-9\u0621-\u063A\u0640-\u066C\u0671-\u0674a-zA-Z\[\]!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ»؛\s+«–…‘]"
# legacy (v1) URL/email/mention patterns used by _old_preprocess
regex_url_step1 = r"(?=http)[^\s]+"
regex_url_step2 = r"(?=www)[^\s]+"
regex_url = r"(http(s)?:\/\/.)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
regex_mention = r"@[\w\d]+"
regex_email = r"\S+@\S+"
# the accepted character set itself (used to build emoji-aware rejection regexes)
chars_regex = r"0-9\u0621-\u063A\u0640-\u066C\u0671-\u0674a-zA-Z\[\]!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ»؛\s+«–…‘"

# "<mark> <content> <mark>" spans whose inner padding spaces should be removed
white_spaced_double_quotation_regex = r'\"\s+([^"]+)\s+\"'
white_spaced_single_quotation_regex = r"\'\s+([^']+)\s+\'"
white_spaced_back_quotation_regex = r"\`\s+([^`]+)\s+\`"
white_spaced_em_dash = r"\—\s+([^—]+)\s+\—"

# punctuation that should attach to the preceding / following / both sides
left_spaced_chars = r" ([\]!#\$%\),\.:;\?}٪’،؟”؛…»·])"
right_spaced_chars = r"([\[\(\{“«‘*\~]) "
left_and_right_spaced_chars = r" ([\+\-\<\=\>\@\\\^\_\|\–]) "
|
normal
|
{
"blob_id": "6c3f60f05adbebe521ba08d7a7e9fc10b1cc914f",
"index": 2907,
"step-1": "<mask token>\n\n\nclass ArbertmoPreprocessor:\n <mask token>\n\n def __init__(self, model_name, keep_emojis=False, remove_html_markup=\n True, replace_urls_emails_mentions=True, strip_tashkeel=True,\n strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):\n \"\"\"\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. 
Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n \"\"\"\n model_name = model_name.replace('aubmindlab/', '')\n if model_name not in ACCEPTED_MODELS:\n logging.warning(\n \"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation\"\n )\n self.model_name = 'bert-base-arabertv02'\n else:\n self.model_name = model_name\n if self.model_name in SEGMENTED_MODELS:\n logging.info(\n 'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'\n )\n try:\n from farasa.segmenter import FarasaSegmenter\n self.farasa_segmenter = FarasaSegmenter(interactive=True)\n except:\n logging.warning(\n 'farasapy is not installed, you want be able to process text for AraBERTv1 and v2. 
Install it using: pip install farasapy'\n )\n else:\n logging.info(\n \"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization\"\n )\n self.keep_emojis = keep_emojis\n if self.keep_emojis:\n import emoji\n self.emoji = emoji\n if self.model_name in SEGMENTED_MODELS:\n logging.warning(\n 'Keeping tweets with Farasa Segmentation is 10 times slower'\n )\n self.remove_html_markup = remove_html_markup\n self.replace_urls_emails_mentions = replace_urls_emails_mentions\n self.strip_tashkeel = strip_tashkeel\n self.strip_tatweel = strip_tatweel\n self.insert_white_spaces = insert_white_spaces\n self.remove_elongation = remove_elongation\n\n def preprocess(self, text):\n \"\"\"\n Preprocess takes an input text line an applies the same preprocessing used in AraBERT\n pretraining\n\n Args:\n\n text (:obj:`str`): inout text string\n\n Returns:\n\n string: A preprocessed string depending on which model was selected\n \"\"\"\n if self.model_name == 'bert-base-arabert':\n return self._old_preprocess(text, do_farasa_tokenization=True)\n if self.model_name == 'bert-base-arabertv01':\n return self._old_preprocess(text, do_farasa_tokenization=False)\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n if self.replace_urls_emails_mentions:\n for reg in url_regexes:\n text = re.sub(reg, ' [رابط] ', text)\n for reg in email_regexes:\n text = re.sub(reg, ' [بريد] ', text)\n text = re.sub(user_mention_regex, ' [مستخدم] ', text)\n if self.remove_html_markup:\n text = re.sub('<br />', ' ', text)\n text = re.sub('</?[^>]+>', ' ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n text = re.sub('(\\\\d+)([ء-غف-ي٠-٬]+)', ' \\\\1 \\\\2 ', text)\n text = re.sub('([ء-غف-ي٠-٬]+)(\\\\d+)', ' \\\\1 \\\\2 ', text)\n if 
self.keep_emojis:\n emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))\n rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, ' ', text)\n else:\n text = re.sub(rejected_chars_regex, ' ', text)\n text = ' '.join(text.replace('️', '').split())\n if (self.model_name == 'bert-base-arabertv2' or self.model_name ==\n 'bert-large-arabertv2'):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = ' '.join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n return text\n\n def unpreprocess(self, text, desegment=True):\n \"\"\"Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.\n The objective is to make the generated text of any model appear natural and not preprocessed.\n\n Args:\n text (str): input text to be un-preprocessed\n desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. Defaults to True.\n\n Returns:\n str: The unpreprocessed (and possibly Farasa-desegmented) text.\n \"\"\"\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n text = re.sub(white_spaced_double_quotation_regex, '\"' + '\\\\1' +\n '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + '\\\\1' +\n \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\`' + '\\\\1' +\n '\\\\`', text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\—' + '\\\\1' +\n '\\\\—', text)\n text = text.replace('.', ' . ')\n text = ' '.join(text.split())\n text = re.sub('(\\\\d+) \\\\. 
(\\\\d+)', '\\\\1.\\\\2', text)\n text = re.sub('(\\\\d+) \\\\, (\\\\d+)', '\\\\1,\\\\2', text)\n text = re.sub(left_and_right_spaced_chars, '\\\\1', text)\n text = re.sub(left_spaced_chars, '\\\\1', text)\n text = re.sub(right_spaced_chars, '\\\\1', text)\n return text\n\n def desegment(self, text):\n \"\"\"\n Use this function if sentence tokenization was done using\n `from arabert.preprocess_arabert import preprocess` with Farasa enabled\n AraBERT segmentation using Farasa adds a space after the '+' for prefixes,\n and after before the '+' for suffixes\n\n Example:\n >>> desegment('ال+ دراس +ات')\n الدراسات\n \"\"\"\n text = text.replace('+ ', '+')\n text = text.replace(' +', '+')\n text = ' '.join([self._desegmentword(word) for word in text.split(' ')]\n )\n return text\n\n def _desegmentword(self, orig_word: str) ->str:\n \"\"\"\n Word segmentor that takes a Farasa Segmented Word and removes the '+' signs\n\n Example:\n >>> _desegmentword(\"ال+يومي+ة\")\n اليومية\n \"\"\"\n word = orig_word.replace('ل+ال+', 'لل')\n if 'ال+ال' not in orig_word:\n word = word.replace('ل+ال', 'لل')\n word = word.replace('+', '')\n word = word.replace('للل', 'لل')\n return word\n\n def _old_preprocess(self, text, do_farasa_tokenization):\n \"\"\"\n AraBERTv1 preprocessing Function\n \"\"\"\n text = str(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n text = re.sub('\\\\d+\\\\/[ء-ي]+\\\\/\\\\d+\\\\]', '', text)\n text = re.sub('ـ', '', text)\n text = re.sub('[«»]', ' \" ', text)\n if self.replace_urls_emails_mentions:\n text = re.sub(regex_url_step1, '[رابط]', text)\n text = re.sub(regex_url_step2, '[رابط]', text)\n text = re.sub(regex_url, '[رابط]', text)\n text = re.sub(regex_email, '[بريد]', text)\n text = re.sub(regex_mention, '[مستخدم]', text)\n text = re.sub('…', '\\\\.', text).strip()\n text = self._remove_redundant_punct(text)\n if self.replace_urls_emails_mentions:\n text = re.sub('\\\\[ رابط \\\\]|\\\\[ رابط\\\\]|\\\\[رابط \\\\]',\n ' [رابط] ', 
text)\n text = re.sub('\\\\[ بريد \\\\]|\\\\[ بريد\\\\]|\\\\[بريد \\\\]',\n ' [بريد] ', text)\n text = re.sub('\\\\[ مستخدم \\\\]|\\\\[ مستخدم\\\\]|\\\\[مستخدم \\\\]',\n ' [مستخدم] ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n if do_farasa_tokenization:\n text = self._tokenize_arabic_words_farasa(text)\n return text.strip()\n\n def _farasa_segment(self, text):\n line_farasa = text.split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n if '+' not in word:\n segmented_line.append(word)\n continue\n segmented_word = self._split_farasa_output(word)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _split_farasa_output(self, word):\n segmented_word = []\n temp_token = ''\n for i, c in enumerate(word):\n if c == '+':\n if temp_token == 'ك':\n if i == 1:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif word[i - 2] == '+':\n if segmented_word[-1][-1] == '+':\n segmented_word.append(temp_token + '+')\n temp_token = ''\n else:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n elif temp_token in prefix_list:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n else:\n segmented_word.append(temp_token)\n temp_token = ''\n continue\n temp_token += c\n if temp_token != '':\n if temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n else:\n segmented_word.append(temp_token)\n return segmented_word\n\n def _tokenize_arabic_words_farasa(self, line_input):\n if self.keep_emojis:\n line_farasa = []\n for word in line_input.split():\n if word in 
list(self.emoji.UNICODE_EMOJI['en'].keys()):\n line_farasa.append(word)\n else:\n line_farasa.append(self.farasa_segmenter.segment(word))\n else:\n line_farasa = self.farasa_segmenter.segment(line_input).split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n segmented_word = []\n for token in word.split('+'):\n if token in prefix_list:\n segmented_word.append(token + '+')\n elif token in suffix_list:\n segmented_word.append('+' + token)\n else:\n segmented_word.append(token)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _remove_elongation(self, text):\n \"\"\"\n :param text: the input text to remove elongation\n :return: delongated text\n \"\"\"\n for index_ in range(len(re.findall(regex_tatweel, text))):\n elongation = re.search(regex_tatweel, text)\n if elongation:\n elongation_pattern = elongation.group()\n elongation_replacement = elongation_pattern[0]\n elongation_pattern = re.escape(elongation_pattern)\n text = re.sub(elongation_pattern, elongation_replacement,\n text, flags=re.MULTILINE)\n else:\n break\n return text\n\n def _remove_redundant_punct(self, text):\n text_ = text\n result = re.search(redundant_punct_pattern, text)\n dif = 0\n while result:\n sub = result.group()\n sub = sorted(set(sub), key=sub.index)\n sub = ' ' + ''.join(list(sub)) + ' '\n text = ''.join((text[:result.span()[0] + dif], sub, text[result\n .span()[1] + dif:]))\n text_ = ''.join((text_[:result.span()[0]], text_[result.span()[\n 1]:])).strip()\n dif = abs(len(text) - len(text_))\n result = re.search(redundant_punct_pattern, text_)\n text = re.sub('\\\\s+', ' ', text)\n return text.strip()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ArbertmoPreprocessor:\n \"\"\"\n A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.\n It also can unprocess the text ouput of the generated text\n\n Args:\n\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. 
Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n\n Returns:\n\n ArBERTMoPreprocessor: the preprocessor class\n\n Example:\n\n from preprocess import ArBERTMoPreprocessor\n\n arabert_prep = ArBERTMoPreprocessor(\"aubmindlab/bert-base-arabertv2\")\n\n arabert_prep.preprocess(\"SOME ARABIC TEXT\")\n \"\"\"\n\n def __init__(self, model_name, keep_emojis=False, remove_html_markup=\n True, replace_urls_emails_mentions=True, strip_tashkeel=True,\n strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):\n \"\"\"\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". 
Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n \"\"\"\n model_name = model_name.replace('aubmindlab/', '')\n if model_name not in ACCEPTED_MODELS:\n logging.warning(\n \"Model provided is not in the accepted model list. 
Assuming you don't want Farasa Segmentation\"\n )\n self.model_name = 'bert-base-arabertv02'\n else:\n self.model_name = model_name\n if self.model_name in SEGMENTED_MODELS:\n logging.info(\n 'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'\n )\n try:\n from farasa.segmenter import FarasaSegmenter\n self.farasa_segmenter = FarasaSegmenter(interactive=True)\n except:\n logging.warning(\n 'farasapy is not installed, you want be able to process text for AraBERTv1 and v2. Install it using: pip install farasapy'\n )\n else:\n logging.info(\n \"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization\"\n )\n self.keep_emojis = keep_emojis\n if self.keep_emojis:\n import emoji\n self.emoji = emoji\n if self.model_name in SEGMENTED_MODELS:\n logging.warning(\n 'Keeping tweets with Farasa Segmentation is 10 times slower'\n )\n self.remove_html_markup = remove_html_markup\n self.replace_urls_emails_mentions = replace_urls_emails_mentions\n self.strip_tashkeel = strip_tashkeel\n self.strip_tatweel = strip_tatweel\n self.insert_white_spaces = insert_white_spaces\n self.remove_elongation = remove_elongation\n\n def preprocess(self, text):\n \"\"\"\n Preprocess takes an input text line an applies the same preprocessing used in AraBERT\n pretraining\n\n Args:\n\n text (:obj:`str`): inout text string\n\n Returns:\n\n string: A preprocessed string depending on which model was selected\n \"\"\"\n if self.model_name == 'bert-base-arabert':\n return self._old_preprocess(text, do_farasa_tokenization=True)\n if self.model_name == 'bert-base-arabertv01':\n return self._old_preprocess(text, do_farasa_tokenization=False)\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n if self.replace_urls_emails_mentions:\n for reg in url_regexes:\n text = re.sub(reg, ' [رابط] ', text)\n for reg in email_regexes:\n text = 
re.sub(reg, ' [بريد] ', text)\n text = re.sub(user_mention_regex, ' [مستخدم] ', text)\n if self.remove_html_markup:\n text = re.sub('<br />', ' ', text)\n text = re.sub('</?[^>]+>', ' ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n text = re.sub('(\\\\d+)([ء-غف-ي٠-٬]+)', ' \\\\1 \\\\2 ', text)\n text = re.sub('([ء-غف-ي٠-٬]+)(\\\\d+)', ' \\\\1 \\\\2 ', text)\n if self.keep_emojis:\n emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))\n rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, ' ', text)\n else:\n text = re.sub(rejected_chars_regex, ' ', text)\n text = ' '.join(text.replace('️', '').split())\n if (self.model_name == 'bert-base-arabertv2' or self.model_name ==\n 'bert-large-arabertv2'):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = ' '.join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n return text\n\n def unpreprocess(self, text, desegment=True):\n \"\"\"Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.\n The objective is to make the generated text of any model appear natural and not preprocessed.\n\n Args:\n text (str): input text to be un-preprocessed\n desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. 
Defaults to True.\n\n Returns:\n str: The unpreprocessed (and possibly Farasa-desegmented) text.\n \"\"\"\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n text = re.sub(white_spaced_double_quotation_regex, '\"' + '\\\\1' +\n '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + '\\\\1' +\n \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\`' + '\\\\1' +\n '\\\\`', text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\—' + '\\\\1' +\n '\\\\—', text)\n text = text.replace('.', ' . ')\n text = ' '.join(text.split())\n text = re.sub('(\\\\d+) \\\\. (\\\\d+)', '\\\\1.\\\\2', text)\n text = re.sub('(\\\\d+) \\\\, (\\\\d+)', '\\\\1,\\\\2', text)\n text = re.sub(left_and_right_spaced_chars, '\\\\1', text)\n text = re.sub(left_spaced_chars, '\\\\1', text)\n text = re.sub(right_spaced_chars, '\\\\1', text)\n return text\n\n def desegment(self, text):\n \"\"\"\n Use this function if sentence tokenization was done using\n `from arabert.preprocess_arabert import preprocess` with Farasa enabled\n AraBERT segmentation using Farasa adds a space after the '+' for prefixes,\n and after before the '+' for suffixes\n\n Example:\n >>> desegment('ال+ دراس +ات')\n الدراسات\n \"\"\"\n text = text.replace('+ ', '+')\n text = text.replace(' +', '+')\n text = ' '.join([self._desegmentword(word) for word in text.split(' ')]\n )\n return text\n\n def _desegmentword(self, orig_word: str) ->str:\n \"\"\"\n Word segmentor that takes a Farasa Segmented Word and removes the '+' signs\n\n Example:\n >>> _desegmentword(\"ال+يومي+ة\")\n اليومية\n \"\"\"\n word = orig_word.replace('ل+ال+', 'لل')\n if 'ال+ال' not in orig_word:\n word = word.replace('ل+ال', 'لل')\n word = word.replace('+', '')\n word = word.replace('للل', 'لل')\n return word\n\n def _old_preprocess(self, text, do_farasa_tokenization):\n \"\"\"\n AraBERTv1 preprocessing Function\n \"\"\"\n text = str(text)\n if self.strip_tashkeel:\n text = 
araby.strip_tashkeel(text)\n text = re.sub('\\\\d+\\\\/[ء-ي]+\\\\/\\\\d+\\\\]', '', text)\n text = re.sub('ـ', '', text)\n text = re.sub('[«»]', ' \" ', text)\n if self.replace_urls_emails_mentions:\n text = re.sub(regex_url_step1, '[رابط]', text)\n text = re.sub(regex_url_step2, '[رابط]', text)\n text = re.sub(regex_url, '[رابط]', text)\n text = re.sub(regex_email, '[بريد]', text)\n text = re.sub(regex_mention, '[مستخدم]', text)\n text = re.sub('…', '\\\\.', text).strip()\n text = self._remove_redundant_punct(text)\n if self.replace_urls_emails_mentions:\n text = re.sub('\\\\[ رابط \\\\]|\\\\[ رابط\\\\]|\\\\[رابط \\\\]',\n ' [رابط] ', text)\n text = re.sub('\\\\[ بريد \\\\]|\\\\[ بريد\\\\]|\\\\[بريد \\\\]',\n ' [بريد] ', text)\n text = re.sub('\\\\[ مستخدم \\\\]|\\\\[ مستخدم\\\\]|\\\\[مستخدم \\\\]',\n ' [مستخدم] ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n if do_farasa_tokenization:\n text = self._tokenize_arabic_words_farasa(text)\n return text.strip()\n\n def _farasa_segment(self, text):\n line_farasa = text.split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n if '+' not in word:\n segmented_line.append(word)\n continue\n segmented_word = self._split_farasa_output(word)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _split_farasa_output(self, word):\n segmented_word = []\n temp_token = ''\n for i, c in enumerate(word):\n if c == '+':\n if temp_token == 'ك':\n if i == 1:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif word[i - 2] == '+':\n if segmented_word[-1][-1] == '+':\n segmented_word.append(temp_token + '+')\n temp_token = ''\n else:\n segmented_word.append('+' + temp_token)\n 
temp_token = ''\n elif temp_token in prefix_list:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n else:\n segmented_word.append(temp_token)\n temp_token = ''\n continue\n temp_token += c\n if temp_token != '':\n if temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n else:\n segmented_word.append(temp_token)\n return segmented_word\n\n def _tokenize_arabic_words_farasa(self, line_input):\n if self.keep_emojis:\n line_farasa = []\n for word in line_input.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n line_farasa.append(word)\n else:\n line_farasa.append(self.farasa_segmenter.segment(word))\n else:\n line_farasa = self.farasa_segmenter.segment(line_input).split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n segmented_word = []\n for token in word.split('+'):\n if token in prefix_list:\n segmented_word.append(token + '+')\n elif token in suffix_list:\n segmented_word.append('+' + token)\n else:\n segmented_word.append(token)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _remove_elongation(self, text):\n \"\"\"\n :param text: the input text to remove elongation\n :return: delongated text\n \"\"\"\n for index_ in range(len(re.findall(regex_tatweel, text))):\n elongation = re.search(regex_tatweel, text)\n if elongation:\n elongation_pattern = elongation.group()\n elongation_replacement = elongation_pattern[0]\n elongation_pattern = re.escape(elongation_pattern)\n text = re.sub(elongation_pattern, elongation_replacement,\n text, flags=re.MULTILINE)\n else:\n break\n return text\n\n def _remove_redundant_punct(self, text):\n text_ = text\n result = re.search(redundant_punct_pattern, text)\n dif = 
0\n while result:\n sub = result.group()\n sub = sorted(set(sub), key=sub.index)\n sub = ' ' + ''.join(list(sub)) + ' '\n text = ''.join((text[:result.span()[0] + dif], sub, text[result\n .span()[1] + dif:]))\n text_ = ''.join((text_[:result.span()[0]], text_[result.span()[\n 1]:])).strip()\n dif = abs(len(text) - len(text_))\n result = re.search(redundant_punct_pattern, text_)\n text = re.sub('\\\\s+', ' ', text)\n return text.strip()\n\n\n<mask token>\n",
"step-3": "<mask token>\nACCEPTED_MODELS = ['bert-base-arabertv01', 'bert-base-arabert',\n 'bert-base-arabertv02', 'bert-base-arabertv2', 'bert-large-arabertv02',\n 'bert-large-arabertv2', 'araelectra-base',\n 'araelectra-base-discriminator', 'araelectra-base-generator',\n 'aragpt2-base', 'aragpt2-medium', 'aragpt2-large', 'aragpt2-mega']\nSEGMENTED_MODELS = ['bert-base-arabert', 'bert-base-arabertv2',\n 'bert-large-arabertv2']\n\n\nclass ArbertmoPreprocessor:\n \"\"\"\n A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.\n It also can unprocess the text ouput of the generated text\n\n Args:\n\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. 
Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n\n Returns:\n\n ArBERTMoPreprocessor: the preprocessor class\n\n Example:\n\n from preprocess import ArBERTMoPreprocessor\n\n arabert_prep = ArBERTMoPreprocessor(\"aubmindlab/bert-base-arabertv2\")\n\n arabert_prep.preprocess(\"SOME ARABIC TEXT\")\n \"\"\"\n\n def __init__(self, model_name, keep_emojis=False, remove_html_markup=\n True, replace_urls_emails_mentions=True, strip_tashkeel=True,\n strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):\n \"\"\"\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". 
Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n \"\"\"\n model_name = model_name.replace('aubmindlab/', '')\n if model_name not in ACCEPTED_MODELS:\n logging.warning(\n \"Model provided is not in the accepted model list. 
Assuming you don't want Farasa Segmentation\"\n )\n self.model_name = 'bert-base-arabertv02'\n else:\n self.model_name = model_name\n if self.model_name in SEGMENTED_MODELS:\n logging.info(\n 'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'\n )\n try:\n from farasa.segmenter import FarasaSegmenter\n self.farasa_segmenter = FarasaSegmenter(interactive=True)\n except:\n logging.warning(\n 'farasapy is not installed, you want be able to process text for AraBERTv1 and v2. Install it using: pip install farasapy'\n )\n else:\n logging.info(\n \"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization\"\n )\n self.keep_emojis = keep_emojis\n if self.keep_emojis:\n import emoji\n self.emoji = emoji\n if self.model_name in SEGMENTED_MODELS:\n logging.warning(\n 'Keeping tweets with Farasa Segmentation is 10 times slower'\n )\n self.remove_html_markup = remove_html_markup\n self.replace_urls_emails_mentions = replace_urls_emails_mentions\n self.strip_tashkeel = strip_tashkeel\n self.strip_tatweel = strip_tatweel\n self.insert_white_spaces = insert_white_spaces\n self.remove_elongation = remove_elongation\n\n def preprocess(self, text):\n \"\"\"\n Preprocess takes an input text line an applies the same preprocessing used in AraBERT\n pretraining\n\n Args:\n\n text (:obj:`str`): inout text string\n\n Returns:\n\n string: A preprocessed string depending on which model was selected\n \"\"\"\n if self.model_name == 'bert-base-arabert':\n return self._old_preprocess(text, do_farasa_tokenization=True)\n if self.model_name == 'bert-base-arabertv01':\n return self._old_preprocess(text, do_farasa_tokenization=False)\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n if self.replace_urls_emails_mentions:\n for reg in url_regexes:\n text = re.sub(reg, ' [رابط] ', text)\n for reg in email_regexes:\n text = 
re.sub(reg, ' [بريد] ', text)\n text = re.sub(user_mention_regex, ' [مستخدم] ', text)\n if self.remove_html_markup:\n text = re.sub('<br />', ' ', text)\n text = re.sub('</?[^>]+>', ' ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n text = re.sub('(\\\\d+)([ء-غف-ي٠-٬]+)', ' \\\\1 \\\\2 ', text)\n text = re.sub('([ء-غف-ي٠-٬]+)(\\\\d+)', ' \\\\1 \\\\2 ', text)\n if self.keep_emojis:\n emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))\n rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, ' ', text)\n else:\n text = re.sub(rejected_chars_regex, ' ', text)\n text = ' '.join(text.replace('️', '').split())\n if (self.model_name == 'bert-base-arabertv2' or self.model_name ==\n 'bert-large-arabertv2'):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = ' '.join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n return text\n\n def unpreprocess(self, text, desegment=True):\n \"\"\"Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.\n The objective is to make the generated text of any model appear natural and not preprocessed.\n\n Args:\n text (str): input text to be un-preprocessed\n desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. 
Defaults to True.\n\n Returns:\n str: The unpreprocessed (and possibly Farasa-desegmented) text.\n \"\"\"\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n text = re.sub(white_spaced_double_quotation_regex, '\"' + '\\\\1' +\n '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + '\\\\1' +\n \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\`' + '\\\\1' +\n '\\\\`', text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\—' + '\\\\1' +\n '\\\\—', text)\n text = text.replace('.', ' . ')\n text = ' '.join(text.split())\n text = re.sub('(\\\\d+) \\\\. (\\\\d+)', '\\\\1.\\\\2', text)\n text = re.sub('(\\\\d+) \\\\, (\\\\d+)', '\\\\1,\\\\2', text)\n text = re.sub(left_and_right_spaced_chars, '\\\\1', text)\n text = re.sub(left_spaced_chars, '\\\\1', text)\n text = re.sub(right_spaced_chars, '\\\\1', text)\n return text\n\n def desegment(self, text):\n \"\"\"\n Use this function if sentence tokenization was done using\n `from arabert.preprocess_arabert import preprocess` with Farasa enabled\n AraBERT segmentation using Farasa adds a space after the '+' for prefixes,\n and after before the '+' for suffixes\n\n Example:\n >>> desegment('ال+ دراس +ات')\n الدراسات\n \"\"\"\n text = text.replace('+ ', '+')\n text = text.replace(' +', '+')\n text = ' '.join([self._desegmentword(word) for word in text.split(' ')]\n )\n return text\n\n def _desegmentword(self, orig_word: str) ->str:\n \"\"\"\n Word segmentor that takes a Farasa Segmented Word and removes the '+' signs\n\n Example:\n >>> _desegmentword(\"ال+يومي+ة\")\n اليومية\n \"\"\"\n word = orig_word.replace('ل+ال+', 'لل')\n if 'ال+ال' not in orig_word:\n word = word.replace('ل+ال', 'لل')\n word = word.replace('+', '')\n word = word.replace('للل', 'لل')\n return word\n\n def _old_preprocess(self, text, do_farasa_tokenization):\n \"\"\"\n AraBERTv1 preprocessing Function\n \"\"\"\n text = str(text)\n if self.strip_tashkeel:\n text = 
araby.strip_tashkeel(text)\n text = re.sub('\\\\d+\\\\/[ء-ي]+\\\\/\\\\d+\\\\]', '', text)\n text = re.sub('ـ', '', text)\n text = re.sub('[«»]', ' \" ', text)\n if self.replace_urls_emails_mentions:\n text = re.sub(regex_url_step1, '[رابط]', text)\n text = re.sub(regex_url_step2, '[رابط]', text)\n text = re.sub(regex_url, '[رابط]', text)\n text = re.sub(regex_email, '[بريد]', text)\n text = re.sub(regex_mention, '[مستخدم]', text)\n text = re.sub('…', '\\\\.', text).strip()\n text = self._remove_redundant_punct(text)\n if self.replace_urls_emails_mentions:\n text = re.sub('\\\\[ رابط \\\\]|\\\\[ رابط\\\\]|\\\\[رابط \\\\]',\n ' [رابط] ', text)\n text = re.sub('\\\\[ بريد \\\\]|\\\\[ بريد\\\\]|\\\\[بريد \\\\]',\n ' [بريد] ', text)\n text = re.sub('\\\\[ مستخدم \\\\]|\\\\[ مستخدم\\\\]|\\\\[مستخدم \\\\]',\n ' [مستخدم] ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n if do_farasa_tokenization:\n text = self._tokenize_arabic_words_farasa(text)\n return text.strip()\n\n def _farasa_segment(self, text):\n line_farasa = text.split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n if '+' not in word:\n segmented_line.append(word)\n continue\n segmented_word = self._split_farasa_output(word)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _split_farasa_output(self, word):\n segmented_word = []\n temp_token = ''\n for i, c in enumerate(word):\n if c == '+':\n if temp_token == 'ك':\n if i == 1:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif word[i - 2] == '+':\n if segmented_word[-1][-1] == '+':\n segmented_word.append(temp_token + '+')\n temp_token = ''\n else:\n segmented_word.append('+' + temp_token)\n 
temp_token = ''\n elif temp_token in prefix_list:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n else:\n segmented_word.append(temp_token)\n temp_token = ''\n continue\n temp_token += c\n if temp_token != '':\n if temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n else:\n segmented_word.append(temp_token)\n return segmented_word\n\n def _tokenize_arabic_words_farasa(self, line_input):\n if self.keep_emojis:\n line_farasa = []\n for word in line_input.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n line_farasa.append(word)\n else:\n line_farasa.append(self.farasa_segmenter.segment(word))\n else:\n line_farasa = self.farasa_segmenter.segment(line_input).split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n segmented_word = []\n for token in word.split('+'):\n if token in prefix_list:\n segmented_word.append(token + '+')\n elif token in suffix_list:\n segmented_word.append('+' + token)\n else:\n segmented_word.append(token)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _remove_elongation(self, text):\n \"\"\"\n :param text: the input text to remove elongation\n :return: delongated text\n \"\"\"\n for index_ in range(len(re.findall(regex_tatweel, text))):\n elongation = re.search(regex_tatweel, text)\n if elongation:\n elongation_pattern = elongation.group()\n elongation_replacement = elongation_pattern[0]\n elongation_pattern = re.escape(elongation_pattern)\n text = re.sub(elongation_pattern, elongation_replacement,\n text, flags=re.MULTILINE)\n else:\n break\n return text\n\n def _remove_redundant_punct(self, text):\n text_ = text\n result = re.search(redundant_punct_pattern, text)\n dif = 
0\n while result:\n sub = result.group()\n sub = sorted(set(sub), key=sub.index)\n sub = ' ' + ''.join(list(sub)) + ' '\n text = ''.join((text[:result.span()[0] + dif], sub, text[result\n .span()[1] + dif:]))\n text_ = ''.join((text_[:result.span()[0]], text_[result.span()[\n 1]:])).strip()\n dif = abs(len(text) - len(text_))\n result = re.search(redundant_punct_pattern, text_)\n text = re.sub('\\\\s+', ' ', text)\n return text.strip()\n\n\nprefix_list = ['ال', 'و', 'ف', 'ب', 'ك', 'ل', 'لل', 'ال', 'و', 'ف', 'ب',\n 'ك', 'ل', 'لل', 'س']\nsuffix_list = ['ه', 'ها', 'ك', 'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن',\n 'كن', 'ا', 'ان', 'ين', 'ون', 'وا', 'ات', 'ت', 'ن', 'ة', 'ه', 'ها', 'ك',\n 'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن', 'كن', 'ا', 'ان', 'ين', 'ون',\n 'وا', 'ات', 'ت', 'ن', 'ة']\nother_tokens = ['[رابط]', '[مستخدم]', '[بريد]']\nprefix_symbols = [(x + '+') for x in prefix_list]\nsuffix_symblos = [('+' + x) for x in suffix_list]\nnever_split_tokens = list(set(prefix_symbols + suffix_symblos + other_tokens))\nurl_regexes = [\n '(http(s)?:\\\\/\\\\/.)?(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{2,256}\\\\.[a-z]{2,6}\\\\b([-a-zA-Z0-9@:%_\\\\+.~#?&//=]*)'\n , '@(https?|ftp)://(-\\\\.)?([^\\\\s/?\\\\.#-]+\\\\.?)+(/[^\\\\s]*)?$@iS',\n 'http[s]?://[a-zA-Z0-9_\\\\-./~\\\\?=%&]+', 'www[a-zA-Z0-9_\\\\-?=%&/.~]+',\n '[a-zA-Z]+\\\\.com', '(?=http)[^\\\\s]+', '(?=www)[^\\\\s]+', '://']\nuser_mention_regex = '@[\\\\w\\\\d]+'\nemail_regexes = ['[\\\\w-]+@([\\\\w-]+\\\\.)+[\\\\w-]+', '\\\\S+@\\\\S+']\nredundant_punct_pattern = (\n '([!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ【»؛\\\\s+«–…‘]{2,})'\n )\nregex_tatweel = '(\\\\D)\\\\1{2,}'\nrejected_chars_regex = (\n '[^0-9\\\\u0621-\\\\u063A\\\\u0640-\\\\u066C\\\\u0671-\\\\u0674a-zA-Z\\\\[\\\\]!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ»؛\\\\s+«–…‘]'\n )\nregex_url_step1 = '(?=http)[^\\\\s]+'\nregex_url_step2 = 
'(?=www)[^\\\\s]+'\nregex_url = (\n '(http(s)?:\\\\/\\\\/.)?(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{2,256}\\\\.[a-z]{2,6}\\\\b([-a-zA-Z0-9@:%_\\\\+.~#?&//=]*)'\n )\nregex_mention = '@[\\\\w\\\\d]+'\nregex_email = '\\\\S+@\\\\S+'\nchars_regex = (\n '0-9\\\\u0621-\\\\u063A\\\\u0640-\\\\u066C\\\\u0671-\\\\u0674a-zA-Z\\\\[\\\\]!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ»؛\\\\s+«–…‘'\n )\nwhite_spaced_double_quotation_regex = '\\\\\"\\\\s+([^\"]+)\\\\s+\\\\\"'\nwhite_spaced_single_quotation_regex = \"\\\\'\\\\s+([^']+)\\\\s+\\\\'\"\nwhite_spaced_back_quotation_regex = '\\\\`\\\\s+([^`]+)\\\\s+\\\\`'\nwhite_spaced_em_dash = '\\\\—\\\\s+([^—]+)\\\\s+\\\\—'\nleft_spaced_chars = ' ([\\\\]!#\\\\$%\\\\),\\\\.:;\\\\?}٪’،؟”؛…»·])'\nright_spaced_chars = '([\\\\[\\\\(\\\\{“«‘*\\\\~]) '\nleft_and_right_spaced_chars = ' ([\\\\+\\\\-\\\\<\\\\=\\\\>\\\\@\\\\\\\\\\\\^\\\\_\\\\|\\\\–]) '\n",
"step-4": "import html\nimport logging\nimport re\nimport pyarabic.araby as araby\nACCEPTED_MODELS = ['bert-base-arabertv01', 'bert-base-arabert',\n 'bert-base-arabertv02', 'bert-base-arabertv2', 'bert-large-arabertv02',\n 'bert-large-arabertv2', 'araelectra-base',\n 'araelectra-base-discriminator', 'araelectra-base-generator',\n 'aragpt2-base', 'aragpt2-medium', 'aragpt2-large', 'aragpt2-mega']\nSEGMENTED_MODELS = ['bert-base-arabert', 'bert-base-arabertv2',\n 'bert-large-arabertv2']\n\n\nclass ArbertmoPreprocessor:\n \"\"\"\n A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.\n It also can unprocess the text ouput of the generated text\n\n Args:\n\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. 
Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n\n Returns:\n\n ArBERTMoPreprocessor: the preprocessor class\n\n Example:\n\n from preprocess import ArBERTMoPreprocessor\n\n arabert_prep = ArBERTMoPreprocessor(\"aubmindlab/bert-base-arabertv2\")\n\n arabert_prep.preprocess(\"SOME ARABIC TEXT\")\n \"\"\"\n\n def __init__(self, model_name, keep_emojis=False, remove_html_markup=\n True, replace_urls_emails_mentions=True, strip_tashkeel=True,\n strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):\n \"\"\"\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". 
Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n \"\"\"\n model_name = model_name.replace('aubmindlab/', '')\n if model_name not in ACCEPTED_MODELS:\n logging.warning(\n \"Model provided is not in the accepted model list. 
Assuming you don't want Farasa Segmentation\"\n )\n self.model_name = 'bert-base-arabertv02'\n else:\n self.model_name = model_name\n if self.model_name in SEGMENTED_MODELS:\n logging.info(\n 'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'\n )\n try:\n from farasa.segmenter import FarasaSegmenter\n self.farasa_segmenter = FarasaSegmenter(interactive=True)\n except:\n logging.warning(\n 'farasapy is not installed, you want be able to process text for AraBERTv1 and v2. Install it using: pip install farasapy'\n )\n else:\n logging.info(\n \"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization\"\n )\n self.keep_emojis = keep_emojis\n if self.keep_emojis:\n import emoji\n self.emoji = emoji\n if self.model_name in SEGMENTED_MODELS:\n logging.warning(\n 'Keeping tweets with Farasa Segmentation is 10 times slower'\n )\n self.remove_html_markup = remove_html_markup\n self.replace_urls_emails_mentions = replace_urls_emails_mentions\n self.strip_tashkeel = strip_tashkeel\n self.strip_tatweel = strip_tatweel\n self.insert_white_spaces = insert_white_spaces\n self.remove_elongation = remove_elongation\n\n def preprocess(self, text):\n \"\"\"\n Preprocess takes an input text line an applies the same preprocessing used in AraBERT\n pretraining\n\n Args:\n\n text (:obj:`str`): inout text string\n\n Returns:\n\n string: A preprocessed string depending on which model was selected\n \"\"\"\n if self.model_name == 'bert-base-arabert':\n return self._old_preprocess(text, do_farasa_tokenization=True)\n if self.model_name == 'bert-base-arabertv01':\n return self._old_preprocess(text, do_farasa_tokenization=False)\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n if self.replace_urls_emails_mentions:\n for reg in url_regexes:\n text = re.sub(reg, ' [رابط] ', text)\n for reg in email_regexes:\n text = 
re.sub(reg, ' [بريد] ', text)\n text = re.sub(user_mention_regex, ' [مستخدم] ', text)\n if self.remove_html_markup:\n text = re.sub('<br />', ' ', text)\n text = re.sub('</?[^>]+>', ' ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n text = re.sub('(\\\\d+)([ء-غف-ي٠-٬]+)', ' \\\\1 \\\\2 ', text)\n text = re.sub('([ء-غف-ي٠-٬]+)(\\\\d+)', ' \\\\1 \\\\2 ', text)\n if self.keep_emojis:\n emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))\n rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, ' ', text)\n else:\n text = re.sub(rejected_chars_regex, ' ', text)\n text = ' '.join(text.replace('️', '').split())\n if (self.model_name == 'bert-base-arabertv2' or self.model_name ==\n 'bert-large-arabertv2'):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = ' '.join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n return text\n\n def unpreprocess(self, text, desegment=True):\n \"\"\"Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.\n The objective is to make the generated text of any model appear natural and not preprocessed.\n\n Args:\n text (str): input text to be un-preprocessed\n desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. 
Defaults to True.\n\n Returns:\n str: The unpreprocessed (and possibly Farasa-desegmented) text.\n \"\"\"\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n text = re.sub(white_spaced_double_quotation_regex, '\"' + '\\\\1' +\n '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + '\\\\1' +\n \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\`' + '\\\\1' +\n '\\\\`', text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\—' + '\\\\1' +\n '\\\\—', text)\n text = text.replace('.', ' . ')\n text = ' '.join(text.split())\n text = re.sub('(\\\\d+) \\\\. (\\\\d+)', '\\\\1.\\\\2', text)\n text = re.sub('(\\\\d+) \\\\, (\\\\d+)', '\\\\1,\\\\2', text)\n text = re.sub(left_and_right_spaced_chars, '\\\\1', text)\n text = re.sub(left_spaced_chars, '\\\\1', text)\n text = re.sub(right_spaced_chars, '\\\\1', text)\n return text\n\n def desegment(self, text):\n \"\"\"\n Use this function if sentence tokenization was done using\n `from arabert.preprocess_arabert import preprocess` with Farasa enabled\n AraBERT segmentation using Farasa adds a space after the '+' for prefixes,\n and after before the '+' for suffixes\n\n Example:\n >>> desegment('ال+ دراس +ات')\n الدراسات\n \"\"\"\n text = text.replace('+ ', '+')\n text = text.replace(' +', '+')\n text = ' '.join([self._desegmentword(word) for word in text.split(' ')]\n )\n return text\n\n def _desegmentword(self, orig_word: str) ->str:\n \"\"\"\n Word segmentor that takes a Farasa Segmented Word and removes the '+' signs\n\n Example:\n >>> _desegmentword(\"ال+يومي+ة\")\n اليومية\n \"\"\"\n word = orig_word.replace('ل+ال+', 'لل')\n if 'ال+ال' not in orig_word:\n word = word.replace('ل+ال', 'لل')\n word = word.replace('+', '')\n word = word.replace('للل', 'لل')\n return word\n\n def _old_preprocess(self, text, do_farasa_tokenization):\n \"\"\"\n AraBERTv1 preprocessing Function\n \"\"\"\n text = str(text)\n if self.strip_tashkeel:\n text = 
araby.strip_tashkeel(text)\n text = re.sub('\\\\d+\\\\/[ء-ي]+\\\\/\\\\d+\\\\]', '', text)\n text = re.sub('ـ', '', text)\n text = re.sub('[«»]', ' \" ', text)\n if self.replace_urls_emails_mentions:\n text = re.sub(regex_url_step1, '[رابط]', text)\n text = re.sub(regex_url_step2, '[رابط]', text)\n text = re.sub(regex_url, '[رابط]', text)\n text = re.sub(regex_email, '[بريد]', text)\n text = re.sub(regex_mention, '[مستخدم]', text)\n text = re.sub('…', '\\\\.', text).strip()\n text = self._remove_redundant_punct(text)\n if self.replace_urls_emails_mentions:\n text = re.sub('\\\\[ رابط \\\\]|\\\\[ رابط\\\\]|\\\\[رابط \\\\]',\n ' [رابط] ', text)\n text = re.sub('\\\\[ بريد \\\\]|\\\\[ بريد\\\\]|\\\\[بريد \\\\]',\n ' [بريد] ', text)\n text = re.sub('\\\\[ مستخدم \\\\]|\\\\[ مستخدم\\\\]|\\\\[مستخدم \\\\]',\n ' [مستخدم] ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n if do_farasa_tokenization:\n text = self._tokenize_arabic_words_farasa(text)\n return text.strip()\n\n def _farasa_segment(self, text):\n line_farasa = text.split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n if '+' not in word:\n segmented_line.append(word)\n continue\n segmented_word = self._split_farasa_output(word)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _split_farasa_output(self, word):\n segmented_word = []\n temp_token = ''\n for i, c in enumerate(word):\n if c == '+':\n if temp_token == 'ك':\n if i == 1:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif word[i - 2] == '+':\n if segmented_word[-1][-1] == '+':\n segmented_word.append(temp_token + '+')\n temp_token = ''\n else:\n segmented_word.append('+' + temp_token)\n 
temp_token = ''\n elif temp_token in prefix_list:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n else:\n segmented_word.append(temp_token)\n temp_token = ''\n continue\n temp_token += c\n if temp_token != '':\n if temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n else:\n segmented_word.append(temp_token)\n return segmented_word\n\n def _tokenize_arabic_words_farasa(self, line_input):\n if self.keep_emojis:\n line_farasa = []\n for word in line_input.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n line_farasa.append(word)\n else:\n line_farasa.append(self.farasa_segmenter.segment(word))\n else:\n line_farasa = self.farasa_segmenter.segment(line_input).split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n segmented_word = []\n for token in word.split('+'):\n if token in prefix_list:\n segmented_word.append(token + '+')\n elif token in suffix_list:\n segmented_word.append('+' + token)\n else:\n segmented_word.append(token)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _remove_elongation(self, text):\n \"\"\"\n :param text: the input text to remove elongation\n :return: delongated text\n \"\"\"\n for index_ in range(len(re.findall(regex_tatweel, text))):\n elongation = re.search(regex_tatweel, text)\n if elongation:\n elongation_pattern = elongation.group()\n elongation_replacement = elongation_pattern[0]\n elongation_pattern = re.escape(elongation_pattern)\n text = re.sub(elongation_pattern, elongation_replacement,\n text, flags=re.MULTILINE)\n else:\n break\n return text\n\n def _remove_redundant_punct(self, text):\n text_ = text\n result = re.search(redundant_punct_pattern, text)\n dif = 
0\n while result:\n sub = result.group()\n sub = sorted(set(sub), key=sub.index)\n sub = ' ' + ''.join(list(sub)) + ' '\n text = ''.join((text[:result.span()[0] + dif], sub, text[result\n .span()[1] + dif:]))\n text_ = ''.join((text_[:result.span()[0]], text_[result.span()[\n 1]:])).strip()\n dif = abs(len(text) - len(text_))\n result = re.search(redundant_punct_pattern, text_)\n text = re.sub('\\\\s+', ' ', text)\n return text.strip()\n\n\nprefix_list = ['ال', 'و', 'ف', 'ب', 'ك', 'ل', 'لل', 'ال', 'و', 'ف', 'ب',\n 'ك', 'ل', 'لل', 'س']\nsuffix_list = ['ه', 'ها', 'ك', 'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن',\n 'كن', 'ا', 'ان', 'ين', 'ون', 'وا', 'ات', 'ت', 'ن', 'ة', 'ه', 'ها', 'ك',\n 'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن', 'كن', 'ا', 'ان', 'ين', 'ون',\n 'وا', 'ات', 'ت', 'ن', 'ة']\nother_tokens = ['[رابط]', '[مستخدم]', '[بريد]']\nprefix_symbols = [(x + '+') for x in prefix_list]\nsuffix_symblos = [('+' + x) for x in suffix_list]\nnever_split_tokens = list(set(prefix_symbols + suffix_symblos + other_tokens))\nurl_regexes = [\n '(http(s)?:\\\\/\\\\/.)?(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{2,256}\\\\.[a-z]{2,6}\\\\b([-a-zA-Z0-9@:%_\\\\+.~#?&//=]*)'\n , '@(https?|ftp)://(-\\\\.)?([^\\\\s/?\\\\.#-]+\\\\.?)+(/[^\\\\s]*)?$@iS',\n 'http[s]?://[a-zA-Z0-9_\\\\-./~\\\\?=%&]+', 'www[a-zA-Z0-9_\\\\-?=%&/.~]+',\n '[a-zA-Z]+\\\\.com', '(?=http)[^\\\\s]+', '(?=www)[^\\\\s]+', '://']\nuser_mention_regex = '@[\\\\w\\\\d]+'\nemail_regexes = ['[\\\\w-]+@([\\\\w-]+\\\\.)+[\\\\w-]+', '\\\\S+@\\\\S+']\nredundant_punct_pattern = (\n '([!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ【»؛\\\\s+«–…‘]{2,})'\n )\nregex_tatweel = '(\\\\D)\\\\1{2,}'\nrejected_chars_regex = (\n '[^0-9\\\\u0621-\\\\u063A\\\\u0640-\\\\u066C\\\\u0671-\\\\u0674a-zA-Z\\\\[\\\\]!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ»؛\\\\s+«–…‘]'\n )\nregex_url_step1 = '(?=http)[^\\\\s]+'\nregex_url_step2 = 
'(?=www)[^\\\\s]+'\nregex_url = (\n '(http(s)?:\\\\/\\\\/.)?(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{2,256}\\\\.[a-z]{2,6}\\\\b([-a-zA-Z0-9@:%_\\\\+.~#?&//=]*)'\n )\nregex_mention = '@[\\\\w\\\\d]+'\nregex_email = '\\\\S+@\\\\S+'\nchars_regex = (\n '0-9\\\\u0621-\\\\u063A\\\\u0640-\\\\u066C\\\\u0671-\\\\u0674a-zA-Z\\\\[\\\\]!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ»؛\\\\s+«–…‘'\n )\nwhite_spaced_double_quotation_regex = '\\\\\"\\\\s+([^\"]+)\\\\s+\\\\\"'\nwhite_spaced_single_quotation_regex = \"\\\\'\\\\s+([^']+)\\\\s+\\\\'\"\nwhite_spaced_back_quotation_regex = '\\\\`\\\\s+([^`]+)\\\\s+\\\\`'\nwhite_spaced_em_dash = '\\\\—\\\\s+([^—]+)\\\\s+\\\\—'\nleft_spaced_chars = ' ([\\\\]!#\\\\$%\\\\),\\\\.:;\\\\?}٪’،؟”؛…»·])'\nright_spaced_chars = '([\\\\[\\\\(\\\\{“«‘*\\\\~]) '\nleft_and_right_spaced_chars = ' ([\\\\+\\\\-\\\\<\\\\=\\\\>\\\\@\\\\\\\\\\\\^\\\\_\\\\|\\\\–]) '\n",
"step-5": "import html\nimport logging\nimport re\n\nimport pyarabic.araby as araby\n\nACCEPTED_MODELS = [\n \"bert-base-arabertv01\",\n \"bert-base-arabert\",\n \"bert-base-arabertv02\",\n \"bert-base-arabertv2\",\n \"bert-large-arabertv02\",\n \"bert-large-arabertv2\",\n \"araelectra-base\",\n \"araelectra-base-discriminator\",\n \"araelectra-base-generator\",\n \"aragpt2-base\",\n \"aragpt2-medium\",\n \"aragpt2-large\",\n \"aragpt2-mega\",\n]\n\nSEGMENTED_MODELS = [\n \"bert-base-arabert\",\n \"bert-base-arabertv2\",\n \"bert-large-arabertv2\",\n]\n\n\nclass ArbertmoPreprocessor:\n \"\"\"\n A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.\n It also can unprocess the text ouput of the generated text\n\n Args:\n\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. 
Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n\n Returns:\n\n ArBERTMoPreprocessor: the preprocessor class\n\n Example:\n\n from preprocess import ArBERTMoPreprocessor\n\n arabert_prep = ArBERTMoPreprocessor(\"aubmindlab/bert-base-arabertv2\")\n\n arabert_prep.preprocess(\"SOME ARABIC TEXT\")\n \"\"\"\n\n def __init__(\n self,\n model_name,\n keep_emojis=False,\n remove_html_markup=True,\n replace_urls_emails_mentions=True,\n strip_tashkeel=True,\n strip_tatweel=True,\n insert_white_spaces=True,\n remove_elongation=True,\n ):\n \"\"\"\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". 
Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n \"\"\"\n model_name = model_name.replace(\"aubmindlab/\", \"\")\n\n if model_name not in ACCEPTED_MODELS:\n logging.warning(\n \"Model provided is not in the accepted model list. 
Assuming you don't want Farasa Segmentation\"\n )\n self.model_name = \"bert-base-arabertv02\"\n else:\n self.model_name = model_name\n\n if self.model_name in SEGMENTED_MODELS:\n logging.info(\n \"Selected Model requires pre-segmentation, Initializing FarasaSegmenter\"\n )\n try:\n from farasa.segmenter import FarasaSegmenter\n\n self.farasa_segmenter = FarasaSegmenter(interactive=True)\n except:\n logging.warning(\n \"farasapy is not installed, you want be able to process text for AraBERTv1 and v2. Install it using: pip install farasapy\"\n )\n else:\n logging.info(\n \"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization\"\n )\n\n self.keep_emojis = keep_emojis\n if self.keep_emojis:\n import emoji\n\n self.emoji = emoji\n if self.model_name in SEGMENTED_MODELS:\n logging.warning(\n \"Keeping tweets with Farasa Segmentation is 10 times slower\"\n )\n\n self.remove_html_markup = remove_html_markup\n self.replace_urls_emails_mentions = replace_urls_emails_mentions\n self.strip_tashkeel = strip_tashkeel\n self.strip_tatweel = strip_tatweel\n self.insert_white_spaces = insert_white_spaces\n self.remove_elongation = remove_elongation\n\n def preprocess(self, text):\n \"\"\"\n Preprocess takes an input text line an applies the same preprocessing used in AraBERT\n pretraining\n\n Args:\n\n text (:obj:`str`): inout text string\n\n Returns:\n\n string: A preprocessed string depending on which model was selected\n \"\"\"\n if self.model_name == \"bert-base-arabert\":\n return self._old_preprocess(\n text,\n do_farasa_tokenization=True,\n )\n\n if self.model_name == \"bert-base-arabertv01\":\n return self._old_preprocess(text, do_farasa_tokenization=False)\n\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n\n if self.replace_urls_emails_mentions:\n # replace all possible URLs\n for reg in url_regexes:\n text = 
re.sub(reg, \" [رابط] \", text)\n # REplace Emails with [بريد]\n for reg in email_regexes:\n text = re.sub(reg, \" [بريد] \", text)\n # replace mentions with [مستخدم]\n text = re.sub(user_mention_regex, \" [مستخدم] \", text)\n\n if self.remove_html_markup:\n # remove html line breaks\n text = re.sub(\"<br />\", \" \", text)\n # remove html markup\n text = re.sub(\"</?[^>]+>\", \" \", text)\n\n # remove repeated characters >2\n if self.remove_elongation:\n text = self._remove_elongation(text)\n\n # insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets\n if self.insert_white_spaces:\n text = re.sub(\n \"([^0-9\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u0669a-zA-Z\\[\\]])\",\n r\" \\1 \",\n text,\n )\n\n # insert whitespace between words and numbers or numbers and words\n text = re.sub(\n \"(\\d+)([\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u066C]+)\", r\" \\1 \\2 \", text\n )\n text = re.sub(\n \"([\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u066C]+)(\\d+)\", r\" \\1 \\2 \", text\n )\n\n # remove unwanted characters\n if self.keep_emojis:\n emoji_regex = \"\".join(list(self.emoji.UNICODE_EMOJI[\"en\"].keys()))\n rejected_chars_regex2 = \"[^%s%s]\" % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, \" \", text)\n else:\n text = re.sub(rejected_chars_regex, \" \", text)\n\n # remove extra spaces\n text = \" \".join(text.replace(\"\\uFE0F\", \"\").split())\n\n if (\n self.model_name == \"bert-base-arabertv2\"\n or self.model_name == \"bert-large-arabertv2\"\n ):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI[\"en\"].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = \" \".join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n\n # ALl the other models dont require Farasa Segmentation\n return text\n\n def unpreprocess(self, text, 
desegment=True):\n \"\"\"Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.\n The objective is to make the generated text of any model appear natural and not preprocessed.\n\n Args:\n text (str): input text to be un-preprocessed\n desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. Defaults to True.\n\n Returns:\n str: The unpreprocessed (and possibly Farasa-desegmented) text.\n \"\"\"\n\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n\n # removes the spaces around quotation marks ex: i \" ate \" an apple --> i \"ate\" an apple\n # https://stackoverflow.com/a/53436792/5381220\n text = re.sub(white_spaced_double_quotation_regex, '\"' + r\"\\1\" + '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + r\"\\1\" + \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, \"\\`\" + r\"\\1\" + \"\\`\", text)\n text = re.sub(white_spaced_back_quotation_regex, \"\\—\" + r\"\\1\" + \"\\—\", text)\n\n # during generation, sometimes the models don't put a space after the dot, this handles it\n text = text.replace(\".\", \" . \")\n text = \" \".join(text.split())\n\n # handle decimals\n text = re.sub(r\"(\\d+) \\. 
(\\d+)\", r\"\\1.\\2\", text)\n text = re.sub(r\"(\\d+) \\, (\\d+)\", r\"\\1,\\2\", text)\n\n text = re.sub(left_and_right_spaced_chars, r\"\\1\", text)\n text = re.sub(left_spaced_chars, r\"\\1\", text)\n text = re.sub(right_spaced_chars, r\"\\1\", text)\n\n return text\n\n def desegment(self, text):\n \"\"\"\n Use this function if sentence tokenization was done using\n `from arabert.preprocess_arabert import preprocess` with Farasa enabled\n AraBERT segmentation using Farasa adds a space after the '+' for prefixes,\n and after before the '+' for suffixes\n\n Example:\n >>> desegment('ال+ دراس +ات')\n الدراسات\n \"\"\"\n text = text.replace(\"+ \", \"+\")\n text = text.replace(\" +\", \"+\")\n text = \" \".join([self._desegmentword(word) for word in text.split(\" \")])\n return text\n\n def _desegmentword(self, orig_word: str) -> str:\n \"\"\"\n Word segmentor that takes a Farasa Segmented Word and removes the '+' signs\n\n Example:\n >>> _desegmentword(\"ال+يومي+ة\")\n اليومية\n \"\"\"\n word = orig_word.replace(\"ل+ال+\", \"لل\")\n if \"ال+ال\" not in orig_word:\n word = word.replace(\"ل+ال\", \"لل\")\n word = word.replace(\"+\", \"\")\n word = word.replace(\"للل\", \"لل\")\n return word\n\n def _old_preprocess(self, text, do_farasa_tokenization):\n \"\"\"\n AraBERTv1 preprocessing Function\n \"\"\"\n text = str(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n\n text = re.sub(r\"\\d+\\/[ء-ي]+\\/\\d+\\]\", \"\", text)\n text = re.sub(\"ـ\", \"\", text)\n text = re.sub(\"[«»]\", ' \" ', text)\n\n if self.replace_urls_emails_mentions:\n # replace the [رابط] token with space if you want to clean links\n text = re.sub(regex_url_step1, \"[رابط]\", text)\n text = re.sub(regex_url_step2, \"[رابط]\", text)\n text = re.sub(regex_url, \"[رابط]\", text)\n text = re.sub(regex_email, \"[بريد]\", text)\n text = re.sub(regex_mention, \"[مستخدم]\", text)\n text = re.sub(\"…\", r\"\\.\", text).strip()\n text = self._remove_redundant_punct(text)\n\n if 
self.replace_urls_emails_mentions:\n text = re.sub(r\"\\[ رابط \\]|\\[ رابط\\]|\\[رابط \\]\", \" [رابط] \", text)\n text = re.sub(r\"\\[ بريد \\]|\\[ بريد\\]|\\[بريد \\]\", \" [بريد] \", text)\n text = re.sub(r\"\\[ مستخدم \\]|\\[ مستخدم\\]|\\[مستخدم \\]\", \" [مستخدم] \", text)\n\n if self.remove_elongation:\n text = self._remove_elongation(text)\n\n if self.insert_white_spaces:\n text = re.sub(\n \"([^0-9\\u0621-\\u063A\\u0641-\\u0669\\u0671-\\u0673a-zA-Z\\[\\]])\",\n r\" \\1 \",\n text,\n )\n if do_farasa_tokenization:\n text = self._tokenize_arabic_words_farasa(text)\n\n return text.strip()\n\n def _farasa_segment(self, text):\n line_farasa = text.split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in [\"[\", \"]\"]:\n continue\n if word in [\"رابط\", \"بريد\", \"مستخدم\"] and line_farasa[index - 1] in [\n \"[\",\n \"]\",\n ]:\n segmented_line.append(\"[\" + word + \"]\")\n continue\n if \"+\" not in word:\n segmented_line.append(word)\n continue\n segmented_word = self._split_farasa_output(word)\n segmented_line.extend(segmented_word)\n\n return \" \".join(segmented_line)\n\n def _split_farasa_output(self, word):\n segmented_word = []\n temp_token = \"\"\n for i, c in enumerate(word):\n if c == \"+\":\n # if the token is KAF, it could be a suffix or prefix\n if temp_token == \"ك\":\n # if we are at the second token, then KAF is surely a prefix\n if i == 1:\n segmented_word.append(temp_token + \"+\")\n temp_token = \"\"\n # If the KAF token is between 2 tokens\n elif word[i - 2] == \"+\":\n # if the previous token is prefix, then this KAF must be a prefix\n if segmented_word[-1][-1] == \"+\":\n segmented_word.append(temp_token + \"+\")\n temp_token = \"\"\n # else it is a suffix, this KAF could not be a second suffix\n else:\n segmented_word.append(\"+\" + temp_token)\n temp_token = \"\"\n # if Kaf is at the end, this is handled with the statement after the loop\n elif temp_token in prefix_list:\n 
segmented_word.append(temp_token + \"+\")\n temp_token = \"\"\n elif temp_token in suffix_list:\n segmented_word.append(\"+\" + temp_token)\n temp_token = \"\"\n else:\n segmented_word.append(temp_token)\n temp_token = \"\"\n continue\n temp_token += c\n if temp_token != \"\":\n if temp_token in suffix_list:\n segmented_word.append(\"+\" + temp_token)\n else:\n segmented_word.append(temp_token)\n return segmented_word\n\n def _tokenize_arabic_words_farasa(self, line_input):\n\n if self.keep_emojis:\n # insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets\n line_farasa = []\n for word in line_input.split():\n if word in list(self.emoji.UNICODE_EMOJI[\"en\"].keys()):\n line_farasa.append(word)\n else:\n line_farasa.append(self.farasa_segmenter.segment(word))\n else:\n line_farasa = self.farasa_segmenter.segment(line_input).split()\n\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in [\"[\", \"]\"]:\n continue\n if word in [\"رابط\", \"بريد\", \"مستخدم\"] and line_farasa[index - 1] in [\n \"[\",\n \"]\",\n ]:\n segmented_line.append(\"[\" + word + \"]\")\n continue\n segmented_word = []\n for token in word.split(\"+\"):\n if token in prefix_list:\n segmented_word.append(token + \"+\")\n elif token in suffix_list:\n segmented_word.append(\"+\" + token)\n else:\n segmented_word.append(token)\n segmented_line.extend(segmented_word)\n return \" \".join(segmented_line)\n\n def _remove_elongation(self, text):\n \"\"\"\n :param text: the input text to remove elongation\n :return: delongated text\n \"\"\"\n # loop over the number of times the regex matched the text\n for index_ in range(len(re.findall(regex_tatweel, text))):\n elongation = re.search(regex_tatweel, text)\n if elongation:\n elongation_pattern = elongation.group()\n elongation_replacement = elongation_pattern[0]\n elongation_pattern = re.escape(elongation_pattern)\n text = re.sub(\n elongation_pattern, elongation_replacement, 
text, flags=re.MULTILINE\n )\n else:\n break\n return text\n\n def _remove_redundant_punct(self, text):\n text_ = text\n result = re.search(redundant_punct_pattern, text)\n dif = 0\n while result:\n sub = result.group()\n sub = sorted(set(sub), key=sub.index)\n sub = \" \" + \"\".join(list(sub)) + \" \"\n text = \"\".join(\n (text[: result.span()[0] + dif], sub, text[result.span()[1] + dif :])\n )\n text_ = \"\".join(\n (text_[: result.span()[0]], text_[result.span()[1] :])\n ).strip()\n dif = abs(len(text) - len(text_))\n result = re.search(redundant_punct_pattern, text_)\n text = re.sub(r\"\\s+\", \" \", text)\n return text.strip()\n\n\nprefix_list = [\n \"ال\",\n \"و\",\n \"ف\",\n \"ب\",\n \"ك\",\n \"ل\",\n \"لل\",\n \"\\u0627\\u0644\",\n \"\\u0648\",\n \"\\u0641\",\n \"\\u0628\",\n \"\\u0643\",\n \"\\u0644\",\n \"\\u0644\\u0644\",\n \"س\",\n]\nsuffix_list = [\n \"ه\",\n \"ها\",\n \"ك\",\n \"ي\",\n \"هما\",\n \"كما\",\n \"نا\",\n \"كم\",\n \"هم\",\n \"هن\",\n \"كن\",\n \"ا\",\n \"ان\",\n \"ين\",\n \"ون\",\n \"وا\",\n \"ات\",\n \"ت\",\n \"ن\",\n \"ة\",\n \"\\u0647\",\n \"\\u0647\\u0627\",\n \"\\u0643\",\n \"\\u064a\",\n \"\\u0647\\u0645\\u0627\",\n \"\\u0643\\u0645\\u0627\",\n \"\\u0646\\u0627\",\n \"\\u0643\\u0645\",\n \"\\u0647\\u0645\",\n \"\\u0647\\u0646\",\n \"\\u0643\\u0646\",\n \"\\u0627\",\n \"\\u0627\\u0646\",\n \"\\u064a\\u0646\",\n \"\\u0648\\u0646\",\n \"\\u0648\\u0627\",\n \"\\u0627\\u062a\",\n \"\\u062a\",\n \"\\u0646\",\n \"\\u0629\",\n]\nother_tokens = [\"[رابط]\", \"[مستخدم]\", \"[بريد]\"]\n\n# the never_split list is ussed with the transformers library\nprefix_symbols = [x + \"+\" for x in prefix_list]\nsuffix_symblos = [\"+\" + x for x in suffix_list]\nnever_split_tokens = list(set(prefix_symbols + suffix_symblos + other_tokens))\n\nurl_regexes = [\n r\"(http(s)?:\\/\\/.)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)\",\n r\"@(https?|ftp)://(-\\.)?([^\\s/?\\.#-]+\\.?)+(/[^\\s]*)?$@iS\",\n 
r\"http[s]?://[a-zA-Z0-9_\\-./~\\?=%&]+\",\n r\"www[a-zA-Z0-9_\\-?=%&/.~]+\",\n r\"[a-zA-Z]+\\.com\",\n r\"(?=http)[^\\s]+\",\n r\"(?=www)[^\\s]+\",\n r\"://\",\n]\nuser_mention_regex = r\"@[\\w\\d]+\"\nemail_regexes = [r\"[\\w-]+@([\\w-]+\\.)+[\\w-]+\", r\"\\S+@\\S+\"]\nredundant_punct_pattern = (\n r\"([!\\\"#\\$%\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ【»؛\\s+«–…‘]{2,})\"\n)\nregex_tatweel = r\"(\\D)\\1{2,}\"\nrejected_chars_regex = r\"[^0-9\\u0621-\\u063A\\u0640-\\u066C\\u0671-\\u0674a-zA-Z\\[\\]!\\\"#\\$%\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ»؛\\s+«–…‘]\"\n\nregex_url_step1 = r\"(?=http)[^\\s]+\"\nregex_url_step2 = r\"(?=www)[^\\s]+\"\nregex_url = r\"(http(s)?:\\/\\/.)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)\"\nregex_mention = r\"@[\\w\\d]+\"\nregex_email = r\"\\S+@\\S+\"\n\nchars_regex = r\"0-9\\u0621-\\u063A\\u0640-\\u066C\\u0671-\\u0674a-zA-Z\\[\\]!\\\"#\\$%\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ»؛\\s+«–…‘\"\n\nwhite_spaced_double_quotation_regex = r'\\\"\\s+([^\"]+)\\s+\\\"'\nwhite_spaced_single_quotation_regex = r\"\\'\\s+([^']+)\\s+\\'\"\nwhite_spaced_back_quotation_regex = r\"\\`\\s+([^`]+)\\s+\\`\"\nwhite_spaced_em_dash = r\"\\—\\s+([^—]+)\\s+\\—\"\n\nleft_spaced_chars = r\" ([\\]!#\\$%\\),\\.:;\\?}٪’،؟”؛…»·])\"\nright_spaced_chars = r\"([\\[\\(\\{“«‘*\\~]) \"\nleft_and_right_spaced_chars = r\" ([\\+\\-\\<\\=\\>\\@\\\\\\^\\_\\|\\–]) \"\n",
"step-ids": [
12,
13,
14,
15,
16
]
}
|
[
12,
13,
14,
15,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root.title('Attendance')
root.geometry('+450+250')
<|reserved_special_token_0|>
with open(fileName, newline='') as file:
reader = csv.reader(file)
r = 0
for col in reader:
c = 0
for row in col:
row = row.strip("['']")
if r == 0:
label = tkinter.Label(root, width=20, height=4, text=row,
bg='#7d807e', relief=tkinter.RIDGE)
label.grid(row=r, column=c)
else:
label = tkinter.Label(root, width=20, height=4, text=row,
relief=tkinter.RIDGE)
label.grid(row=r, column=c)
c += 1
r += 1
<|reserved_special_token_0|>
with open(fileName, newline='') as file:
reader = csv.reader(file)
r2 = 0
for col in reader:
r2 += 1
<|reserved_special_token_0|>
print(total)
<|reserved_special_token_0|>
label.grid(row=r + 1, column=0)
<|reserved_special_token_0|>
label.grid(row=r + 1, column=1)
<|reserved_special_token_0|>
label.grid(row=r + 1, column=2)
<|reserved_special_token_0|>
label.grid(row=r + 1, column=3)
root.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root = tkinter.Tk()
root.title('Attendance')
root.geometry('+450+250')
ts = time.time()
date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
fileName = 'Attendance/Attendance_' + date + '.csv'
with open(fileName, newline='') as file:
reader = csv.reader(file)
r = 0
for col in reader:
c = 0
for row in col:
row = row.strip("['']")
if r == 0:
label = tkinter.Label(root, width=20, height=4, text=row,
bg='#7d807e', relief=tkinter.RIDGE)
label.grid(row=r, column=c)
else:
label = tkinter.Label(root, width=20, height=4, text=row,
relief=tkinter.RIDGE)
label.grid(row=r, column=c)
c += 1
r += 1
fileName2 = 'StudentDetails/StudentDetails.csv'
with open(fileName, newline='') as file:
reader = csv.reader(file)
r2 = 0
for col in reader:
r2 += 1
total = r2 - 1
print(total)
present = r - 1
absent = total - present
label = tkinter.Label(root, width=20, height=4, text='Present', fg='green',
relief=tkinter.RIDGE)
label.grid(row=r + 1, column=0)
label = tkinter.Label(root, width=20, height=4, text=present, relief=
tkinter.RIDGE)
label.grid(row=r + 1, column=1)
label = tkinter.Label(root, width=20, height=4, text='Absent', fg='red',
relief=tkinter.RIDGE)
label.grid(row=r + 1, column=2)
label = tkinter.Label(root, width=20, height=4, text=absent, relief=tkinter
.RIDGE)
label.grid(row=r + 1, column=3)
root.mainloop()
<|reserved_special_token_1|>
import tkinter
import csv
import datetime
import time
root = tkinter.Tk()
root.title('Attendance')
root.geometry('+450+250')
ts = time.time()
date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
fileName = 'Attendance/Attendance_' + date + '.csv'
with open(fileName, newline='') as file:
reader = csv.reader(file)
r = 0
for col in reader:
c = 0
for row in col:
row = row.strip("['']")
if r == 0:
label = tkinter.Label(root, width=20, height=4, text=row,
bg='#7d807e', relief=tkinter.RIDGE)
label.grid(row=r, column=c)
else:
label = tkinter.Label(root, width=20, height=4, text=row,
relief=tkinter.RIDGE)
label.grid(row=r, column=c)
c += 1
r += 1
fileName2 = 'StudentDetails/StudentDetails.csv'
with open(fileName, newline='') as file:
reader = csv.reader(file)
r2 = 0
for col in reader:
r2 += 1
total = r2 - 1
print(total)
present = r - 1
absent = total - present
label = tkinter.Label(root, width=20, height=4, text='Present', fg='green',
relief=tkinter.RIDGE)
label.grid(row=r + 1, column=0)
label = tkinter.Label(root, width=20, height=4, text=present, relief=
tkinter.RIDGE)
label.grid(row=r + 1, column=1)
label = tkinter.Label(root, width=20, height=4, text='Absent', fg='red',
relief=tkinter.RIDGE)
label.grid(row=r + 1, column=2)
label = tkinter.Label(root, width=20, height=4, text=absent, relief=tkinter
.RIDGE)
label.grid(row=r + 1, column=3)
root.mainloop()
<|reserved_special_token_1|>
import tkinter
import csv
import datetime
import time
root = tkinter.Tk()
root.title("Attendance")
root.geometry("+450+250")
ts = time.time()
date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
fileName = "Attendance/Attendance_"+date+".csv"
# open file
with open(fileName, newline="") as file:
reader = csv.reader(file)
# r and c tell us where to grid the labels
r = 0
for col in reader:
c = 0
for row in col:
row = row.strip("['']")
if r == 0:
label = tkinter.Label(root, width=20, height=4,
text=row, bg="#7d807e", relief=tkinter.RIDGE)
label.grid(row=r, column=c)
else:
label = tkinter.Label(root, width=20, height=4,
text=row, relief=tkinter.RIDGE)
label.grid(row=r, column=c)
c += 1
r += 1
fileName2 = "StudentDetails/StudentDetails.csv"
with open(fileName, newline="") as file:
reader = csv.reader(file)
# r and c tell us where to grid the labels
r2 = 0
for col in reader:
r2 += 1
total = r2-1
print(total)
present = r - 1
absent = total - present
label = tkinter.Label(root, width=20, height=4,
text="Present", fg="green", relief=tkinter.RIDGE)
label.grid(row=r+1, column=0)
label = tkinter.Label(root, width=20, height=4,
text=present, relief=tkinter.RIDGE)
label.grid(row=r+1, column=1)
label = tkinter.Label(root, width=20, height=4,
text="Absent", fg="red", relief=tkinter.RIDGE)
label.grid(row=r+1, column=2)
label = tkinter.Label(root, width=20, height=4,
text=absent, relief=tkinter.RIDGE)
label.grid(row=r+1, column=3)
root.mainloop()
|
flexible
|
{
"blob_id": "2343a9d3e253b5a0347b5890a5d7b9c3be777669",
"index": 5958,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nroot.title('Attendance')\nroot.geometry('+450+250')\n<mask token>\nwith open(fileName, newline='') as file:\n reader = csv.reader(file)\n r = 0\n for col in reader:\n c = 0\n for row in col:\n row = row.strip(\"['']\")\n if r == 0:\n label = tkinter.Label(root, width=20, height=4, text=row,\n bg='#7d807e', relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n else:\n label = tkinter.Label(root, width=20, height=4, text=row,\n relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n c += 1\n r += 1\n<mask token>\nwith open(fileName, newline='') as file:\n reader = csv.reader(file)\n r2 = 0\n for col in reader:\n r2 += 1\n<mask token>\nprint(total)\n<mask token>\nlabel.grid(row=r + 1, column=0)\n<mask token>\nlabel.grid(row=r + 1, column=1)\n<mask token>\nlabel.grid(row=r + 1, column=2)\n<mask token>\nlabel.grid(row=r + 1, column=3)\nroot.mainloop()\n",
"step-3": "<mask token>\nroot = tkinter.Tk()\nroot.title('Attendance')\nroot.geometry('+450+250')\nts = time.time()\ndate = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\nfileName = 'Attendance/Attendance_' + date + '.csv'\nwith open(fileName, newline='') as file:\n reader = csv.reader(file)\n r = 0\n for col in reader:\n c = 0\n for row in col:\n row = row.strip(\"['']\")\n if r == 0:\n label = tkinter.Label(root, width=20, height=4, text=row,\n bg='#7d807e', relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n else:\n label = tkinter.Label(root, width=20, height=4, text=row,\n relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n c += 1\n r += 1\nfileName2 = 'StudentDetails/StudentDetails.csv'\nwith open(fileName, newline='') as file:\n reader = csv.reader(file)\n r2 = 0\n for col in reader:\n r2 += 1\ntotal = r2 - 1\nprint(total)\npresent = r - 1\nabsent = total - present\nlabel = tkinter.Label(root, width=20, height=4, text='Present', fg='green',\n relief=tkinter.RIDGE)\nlabel.grid(row=r + 1, column=0)\nlabel = tkinter.Label(root, width=20, height=4, text=present, relief=\n tkinter.RIDGE)\nlabel.grid(row=r + 1, column=1)\nlabel = tkinter.Label(root, width=20, height=4, text='Absent', fg='red',\n relief=tkinter.RIDGE)\nlabel.grid(row=r + 1, column=2)\nlabel = tkinter.Label(root, width=20, height=4, text=absent, relief=tkinter\n .RIDGE)\nlabel.grid(row=r + 1, column=3)\nroot.mainloop()\n",
"step-4": "import tkinter\nimport csv\nimport datetime\nimport time\nroot = tkinter.Tk()\nroot.title('Attendance')\nroot.geometry('+450+250')\nts = time.time()\ndate = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\nfileName = 'Attendance/Attendance_' + date + '.csv'\nwith open(fileName, newline='') as file:\n reader = csv.reader(file)\n r = 0\n for col in reader:\n c = 0\n for row in col:\n row = row.strip(\"['']\")\n if r == 0:\n label = tkinter.Label(root, width=20, height=4, text=row,\n bg='#7d807e', relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n else:\n label = tkinter.Label(root, width=20, height=4, text=row,\n relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n c += 1\n r += 1\nfileName2 = 'StudentDetails/StudentDetails.csv'\nwith open(fileName, newline='') as file:\n reader = csv.reader(file)\n r2 = 0\n for col in reader:\n r2 += 1\ntotal = r2 - 1\nprint(total)\npresent = r - 1\nabsent = total - present\nlabel = tkinter.Label(root, width=20, height=4, text='Present', fg='green',\n relief=tkinter.RIDGE)\nlabel.grid(row=r + 1, column=0)\nlabel = tkinter.Label(root, width=20, height=4, text=present, relief=\n tkinter.RIDGE)\nlabel.grid(row=r + 1, column=1)\nlabel = tkinter.Label(root, width=20, height=4, text='Absent', fg='red',\n relief=tkinter.RIDGE)\nlabel.grid(row=r + 1, column=2)\nlabel = tkinter.Label(root, width=20, height=4, text=absent, relief=tkinter\n .RIDGE)\nlabel.grid(row=r + 1, column=3)\nroot.mainloop()\n",
"step-5": "import tkinter\nimport csv\nimport datetime\nimport time\n\nroot = tkinter.Tk()\nroot.title(\"Attendance\")\nroot.geometry(\"+450+250\")\n\nts = time.time()\ndate = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\nfileName = \"Attendance/Attendance_\"+date+\".csv\"\n# open file\nwith open(fileName, newline=\"\") as file:\n reader = csv.reader(file)\n # r and c tell us where to grid the labels\n r = 0\n for col in reader:\n c = 0\n for row in col:\n\n row = row.strip(\"['']\")\n\n if r == 0:\n label = tkinter.Label(root, width=20, height=4,\n text=row, bg=\"#7d807e\", relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n else:\n label = tkinter.Label(root, width=20, height=4,\n text=row, relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n\n c += 1\n r += 1\n\nfileName2 = \"StudentDetails/StudentDetails.csv\"\nwith open(fileName, newline=\"\") as file:\n reader = csv.reader(file)\n # r and c tell us where to grid the labels\n r2 = 0\n for col in reader:\n r2 += 1\n\ntotal = r2-1\nprint(total)\npresent = r - 1\nabsent = total - present\nlabel = tkinter.Label(root, width=20, height=4,\n text=\"Present\", fg=\"green\", relief=tkinter.RIDGE)\nlabel.grid(row=r+1, column=0)\nlabel = tkinter.Label(root, width=20, height=4,\n text=present, relief=tkinter.RIDGE)\nlabel.grid(row=r+1, column=1)\nlabel = tkinter.Label(root, width=20, height=4,\n text=\"Absent\", fg=\"red\", relief=tkinter.RIDGE)\nlabel.grid(row=r+1, column=2)\nlabel = tkinter.Label(root, width=20, height=4,\n text=absent, relief=tkinter.RIDGE)\nlabel.grid(row=r+1, column=3)\nroot.mainloop()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
input = """
t(Z) :- t0(Z).
t(Z) :- g(X,Y,Z), t(X), not t(Y).
t0(2).
g(5,1,3).
g(1,2,4).
g(3,4,5).
"""
output = """
t(Z) :- t0(Z).
t(Z) :- g(X,Y,Z), t(X), not t(Y).
t0(2).
g(5,1,3).
g(1,2,4).
g(3,4,5).
"""
|
flexible
|
{
"blob_id": "df5c79c79d827b6b3de7ceb4b1e3c652c8956346",
"index": 2620,
"step-1": "<mask token>\n",
"step-2": "input = \"\"\"\nt(Z) :- t0(Z).\nt(Z) :- g(X,Y,Z), t(X), not t(Y).\n\nt0(2).\ng(5,1,3).\ng(1,2,4).\ng(3,4,5).\n\n\"\"\"\noutput = \"\"\"\nt(Z) :- t0(Z).\nt(Z) :- g(X,Y,Z), t(X), not t(Y).\n\nt0(2).\ng(5,1,3).\ng(1,2,4).\ng(3,4,5).\n\n\"\"\"\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# This file imports all files for this module for easy inclusions around the game.
from viewController import *
from navigationController import *
from noticer import *
from Images import *
from fancyButton import *
from constants import *
from textObject import *
from UIButton import *
# from spriteFromRect import *
from UIView import *
from UIAlertView import *
|
normal
|
{
"blob_id": "7168a8eb401478aa26ee9033262bb5c8fe33f186",
"index": 7011,
"step-1": "<mask token>\n",
"step-2": "from viewController import *\nfrom navigationController import *\nfrom noticer import *\nfrom Images import *\nfrom fancyButton import *\nfrom constants import *\nfrom textObject import *\nfrom UIButton import *\nfrom UIView import *\nfrom UIAlertView import *\n",
"step-3": "\n# This file imports all files for this module for easy inclusions around the game.\n\n\nfrom viewController import *\nfrom navigationController import *\nfrom noticer import *\nfrom Images import *\nfrom fancyButton import *\nfrom constants import *\nfrom textObject import *\nfrom UIButton import *\n# from spriteFromRect import *\nfrom UIView import *\nfrom UIAlertView import *",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BoardingPass:
<|reserved_special_token_0|>
def export(self):
fileName = 'reservations/data_reservation/boarding_passes'
file = open(fileName, 'a')
flights = self.reservation.getFlights()
string = ''
for i in range(len(flights)):
flight = flights[i]
gate = randint(1, 8)
for passenger in self.reservation.getPassengers():
string += 'BOARDING PASS'
string += 'NAME OF PASSENGER:\n'
string += passenger.getLastName(
) + ' / ' + passenger.getFirstName() + '\n'
string += 'FROM: ' + flight.getOrigin() + '\n'
string += 'TO: ' + flight.getDestination() + '\n'
string += 'SEAT: ' + passenger.getSeats()[i]
string += 'GATE: ' + str(gate) + '\n'
string += '\n\n'
print(string, file=file)
file.close()
return fileName
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BoardingPass:
def __init__(self, reservation):
self.reservation = reservation
self.export()
def export(self):
fileName = 'reservations/data_reservation/boarding_passes'
file = open(fileName, 'a')
flights = self.reservation.getFlights()
string = ''
for i in range(len(flights)):
flight = flights[i]
gate = randint(1, 8)
for passenger in self.reservation.getPassengers():
string += 'BOARDING PASS'
string += 'NAME OF PASSENGER:\n'
string += passenger.getLastName(
) + ' / ' + passenger.getFirstName() + '\n'
string += 'FROM: ' + flight.getOrigin() + '\n'
string += 'TO: ' + flight.getDestination() + '\n'
string += 'SEAT: ' + passenger.getSeats()[i]
string += 'GATE: ' + str(gate) + '\n'
string += '\n\n'
print(string, file=file)
file.close()
return fileName
<|reserved_special_token_1|>
from random import randint
class BoardingPass:
def __init__(self, reservation):
self.reservation = reservation
self.export()
def export(self):
fileName = 'reservations/data_reservation/boarding_passes'
file = open(fileName, 'a')
flights = self.reservation.getFlights()
string = ''
for i in range(len(flights)):
flight = flights[i]
gate = randint(1, 8)
for passenger in self.reservation.getPassengers():
string += 'BOARDING PASS'
string += 'NAME OF PASSENGER:\n'
string += passenger.getLastName(
) + ' / ' + passenger.getFirstName() + '\n'
string += 'FROM: ' + flight.getOrigin() + '\n'
string += 'TO: ' + flight.getDestination() + '\n'
string += 'SEAT: ' + passenger.getSeats()[i]
string += 'GATE: ' + str(gate) + '\n'
string += '\n\n'
print(string, file=file)
file.close()
return fileName
<|reserved_special_token_1|>
# Name: BoardingPass.py
# Description: Class to create and output a boarding pass
# Ver. Writer Date Notes
# 1.0 Shuvam Chatterjee 05/22/20 Original
from random import randint
class BoardingPass:
def __init__(self, reservation):
self.reservation = reservation
self.export()
def export(self):
fileName = "reservations/data_reservation/boarding_passes"
file = open(fileName, "a")
flights = self.reservation.getFlights()
string = ""
for i in range(len(flights)):
flight = flights[i]
gate = randint(1, 8)
for passenger in self.reservation.getPassengers():
string += "BOARDING PASS"
string += "NAME OF PASSENGER:\n"
string += passenger.getLastName() + " / " + passenger.getFirstName() + "\n"
string += "FROM: " + flight.getOrigin() + "\n"
string += "TO: " + flight.getDestination() + "\n"
string += "SEAT: " + passenger.getSeats()[i]
string += "GATE: " + str(gate) + "\n"
string += "\n\n"
print(string, file=file)
file.close()
return fileName
|
flexible
|
{
"blob_id": "a3662b4b9569046e67c39c1002234c1fbd85c650",
"index": 8102,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass BoardingPass:\n <mask token>\n\n def export(self):\n fileName = 'reservations/data_reservation/boarding_passes'\n file = open(fileName, 'a')\n flights = self.reservation.getFlights()\n string = ''\n for i in range(len(flights)):\n flight = flights[i]\n gate = randint(1, 8)\n for passenger in self.reservation.getPassengers():\n string += 'BOARDING PASS'\n string += 'NAME OF PASSENGER:\\n'\n string += passenger.getLastName(\n ) + ' / ' + passenger.getFirstName() + '\\n'\n string += 'FROM: ' + flight.getOrigin() + '\\n'\n string += 'TO: ' + flight.getDestination() + '\\n'\n string += 'SEAT: ' + passenger.getSeats()[i]\n string += 'GATE: ' + str(gate) + '\\n'\n string += '\\n\\n'\n print(string, file=file)\n file.close()\n return fileName\n",
"step-3": "<mask token>\n\n\nclass BoardingPass:\n\n def __init__(self, reservation):\n self.reservation = reservation\n self.export()\n\n def export(self):\n fileName = 'reservations/data_reservation/boarding_passes'\n file = open(fileName, 'a')\n flights = self.reservation.getFlights()\n string = ''\n for i in range(len(flights)):\n flight = flights[i]\n gate = randint(1, 8)\n for passenger in self.reservation.getPassengers():\n string += 'BOARDING PASS'\n string += 'NAME OF PASSENGER:\\n'\n string += passenger.getLastName(\n ) + ' / ' + passenger.getFirstName() + '\\n'\n string += 'FROM: ' + flight.getOrigin() + '\\n'\n string += 'TO: ' + flight.getDestination() + '\\n'\n string += 'SEAT: ' + passenger.getSeats()[i]\n string += 'GATE: ' + str(gate) + '\\n'\n string += '\\n\\n'\n print(string, file=file)\n file.close()\n return fileName\n",
"step-4": "from random import randint\n\n\nclass BoardingPass:\n\n def __init__(self, reservation):\n self.reservation = reservation\n self.export()\n\n def export(self):\n fileName = 'reservations/data_reservation/boarding_passes'\n file = open(fileName, 'a')\n flights = self.reservation.getFlights()\n string = ''\n for i in range(len(flights)):\n flight = flights[i]\n gate = randint(1, 8)\n for passenger in self.reservation.getPassengers():\n string += 'BOARDING PASS'\n string += 'NAME OF PASSENGER:\\n'\n string += passenger.getLastName(\n ) + ' / ' + passenger.getFirstName() + '\\n'\n string += 'FROM: ' + flight.getOrigin() + '\\n'\n string += 'TO: ' + flight.getDestination() + '\\n'\n string += 'SEAT: ' + passenger.getSeats()[i]\n string += 'GATE: ' + str(gate) + '\\n'\n string += '\\n\\n'\n print(string, file=file)\n file.close()\n return fileName\n",
"step-5": "# Name: BoardingPass.py\n# Description: Class to create and output a boarding pass\n\n# Ver. Writer Date Notes\n# 1.0 Shuvam Chatterjee 05/22/20 Original\n\nfrom random import randint\n\nclass BoardingPass:\n def __init__(self, reservation):\n self.reservation = reservation\n self.export()\n\n def export(self):\n fileName = \"reservations/data_reservation/boarding_passes\"\n file = open(fileName, \"a\")\n\n flights = self.reservation.getFlights()\n string = \"\"\n for i in range(len(flights)):\n flight = flights[i]\n gate = randint(1, 8)\n\n for passenger in self.reservation.getPassengers():\n string += \"BOARDING PASS\"\n string += \"NAME OF PASSENGER:\\n\"\n string += passenger.getLastName() + \" / \" + passenger.getFirstName() + \"\\n\"\n string += \"FROM: \" + flight.getOrigin() + \"\\n\"\n string += \"TO: \" + flight.getDestination() + \"\\n\"\n string += \"SEAT: \" + passenger.getSeats()[i]\n string += \"GATE: \" + str(gate) + \"\\n\"\n string += \"\\n\\n\"\n\n print(string, file=file)\n\n file.close()\n\n return fileName",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for train_index, test_index in kf.split(x):
xtr = x.iloc[train_index]
ytr = y[train_index]
<|reserved_special_token_0|>
if k % 2 == 0:
k = k + 1
else:
k = k
<|reserved_special_token_0|>
print('Skor KNN: ', round(cross_val_score(knn, xtr, ytr, cv=5).mean() * 100
), ' %')
print('Skor Logistic Regression: ', round(cross_val_score(logreg, xtr, ytr,
cv=5).mean() * 100), ' %')
print('Skor Random Forest: ', round(cross_val_score(ranfor, xtr, ytr, cv=5)
.mean() * 100), ' %')
print('Skor Decision Tree: ', round(cross_val_score(dec, xtr, ytr, cv=5).
mean() * 100), ' %')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
df = pd.read_csv('data.csv')
df = df.fillna(np.NaN)
df['Target'] = 0
df['Target_name'] = 'Non-Target'
df['Target'][(df['Age'] <= 25) & (df['Overall'] >= 80) & (df['Potential'] >=
80)] = 1
df['Target_name'][(df['Age'] <= 25) & (df['Overall'] >= 80) & (df[
'Potential'] >= 80)] = 'Target'
x = df.loc[:, ['Age', 'Overall', 'Potential']]
y = df['Target']
kf = KFold(n_splits=5)
for train_index, test_index in kf.split(x):
xtr = x.iloc[train_index]
ytr = y[train_index]
<|reserved_special_token_0|>
k = round(len(x) ** 0.5)
if k % 2 == 0:
k = k + 1
else:
k = k
knn = KNeighborsClassifier(n_neighbors=k)
<|reserved_special_token_0|>
logreg = LogisticRegression(multi_class='auto', solver='liblinear')
<|reserved_special_token_0|>
ranfor = RandomForestClassifier(n_estimators=50)
<|reserved_special_token_0|>
dec = DecisionTreeClassifier()
print('Skor KNN: ', round(cross_val_score(knn, xtr, ytr, cv=5).mean() * 100
), ' %')
print('Skor Logistic Regression: ', round(cross_val_score(logreg, xtr, ytr,
cv=5).mean() * 100), ' %')
print('Skor Random Forest: ', round(cross_val_score(ranfor, xtr, ytr, cv=5)
.mean() * 100), ' %')
print('Skor Decision Tree: ', round(cross_val_score(dec, xtr, ytr, cv=5).
mean() * 100), ' %')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
df = pd.read_csv('data.csv')
df = df.fillna(np.NaN)
df['Target'] = 0
df['Target_name'] = 'Non-Target'
df['Target'][(df['Age'] <= 25) & (df['Overall'] >= 80) & (df['Potential'] >=
80)] = 1
df['Target_name'][(df['Age'] <= 25) & (df['Overall'] >= 80) & (df[
'Potential'] >= 80)] = 'Target'
x = df.loc[:, ['Age', 'Overall', 'Potential']]
y = df['Target']
kf = KFold(n_splits=5)
for train_index, test_index in kf.split(x):
xtr = x.iloc[train_index]
ytr = y[train_index]
<|reserved_special_token_0|>
k = round(len(x) ** 0.5)
if k % 2 == 0:
k = k + 1
else:
k = k
knn = KNeighborsClassifier(n_neighbors=k)
<|reserved_special_token_0|>
logreg = LogisticRegression(multi_class='auto', solver='liblinear')
<|reserved_special_token_0|>
ranfor = RandomForestClassifier(n_estimators=50)
<|reserved_special_token_0|>
dec = DecisionTreeClassifier()
print('Skor KNN: ', round(cross_val_score(knn, xtr, ytr, cv=5).mean() * 100
), ' %')
print('Skor Logistic Regression: ', round(cross_val_score(logreg, xtr, ytr,
cv=5).mean() * 100), ' %')
print('Skor Random Forest: ', round(cross_val_score(ranfor, xtr, ytr, cv=5)
.mean() * 100), ' %')
print('Skor Decision Tree: ', round(cross_val_score(dec, xtr, ytr, cv=5).
mean() * 100), ' %')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
df=pd.read_csv('data.csv')
df=df.fillna(np.NaN)
#kita isi df dengan kolom target = 0, target_name = 0 , agar memudahkan untuk training
df['Target']=0
df['Target_name']='Non-Target'
# print(df)
#tandai target dengan angka 1,target_name='Target' pada dataframe usia <= 25, overall >= 80, dan potential >= 80
df['Target'][(df['Age']<=25)&(df['Overall']>=80)&(df['Potential']>=80)]=1
df['Target_name'][(df['Age']<=25)&(df['Overall']>=80)&(df['Potential']>=80)]='Target'
x=df.loc[:,['Age','Overall','Potential']]
y=df['Target']
kf=KFold(n_splits = 5)
for train_index,test_index in kf.split(x):
xtr=x.iloc[train_index]
ytr=y[train_index]
'''
KNN
nilai k terbaik atau n terbaik dapat dicari dengan cara sqrt(n_data) lalu pilih yg odd/ganjil
cari len dari data (banyak data) lalu kalikan pangkat setengah
'''
k = round(len(x) ** .5)
if((k%2) == 0):
k=k+1
else:
k=k
knn=KNeighborsClassifier(n_neighbors=k)
'''
Logistic Regression
'''
logreg=LogisticRegression(multi_class='auto',solver='liblinear')
'''
Random Forest
'''
ranfor=RandomForestClassifier(n_estimators=50)
'''
Decision Tree
'''
dec=DecisionTreeClassifier()
print("Skor KNN: ",round(cross_val_score(knn,xtr,ytr,cv=5).mean()*100),' %')
print("Skor Logistic Regression: ",round(cross_val_score(logreg,xtr,ytr,cv=5).mean()*100),' %')
print("Skor Random Forest: ",round(cross_val_score(ranfor,xtr,ytr,cv=5).mean()*100),' %')
print("Skor Decision Tree: ",round(cross_val_score(dec,xtr,ytr,cv=5).mean()*100),' %')
'''
Skor KNN: 96.0 %
Skor Logistic Regression: 97.0 %
Skor Random Forest: 96.0 %
Skor Decision Tree: 93.0 %
'''
|
flexible
|
{
"blob_id": "84db1803a352e0ed8c01b7166f522d46ec89b6f5",
"index": 2487,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor train_index, test_index in kf.split(x):\n xtr = x.iloc[train_index]\n ytr = y[train_index]\n<mask token>\nif k % 2 == 0:\n k = k + 1\nelse:\n k = k\n<mask token>\nprint('Skor KNN: ', round(cross_val_score(knn, xtr, ytr, cv=5).mean() * 100\n ), ' %')\nprint('Skor Logistic Regression: ', round(cross_val_score(logreg, xtr, ytr,\n cv=5).mean() * 100), ' %')\nprint('Skor Random Forest: ', round(cross_val_score(ranfor, xtr, ytr, cv=5)\n .mean() * 100), ' %')\nprint('Skor Decision Tree: ', round(cross_val_score(dec, xtr, ytr, cv=5).\n mean() * 100), ' %')\n<mask token>\n",
"step-3": "<mask token>\ndf = pd.read_csv('data.csv')\ndf = df.fillna(np.NaN)\ndf['Target'] = 0\ndf['Target_name'] = 'Non-Target'\ndf['Target'][(df['Age'] <= 25) & (df['Overall'] >= 80) & (df['Potential'] >=\n 80)] = 1\ndf['Target_name'][(df['Age'] <= 25) & (df['Overall'] >= 80) & (df[\n 'Potential'] >= 80)] = 'Target'\nx = df.loc[:, ['Age', 'Overall', 'Potential']]\ny = df['Target']\nkf = KFold(n_splits=5)\nfor train_index, test_index in kf.split(x):\n xtr = x.iloc[train_index]\n ytr = y[train_index]\n<mask token>\nk = round(len(x) ** 0.5)\nif k % 2 == 0:\n k = k + 1\nelse:\n k = k\nknn = KNeighborsClassifier(n_neighbors=k)\n<mask token>\nlogreg = LogisticRegression(multi_class='auto', solver='liblinear')\n<mask token>\nranfor = RandomForestClassifier(n_estimators=50)\n<mask token>\ndec = DecisionTreeClassifier()\nprint('Skor KNN: ', round(cross_val_score(knn, xtr, ytr, cv=5).mean() * 100\n ), ' %')\nprint('Skor Logistic Regression: ', round(cross_val_score(logreg, xtr, ytr,\n cv=5).mean() * 100), ' %')\nprint('Skor Random Forest: ', round(cross_val_score(ranfor, xtr, ytr, cv=5)\n .mean() * 100), ' %')\nprint('Skor Decision Tree: ', round(cross_val_score(dec, xtr, ytr, cv=5).\n mean() * 100), ' %')\n<mask token>\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\ndf = pd.read_csv('data.csv')\ndf = df.fillna(np.NaN)\ndf['Target'] = 0\ndf['Target_name'] = 'Non-Target'\ndf['Target'][(df['Age'] <= 25) & (df['Overall'] >= 80) & (df['Potential'] >=\n 80)] = 1\ndf['Target_name'][(df['Age'] <= 25) & (df['Overall'] >= 80) & (df[\n 'Potential'] >= 80)] = 'Target'\nx = df.loc[:, ['Age', 'Overall', 'Potential']]\ny = df['Target']\nkf = KFold(n_splits=5)\nfor train_index, test_index in kf.split(x):\n xtr = x.iloc[train_index]\n ytr = y[train_index]\n<mask token>\nk = round(len(x) ** 0.5)\nif k % 2 == 0:\n k = k + 1\nelse:\n k = k\nknn = KNeighborsClassifier(n_neighbors=k)\n<mask token>\nlogreg = LogisticRegression(multi_class='auto', solver='liblinear')\n<mask token>\nranfor = RandomForestClassifier(n_estimators=50)\n<mask token>\ndec = DecisionTreeClassifier()\nprint('Skor KNN: ', round(cross_val_score(knn, xtr, ytr, cv=5).mean() * 100\n ), ' %')\nprint('Skor Logistic Regression: ', round(cross_val_score(logreg, xtr, ytr,\n cv=5).mean() * 100), ' %')\nprint('Skor Random Forest: ', round(cross_val_score(ranfor, xtr, ytr, cv=5)\n .mean() * 100), ' %')\nprint('Skor Decision Tree: ', round(cross_val_score(dec, xtr, ytr, cv=5).\n mean() * 100), ' %')\n<mask token>\n",
"step-5": "import numpy as np \r\nimport pandas as pd \r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.model_selection import KFold\r\n\r\ndf=pd.read_csv('data.csv')\r\ndf=df.fillna(np.NaN)\r\n#kita isi df dengan kolom target = 0, target_name = 0 , agar memudahkan untuk training\r\ndf['Target']=0\r\ndf['Target_name']='Non-Target'\r\n# print(df)\r\n#tandai target dengan angka 1,target_name='Target' pada dataframe usia <= 25, overall >= 80, dan potential >= 80 \r\ndf['Target'][(df['Age']<=25)&(df['Overall']>=80)&(df['Potential']>=80)]=1\r\ndf['Target_name'][(df['Age']<=25)&(df['Overall']>=80)&(df['Potential']>=80)]='Target'\r\n\r\nx=df.loc[:,['Age','Overall','Potential']]\r\ny=df['Target']\r\nkf=KFold(n_splits = 5)\r\nfor train_index,test_index in kf.split(x):\r\n xtr=x.iloc[train_index]\r\n ytr=y[train_index]\r\n\r\n'''\r\nKNN\r\nnilai k terbaik atau n terbaik dapat dicari dengan cara sqrt(n_data) lalu pilih yg odd/ganjil\r\ncari len dari data (banyak data) lalu kalikan pangkat setengah\r\n'''\r\nk = round(len(x) ** .5)\r\nif((k%2) == 0):\r\n k=k+1\r\nelse:\r\n k=k\r\nknn=KNeighborsClassifier(n_neighbors=k)\r\n\r\n'''\r\nLogistic Regression\r\n'''\r\nlogreg=LogisticRegression(multi_class='auto',solver='liblinear')\r\n\r\n'''\r\nRandom Forest\r\n'''\r\nranfor=RandomForestClassifier(n_estimators=50)\r\n\r\n'''\r\nDecision Tree\r\n'''\r\ndec=DecisionTreeClassifier()\r\nprint(\"Skor KNN: \",round(cross_val_score(knn,xtr,ytr,cv=5).mean()*100),' %')\r\nprint(\"Skor Logistic Regression: \",round(cross_val_score(logreg,xtr,ytr,cv=5).mean()*100),' %')\r\nprint(\"Skor Random Forest: \",round(cross_val_score(ranfor,xtr,ytr,cv=5).mean()*100),' 
%')\r\nprint(\"Skor Decision Tree: \",round(cross_val_score(dec,xtr,ytr,cv=5).mean()*100),' %')\r\n\r\n'''\r\nSkor KNN: 96.0 %\r\nSkor Logistic Regression: 97.0 %\r\nSkor Random Forest: 96.0 %\r\nSkor Decision Tree: 93.0 %\r\n'''",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import csv
from pprint import pprint as pp
with open('nodes_tags.csv', 'r') as f:
tags = csv.DictReader(f)
for row in tags:
if row['key'] == 'FIXME':
pp(row)
|
normal
|
{
"blob_id": "d0981d279f7090d5309aa564252dba731a34a66b",
"index": 1424,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('nodes_tags.csv', 'r') as f:\n tags = csv.DictReader(f)\n for row in tags:\n if row['key'] == 'FIXME':\n pp(row)\n",
"step-3": "import csv\nfrom pprint import pprint as pp\nwith open('nodes_tags.csv', 'r') as f:\n tags = csv.DictReader(f)\n for row in tags:\n if row['key'] == 'FIXME':\n pp(row)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Copyright (c) 2021 Cisco and/or its affiliates.
#
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
# Licensed under the Apache License 2.0 or
# GNU General Public License v2.0 or later; you may not use this file
# except in compliance with one of these Licenses. You
# may obtain a copy of the Licenses at:
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
#
# Note: If this file is linked with Scapy, which is GPLv2+, your use of it
# must be under GPLv2+. If at any point in the future it is no longer linked
# with Scapy (or other GPLv2+ licensed software), you are free to choose
# Apache 2.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Traffic scripts argument parser library."""
import argparse
class TrafficScriptArg:
    """Command-line argument parser for traffic scripts.

    Every script automatically gets the two options ``--tx_if`` and
    ``--rx_if``.  Callers may register further required options through
    *more_args* and optional ones through *opt_args*; an optional option
    that is not supplied parses to an empty string.  All parsed values
    are plain strings.

    :param more_args: Names of additional arguments (optional).
    :param opt_args: Names of optional arguments (optional).
    :type more_args: list
    :type opt_args: list

    :Example:

    >>> from TrafficScriptArg import TrafficScriptArg
    >>> args = TrafficScriptArg(['src_mac', 'dst_mac', 'src_ip', 'dst_ip'])
    """

    def __init__(self, more_args=None, opt_args=None):
        cli = argparse.ArgumentParser()
        cli.add_argument(u"--tx_if", help=u"interface that sends traffic")
        cli.add_argument(u"--rx_if", help=u"interface that receives traffic")

        if more_args is not None:
            # Required extras: plain string options, no default.
            for name in more_args:
                cli.add_argument(f"--{name}")

        if opt_args is not None:
            # Optional extras: absent flags fall back to an empty string.
            for name in opt_args:
                cli.add_argument(f"--{name}", nargs=u"?", default=u"")

        self._parser = cli
        self._args = vars(cli.parse_args())

    def get_arg(self, arg_name):
        """Return the string value parsed for *arg_name*.

        :param arg_name: Argument name.
        :type arg_name: str
        :returns: Argument value.
        :rtype: str
        :raises Exception: If no such argument was defined.
        """
        value = self._args.get(arg_name)
        if value is None:
            raise Exception(f"Argument '{arg_name}' not found")
        return value
|
normal
|
{
"blob_id": "ea6d726e8163ed0f93b8078323fa5f4e9115ad73",
"index": 1639,
"step-1": "<mask token>\n\n\nclass TrafficScriptArg:\n <mask token>\n <mask token>\n\n def get_arg(self, arg_name):\n \"\"\"Get argument value.\n\n :param arg_name: Argument name.\n :type arg_name: str\n :returns: Argument value.\n :rtype: str\n \"\"\"\n arg_val = self._args.get(arg_name)\n if arg_val is None:\n raise Exception(f\"Argument '{arg_name}' not found\")\n return arg_val\n",
"step-2": "<mask token>\n\n\nclass TrafficScriptArg:\n <mask token>\n\n def __init__(self, more_args=None, opt_args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(u'--tx_if', help=u'interface that sends traffic')\n parser.add_argument(u'--rx_if', help=u'interface that receives traffic'\n )\n if more_args is not None:\n for arg in more_args:\n arg_name = f'--{arg}'\n parser.add_argument(arg_name)\n if opt_args is not None:\n for arg in opt_args:\n arg_name = f'--{arg}'\n parser.add_argument(arg_name, nargs=u'?', default=u'')\n self._parser = parser\n self._args = vars(parser.parse_args())\n\n def get_arg(self, arg_name):\n \"\"\"Get argument value.\n\n :param arg_name: Argument name.\n :type arg_name: str\n :returns: Argument value.\n :rtype: str\n \"\"\"\n arg_val = self._args.get(arg_name)\n if arg_val is None:\n raise Exception(f\"Argument '{arg_name}' not found\")\n return arg_val\n",
"step-3": "<mask token>\n\n\nclass TrafficScriptArg:\n \"\"\"Traffic scripts argument parser.\n\n Parse arguments for traffic script. Default has two arguments '--tx_if'\n and '--rx_if'. You can provide more arguments. All arguments have string\n representation of the value. You can add also optional arguments. Default\n value for optional arguments is empty string.\n\n :param more_args: List of additional arguments (optional).\n :param opt_args: List of optional arguments (optional).\n :type more_args: list\n :type opt_args: list\n\n :Example:\n\n >>> from TrafficScriptArg import TrafficScriptArg\n >>> args = TrafficScriptArg(['src_mac', 'dst_mac', 'src_ip', 'dst_ip'])\n \"\"\"\n\n def __init__(self, more_args=None, opt_args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(u'--tx_if', help=u'interface that sends traffic')\n parser.add_argument(u'--rx_if', help=u'interface that receives traffic'\n )\n if more_args is not None:\n for arg in more_args:\n arg_name = f'--{arg}'\n parser.add_argument(arg_name)\n if opt_args is not None:\n for arg in opt_args:\n arg_name = f'--{arg}'\n parser.add_argument(arg_name, nargs=u'?', default=u'')\n self._parser = parser\n self._args = vars(parser.parse_args())\n\n def get_arg(self, arg_name):\n \"\"\"Get argument value.\n\n :param arg_name: Argument name.\n :type arg_name: str\n :returns: Argument value.\n :rtype: str\n \"\"\"\n arg_val = self._args.get(arg_name)\n if arg_val is None:\n raise Exception(f\"Argument '{arg_name}' not found\")\n return arg_val\n",
"step-4": "<mask token>\nimport argparse\n\n\nclass TrafficScriptArg:\n \"\"\"Traffic scripts argument parser.\n\n Parse arguments for traffic script. Default has two arguments '--tx_if'\n and '--rx_if'. You can provide more arguments. All arguments have string\n representation of the value. You can add also optional arguments. Default\n value for optional arguments is empty string.\n\n :param more_args: List of additional arguments (optional).\n :param opt_args: List of optional arguments (optional).\n :type more_args: list\n :type opt_args: list\n\n :Example:\n\n >>> from TrafficScriptArg import TrafficScriptArg\n >>> args = TrafficScriptArg(['src_mac', 'dst_mac', 'src_ip', 'dst_ip'])\n \"\"\"\n\n def __init__(self, more_args=None, opt_args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(u'--tx_if', help=u'interface that sends traffic')\n parser.add_argument(u'--rx_if', help=u'interface that receives traffic'\n )\n if more_args is not None:\n for arg in more_args:\n arg_name = f'--{arg}'\n parser.add_argument(arg_name)\n if opt_args is not None:\n for arg in opt_args:\n arg_name = f'--{arg}'\n parser.add_argument(arg_name, nargs=u'?', default=u'')\n self._parser = parser\n self._args = vars(parser.parse_args())\n\n def get_arg(self, arg_name):\n \"\"\"Get argument value.\n\n :param arg_name: Argument name.\n :type arg_name: str\n :returns: Argument value.\n :rtype: str\n \"\"\"\n arg_val = self._args.get(arg_name)\n if arg_val is None:\n raise Exception(f\"Argument '{arg_name}' not found\")\n return arg_val\n",
"step-5": "# Copyright (c) 2021 Cisco and/or its affiliates.\n#\n# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later\n#\n# Licensed under the Apache License 2.0 or\n# GNU General Public License v2.0 or later; you may not use this file\n# except in compliance with one of these Licenses. You\n# may obtain a copy of the Licenses at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html\n#\n# Note: If this file is linked with Scapy, which is GPLv2+, your use of it\n# must be under GPLv2+. If at any point in the future it is no longer linked\n# with Scapy (or other GPLv2+ licensed software), you are free to choose\n# Apache 2.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Traffic scripts argument parser library.\"\"\"\n\nimport argparse\n\n\nclass TrafficScriptArg:\n \"\"\"Traffic scripts argument parser.\n\n Parse arguments for traffic script. Default has two arguments '--tx_if'\n and '--rx_if'. You can provide more arguments. All arguments have string\n representation of the value. You can add also optional arguments. 
Default\n value for optional arguments is empty string.\n\n :param more_args: List of additional arguments (optional).\n :param opt_args: List of optional arguments (optional).\n :type more_args: list\n :type opt_args: list\n\n :Example:\n\n >>> from TrafficScriptArg import TrafficScriptArg\n >>> args = TrafficScriptArg(['src_mac', 'dst_mac', 'src_ip', 'dst_ip'])\n \"\"\"\n\n def __init__(self, more_args=None, opt_args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(u\"--tx_if\", help=u\"interface that sends traffic\")\n parser.add_argument(u\"--rx_if\", help=u\"interface that receives traffic\")\n\n if more_args is not None:\n for arg in more_args:\n arg_name = f\"--{arg}\"\n parser.add_argument(arg_name)\n\n if opt_args is not None:\n for arg in opt_args:\n arg_name = f\"--{arg}\"\n parser.add_argument(arg_name, nargs=u\"?\", default=u\"\")\n\n self._parser = parser\n self._args = vars(parser.parse_args())\n\n def get_arg(self, arg_name):\n \"\"\"Get argument value.\n\n :param arg_name: Argument name.\n :type arg_name: str\n :returns: Argument value.\n :rtype: str\n \"\"\"\n arg_val = self._args.get(arg_name)\n if arg_val is None:\n raise Exception(f\"Argument '{arg_name}' not found\")\n\n return arg_val\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def generateNoise():
caveMap = []
column = 1
row = 1
while column <= mapWidth:
while row <= mapHeight:
if (column == 1 or column == mapWidth or row == 1 or row ==
mapHeight):
caveMap.append([column, row, 1])
elif random.randrange(1, 100) <= fillPercent:
caveMap.append([column, row, 1])
else:
caveMap.append([column, row, 0])
row += 1
column += 1
row = 1
printCaveMap(caveMap)
return caveMap
<|reserved_special_token_0|>
def isWall(caveMap, column, row):
for cell in caveMap:
if cell[0] == column and cell[1] == row and cell[2] == 1:
return True
elif cell[0] == column and cell[1] == row and cell[2] == 0:
return False
else:
continue
def findNeighbors(caveMap, column, row):
neighbors = 0
if isOutOfBounds(column - 1, row - 1):
neighbors += 1
elif isWall(caveMap, column - 1, row - 1):
neighbors += 1
if isOutOfBounds(column, row - 1):
neighbors += 1
elif isWall(caveMap, column, row - 1):
neighbors += 1
if isOutOfBounds(column + 1, row - 1):
neighbors += 1
elif isWall(caveMap, column + 1, row - 1):
neighbors += 1
if isOutOfBounds(column - 1, row):
neighbors += 1
elif isWall(caveMap, column - 1, row):
neighbors += 1
if isOutOfBounds(column + 1, row):
neighbors += 1
elif isWall(caveMap, column + 1, row):
neighbors += 1
if isOutOfBounds(column - 1, row + 1):
neighbors += 1
elif isWall(caveMap, column - 1, row + 1):
neighbors += 1
if isOutOfBounds(column, row + 1):
neighbors += 1
elif isWall(caveMap, column, row + 1):
neighbors += 1
if isOutOfBounds(column + 1, row + 1):
neighbors += 1
elif isWall(caveMap, column + 1, row + 1):
neighbors += 1
return neighbors
def runGeneration(caveMap, generations):
i = 0
for i in range(0, generations):
start_time = time.time()
for cell in caveMap:
if findNeighbors(caveMap, cell[0], cell[1]) < 3:
cell[2] = 0
elif findNeighbors(caveMap, cell[0], cell[1]) > 5:
cell[2] = 1
printCaveMap(caveMap)
end_time = time.time()
print(end_time - start_time, ' seconds')
return caveMap
<|reserved_special_token_0|>
def main():
caveMap = generateNoise()
runGeneration(caveMap, 2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generateNoise():
caveMap = []
column = 1
row = 1
while column <= mapWidth:
while row <= mapHeight:
if (column == 1 or column == mapWidth or row == 1 or row ==
mapHeight):
caveMap.append([column, row, 1])
elif random.randrange(1, 100) <= fillPercent:
caveMap.append([column, row, 1])
else:
caveMap.append([column, row, 0])
row += 1
column += 1
row = 1
printCaveMap(caveMap)
return caveMap
<|reserved_special_token_0|>
def isWall(caveMap, column, row):
for cell in caveMap:
if cell[0] == column and cell[1] == row and cell[2] == 1:
return True
elif cell[0] == column and cell[1] == row and cell[2] == 0:
return False
else:
continue
def findNeighbors(caveMap, column, row):
neighbors = 0
if isOutOfBounds(column - 1, row - 1):
neighbors += 1
elif isWall(caveMap, column - 1, row - 1):
neighbors += 1
if isOutOfBounds(column, row - 1):
neighbors += 1
elif isWall(caveMap, column, row - 1):
neighbors += 1
if isOutOfBounds(column + 1, row - 1):
neighbors += 1
elif isWall(caveMap, column + 1, row - 1):
neighbors += 1
if isOutOfBounds(column - 1, row):
neighbors += 1
elif isWall(caveMap, column - 1, row):
neighbors += 1
if isOutOfBounds(column + 1, row):
neighbors += 1
elif isWall(caveMap, column + 1, row):
neighbors += 1
if isOutOfBounds(column - 1, row + 1):
neighbors += 1
elif isWall(caveMap, column - 1, row + 1):
neighbors += 1
if isOutOfBounds(column, row + 1):
neighbors += 1
elif isWall(caveMap, column, row + 1):
neighbors += 1
if isOutOfBounds(column + 1, row + 1):
neighbors += 1
elif isWall(caveMap, column + 1, row + 1):
neighbors += 1
return neighbors
def runGeneration(caveMap, generations):
i = 0
for i in range(0, generations):
start_time = time.time()
for cell in caveMap:
if findNeighbors(caveMap, cell[0], cell[1]) < 3:
cell[2] = 0
elif findNeighbors(caveMap, cell[0], cell[1]) > 5:
cell[2] = 1
printCaveMap(caveMap)
end_time = time.time()
print(end_time - start_time, ' seconds')
return caveMap
def printCaveMap(caveMap):
i = 1
for item in caveMap:
if i == mapWidth + 1:
print('\r')
i = 1
if item[2] == 1:
print(' # ', end='')
else:
print(' ', end='')
i += 1
print('\n', '\n')
def main():
caveMap = generateNoise()
runGeneration(caveMap, 2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generateNoise():
caveMap = []
column = 1
row = 1
while column <= mapWidth:
while row <= mapHeight:
if (column == 1 or column == mapWidth or row == 1 or row ==
mapHeight):
caveMap.append([column, row, 1])
elif random.randrange(1, 100) <= fillPercent:
caveMap.append([column, row, 1])
else:
caveMap.append([column, row, 0])
row += 1
column += 1
row = 1
printCaveMap(caveMap)
return caveMap
def isOutOfBounds(column, row):
if column < 1 or row < 1:
return True
elif column > mapWidth or row > mapHeight:
return True
else:
return False
def isWall(caveMap, column, row):
for cell in caveMap:
if cell[0] == column and cell[1] == row and cell[2] == 1:
return True
elif cell[0] == column and cell[1] == row and cell[2] == 0:
return False
else:
continue
def findNeighbors(caveMap, column, row):
neighbors = 0
if isOutOfBounds(column - 1, row - 1):
neighbors += 1
elif isWall(caveMap, column - 1, row - 1):
neighbors += 1
if isOutOfBounds(column, row - 1):
neighbors += 1
elif isWall(caveMap, column, row - 1):
neighbors += 1
if isOutOfBounds(column + 1, row - 1):
neighbors += 1
elif isWall(caveMap, column + 1, row - 1):
neighbors += 1
if isOutOfBounds(column - 1, row):
neighbors += 1
elif isWall(caveMap, column - 1, row):
neighbors += 1
if isOutOfBounds(column + 1, row):
neighbors += 1
elif isWall(caveMap, column + 1, row):
neighbors += 1
if isOutOfBounds(column - 1, row + 1):
neighbors += 1
elif isWall(caveMap, column - 1, row + 1):
neighbors += 1
if isOutOfBounds(column, row + 1):
neighbors += 1
elif isWall(caveMap, column, row + 1):
neighbors += 1
if isOutOfBounds(column + 1, row + 1):
neighbors += 1
elif isWall(caveMap, column + 1, row + 1):
neighbors += 1
return neighbors
def runGeneration(caveMap, generations):
i = 0
for i in range(0, generations):
start_time = time.time()
for cell in caveMap:
if findNeighbors(caveMap, cell[0], cell[1]) < 3:
cell[2] = 0
elif findNeighbors(caveMap, cell[0], cell[1]) > 5:
cell[2] = 1
printCaveMap(caveMap)
end_time = time.time()
print(end_time - start_time, ' seconds')
return caveMap
def printCaveMap(caveMap):
i = 1
for item in caveMap:
if i == mapWidth + 1:
print('\r')
i = 1
if item[2] == 1:
print(' # ', end='')
else:
print(' ', end='')
i += 1
print('\n', '\n')
def main():
caveMap = generateNoise()
runGeneration(caveMap, 2)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
mapHeight = 30
mapWidth = 30
fillPercent = 45
def generateNoise():
caveMap = []
column = 1
row = 1
while column <= mapWidth:
while row <= mapHeight:
if (column == 1 or column == mapWidth or row == 1 or row ==
mapHeight):
caveMap.append([column, row, 1])
elif random.randrange(1, 100) <= fillPercent:
caveMap.append([column, row, 1])
else:
caveMap.append([column, row, 0])
row += 1
column += 1
row = 1
printCaveMap(caveMap)
return caveMap
def isOutOfBounds(column, row):
if column < 1 or row < 1:
return True
elif column > mapWidth or row > mapHeight:
return True
else:
return False
def isWall(caveMap, column, row):
for cell in caveMap:
if cell[0] == column and cell[1] == row and cell[2] == 1:
return True
elif cell[0] == column and cell[1] == row and cell[2] == 0:
return False
else:
continue
def findNeighbors(caveMap, column, row):
neighbors = 0
if isOutOfBounds(column - 1, row - 1):
neighbors += 1
elif isWall(caveMap, column - 1, row - 1):
neighbors += 1
if isOutOfBounds(column, row - 1):
neighbors += 1
elif isWall(caveMap, column, row - 1):
neighbors += 1
if isOutOfBounds(column + 1, row - 1):
neighbors += 1
elif isWall(caveMap, column + 1, row - 1):
neighbors += 1
if isOutOfBounds(column - 1, row):
neighbors += 1
elif isWall(caveMap, column - 1, row):
neighbors += 1
if isOutOfBounds(column + 1, row):
neighbors += 1
elif isWall(caveMap, column + 1, row):
neighbors += 1
if isOutOfBounds(column - 1, row + 1):
neighbors += 1
elif isWall(caveMap, column - 1, row + 1):
neighbors += 1
if isOutOfBounds(column, row + 1):
neighbors += 1
elif isWall(caveMap, column, row + 1):
neighbors += 1
if isOutOfBounds(column + 1, row + 1):
neighbors += 1
elif isWall(caveMap, column + 1, row + 1):
neighbors += 1
return neighbors
def runGeneration(caveMap, generations):
i = 0
for i in range(0, generations):
start_time = time.time()
for cell in caveMap:
if findNeighbors(caveMap, cell[0], cell[1]) < 3:
cell[2] = 0
elif findNeighbors(caveMap, cell[0], cell[1]) > 5:
cell[2] = 1
printCaveMap(caveMap)
end_time = time.time()
print(end_time - start_time, ' seconds')
return caveMap
def printCaveMap(caveMap):
i = 1
for item in caveMap:
if i == mapWidth + 1:
print('\r')
i = 1
if item[2] == 1:
print(' # ', end='')
else:
print(' ', end='')
i += 1
print('\n', '\n')
def main():
caveMap = generateNoise()
runGeneration(caveMap, 2)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#/usr/bin/env python
#v0.2
import random, time
mapHeight = 30  # number of rows in the generated grid
mapWidth = 30  # number of columns in the generated grid
fillPercent = 45  # percent chance that an interior cell starts as a wall
def generateNoise():
    """Build the initial random grid of cells.

    Returns a flat list of ``[column, row, state]`` triples covering a
    mapWidth x mapHeight grid, where state 1 = wall and 0 = floor.  The
    border is always walls; each interior cell independently has a
    fillPercent% chance of starting as a wall.  The map is printed as a
    side effect before being returned.
    """
    caveMap = []
    for column in range(1, mapWidth + 1):
        for row in range(1, mapHeight + 1):
            on_border = (column == 1 or column == mapWidth
                         or row == 1 or row == mapHeight)
            # randrange(100) yields 0..99, so `< fillPercent` is an exact
            # fillPercent/100 chance.  (The previous randrange(1, 100) drew
            # from only 99 values, giving fillPercent/99 instead of the
            # documented fillPercent%.)
            if on_border or random.randrange(100) < fillPercent:
                caveMap.append([column, row, 1])
            else:
                caveMap.append([column, row, 0])
    printCaveMap(caveMap)
    return caveMap
def isOutOfBounds(column, row):
    """Return True when (column, row) lies outside the map rectangle."""
    inside = 1 <= column <= mapWidth and 1 <= row <= mapHeight
    return not inside
def isWall(caveMap, column, row):
    """Return True/False for the wall state of cell (column, row).

    Returns None when the coordinate is not present in caveMap, matching
    the original fall-through behaviour.

    NOTE(review): this is O(len(caveMap)) per query and findNeighbors
    calls it up to eight times per cell; a dict keyed on (column, row)
    would be far cheaper, but that needs a map-representation change.
    """
    for cell in caveMap:
        if cell[0] == column and cell[1] == row:
            # States are only ever 0 or 1 (set in generateNoise and
            # runGeneration), so one boolean covers both original branches.
            return cell[2] == 1
    return None
def findNeighbors(caveMap, column, row):
    """Count walls among the eight cells surrounding (column, row).

    Out-of-bounds neighbours count as walls, which keeps the border
    solid during smoothing.
    """
    neighbors = 0
    for dc in (-1, 0, 1):
        for dr in (-1, 0, 1):
            if dc == 0 and dr == 0:
                continue  # the cell itself is not its own neighbour
            c, r = column + dc, row + dr
            # Same rule as the original unrolled version: out-of-bounds
            # counts as a wall, otherwise fall back to the map scan
            # (`or` short-circuits exactly like the original if/elif).
            if isOutOfBounds(c, r) or isWall(caveMap, c, r):
                neighbors += 1
    return neighbors
def runGeneration(caveMap, generations):
    """Smooth the noise with a modified 4-5 cellular-automaton rule.

    Runs `generations` passes; in each pass a cell with fewer than 3
    wall neighbours becomes floor and one with more than 5 becomes a
    wall.  Prints the map and the elapsed time after every pass and
    returns the (mutated) caveMap.

    NOTE(review): cells are rewritten in place while neighbours are
    still being read, so later cells in a pass see partially-updated
    state.  That matches the original behaviour; a double-buffered
    update would produce different caves.
    """
    for _ in range(generations):
        start_time = time.time()
        for cell in caveMap:
            # Compute the expensive neighbour count once per cell; the
            # original evaluated findNeighbors twice for cells that were
            # not below the floor threshold.
            walls = findNeighbors(caveMap, cell[0], cell[1])
            if walls < 3:
                cell[2] = 0
            elif walls > 5:
                cell[2] = 1
        printCaveMap(caveMap)
        end_time = time.time()
        print(end_time - start_time, " seconds")
    return caveMap
def printCaveMap(caveMap):
    """Render caveMap to stdout as a character grid (" # " = wall, blanks = floor).

    Only mapWidth drives the row breaks; the column/row stored in each
    cell is ignored.  NOTE(review): generateNoise emits cells in
    column-major order, so the printed grid is effectively transposed —
    harmless here because the map is square.
    """
    # i counts cells within the current printed row (1-based).
    i = 1
    for item in caveMap:
        if i == mapWidth + 1:
            # A full row has been emitted: break the line, reset the counter.
            print('\r')
            i = 1
        if item[2] == 1:
            print(" # ", end="")
        else:
            print("   ", end="")
        i += 1
    print("\n", "\n")
def main():
    """Generate a random noise map and smooth it with two automata passes."""
    caveMap = generateNoise()
    runGeneration(caveMap, 2)
if __name__ == "__main__":
    main()
|
flexible
|
{
"blob_id": "7feac838f17ef1e4338190c0e8c284ed99369693",
"index": 1628,
"step-1": "<mask token>\n\n\ndef generateNoise():\n caveMap = []\n column = 1\n row = 1\n while column <= mapWidth:\n while row <= mapHeight:\n if (column == 1 or column == mapWidth or row == 1 or row ==\n mapHeight):\n caveMap.append([column, row, 1])\n elif random.randrange(1, 100) <= fillPercent:\n caveMap.append([column, row, 1])\n else:\n caveMap.append([column, row, 0])\n row += 1\n column += 1\n row = 1\n printCaveMap(caveMap)\n return caveMap\n\n\n<mask token>\n\n\ndef isWall(caveMap, column, row):\n for cell in caveMap:\n if cell[0] == column and cell[1] == row and cell[2] == 1:\n return True\n elif cell[0] == column and cell[1] == row and cell[2] == 0:\n return False\n else:\n continue\n\n\ndef findNeighbors(caveMap, column, row):\n neighbors = 0\n if isOutOfBounds(column - 1, row - 1):\n neighbors += 1\n elif isWall(caveMap, column - 1, row - 1):\n neighbors += 1\n if isOutOfBounds(column, row - 1):\n neighbors += 1\n elif isWall(caveMap, column, row - 1):\n neighbors += 1\n if isOutOfBounds(column + 1, row - 1):\n neighbors += 1\n elif isWall(caveMap, column + 1, row - 1):\n neighbors += 1\n if isOutOfBounds(column - 1, row):\n neighbors += 1\n elif isWall(caveMap, column - 1, row):\n neighbors += 1\n if isOutOfBounds(column + 1, row):\n neighbors += 1\n elif isWall(caveMap, column + 1, row):\n neighbors += 1\n if isOutOfBounds(column - 1, row + 1):\n neighbors += 1\n elif isWall(caveMap, column - 1, row + 1):\n neighbors += 1\n if isOutOfBounds(column, row + 1):\n neighbors += 1\n elif isWall(caveMap, column, row + 1):\n neighbors += 1\n if isOutOfBounds(column + 1, row + 1):\n neighbors += 1\n elif isWall(caveMap, column + 1, row + 1):\n neighbors += 1\n return neighbors\n\n\ndef runGeneration(caveMap, generations):\n i = 0\n for i in range(0, generations):\n start_time = time.time()\n for cell in caveMap:\n if findNeighbors(caveMap, cell[0], cell[1]) < 3:\n cell[2] = 0\n elif findNeighbors(caveMap, cell[0], cell[1]) > 5:\n cell[2] = 1\n 
printCaveMap(caveMap)\n end_time = time.time()\n print(end_time - start_time, ' seconds')\n return caveMap\n\n\n<mask token>\n\n\ndef main():\n caveMap = generateNoise()\n runGeneration(caveMap, 2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generateNoise():\n caveMap = []\n column = 1\n row = 1\n while column <= mapWidth:\n while row <= mapHeight:\n if (column == 1 or column == mapWidth or row == 1 or row ==\n mapHeight):\n caveMap.append([column, row, 1])\n elif random.randrange(1, 100) <= fillPercent:\n caveMap.append([column, row, 1])\n else:\n caveMap.append([column, row, 0])\n row += 1\n column += 1\n row = 1\n printCaveMap(caveMap)\n return caveMap\n\n\n<mask token>\n\n\ndef isWall(caveMap, column, row):\n for cell in caveMap:\n if cell[0] == column and cell[1] == row and cell[2] == 1:\n return True\n elif cell[0] == column and cell[1] == row and cell[2] == 0:\n return False\n else:\n continue\n\n\ndef findNeighbors(caveMap, column, row):\n neighbors = 0\n if isOutOfBounds(column - 1, row - 1):\n neighbors += 1\n elif isWall(caveMap, column - 1, row - 1):\n neighbors += 1\n if isOutOfBounds(column, row - 1):\n neighbors += 1\n elif isWall(caveMap, column, row - 1):\n neighbors += 1\n if isOutOfBounds(column + 1, row - 1):\n neighbors += 1\n elif isWall(caveMap, column + 1, row - 1):\n neighbors += 1\n if isOutOfBounds(column - 1, row):\n neighbors += 1\n elif isWall(caveMap, column - 1, row):\n neighbors += 1\n if isOutOfBounds(column + 1, row):\n neighbors += 1\n elif isWall(caveMap, column + 1, row):\n neighbors += 1\n if isOutOfBounds(column - 1, row + 1):\n neighbors += 1\n elif isWall(caveMap, column - 1, row + 1):\n neighbors += 1\n if isOutOfBounds(column, row + 1):\n neighbors += 1\n elif isWall(caveMap, column, row + 1):\n neighbors += 1\n if isOutOfBounds(column + 1, row + 1):\n neighbors += 1\n elif isWall(caveMap, column + 1, row + 1):\n neighbors += 1\n return neighbors\n\n\ndef runGeneration(caveMap, generations):\n i = 0\n for i in range(0, generations):\n start_time = time.time()\n for cell in caveMap:\n if findNeighbors(caveMap, cell[0], cell[1]) < 3:\n cell[2] = 0\n elif findNeighbors(caveMap, cell[0], cell[1]) > 5:\n cell[2] = 1\n 
printCaveMap(caveMap)\n end_time = time.time()\n print(end_time - start_time, ' seconds')\n return caveMap\n\n\ndef printCaveMap(caveMap):\n i = 1\n for item in caveMap:\n if i == mapWidth + 1:\n print('\\r')\n i = 1\n if item[2] == 1:\n print(' # ', end='')\n else:\n print(' ', end='')\n i += 1\n print('\\n', '\\n')\n\n\ndef main():\n caveMap = generateNoise()\n runGeneration(caveMap, 2)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef generateNoise():\n caveMap = []\n column = 1\n row = 1\n while column <= mapWidth:\n while row <= mapHeight:\n if (column == 1 or column == mapWidth or row == 1 or row ==\n mapHeight):\n caveMap.append([column, row, 1])\n elif random.randrange(1, 100) <= fillPercent:\n caveMap.append([column, row, 1])\n else:\n caveMap.append([column, row, 0])\n row += 1\n column += 1\n row = 1\n printCaveMap(caveMap)\n return caveMap\n\n\ndef isOutOfBounds(column, row):\n if column < 1 or row < 1:\n return True\n elif column > mapWidth or row > mapHeight:\n return True\n else:\n return False\n\n\ndef isWall(caveMap, column, row):\n for cell in caveMap:\n if cell[0] == column and cell[1] == row and cell[2] == 1:\n return True\n elif cell[0] == column and cell[1] == row and cell[2] == 0:\n return False\n else:\n continue\n\n\ndef findNeighbors(caveMap, column, row):\n neighbors = 0\n if isOutOfBounds(column - 1, row - 1):\n neighbors += 1\n elif isWall(caveMap, column - 1, row - 1):\n neighbors += 1\n if isOutOfBounds(column, row - 1):\n neighbors += 1\n elif isWall(caveMap, column, row - 1):\n neighbors += 1\n if isOutOfBounds(column + 1, row - 1):\n neighbors += 1\n elif isWall(caveMap, column + 1, row - 1):\n neighbors += 1\n if isOutOfBounds(column - 1, row):\n neighbors += 1\n elif isWall(caveMap, column - 1, row):\n neighbors += 1\n if isOutOfBounds(column + 1, row):\n neighbors += 1\n elif isWall(caveMap, column + 1, row):\n neighbors += 1\n if isOutOfBounds(column - 1, row + 1):\n neighbors += 1\n elif isWall(caveMap, column - 1, row + 1):\n neighbors += 1\n if isOutOfBounds(column, row + 1):\n neighbors += 1\n elif isWall(caveMap, column, row + 1):\n neighbors += 1\n if isOutOfBounds(column + 1, row + 1):\n neighbors += 1\n elif isWall(caveMap, column + 1, row + 1):\n neighbors += 1\n return neighbors\n\n\ndef runGeneration(caveMap, generations):\n i = 0\n for i in range(0, generations):\n start_time = time.time()\n for cell in caveMap:\n if 
findNeighbors(caveMap, cell[0], cell[1]) < 3:\n cell[2] = 0\n elif findNeighbors(caveMap, cell[0], cell[1]) > 5:\n cell[2] = 1\n printCaveMap(caveMap)\n end_time = time.time()\n print(end_time - start_time, ' seconds')\n return caveMap\n\n\ndef printCaveMap(caveMap):\n i = 1\n for item in caveMap:\n if i == mapWidth + 1:\n print('\\r')\n i = 1\n if item[2] == 1:\n print(' # ', end='')\n else:\n print(' ', end='')\n i += 1\n print('\\n', '\\n')\n\n\ndef main():\n caveMap = generateNoise()\n runGeneration(caveMap, 2)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nmapHeight = 30\nmapWidth = 30\nfillPercent = 45\n\n\ndef generateNoise():\n caveMap = []\n column = 1\n row = 1\n while column <= mapWidth:\n while row <= mapHeight:\n if (column == 1 or column == mapWidth or row == 1 or row ==\n mapHeight):\n caveMap.append([column, row, 1])\n elif random.randrange(1, 100) <= fillPercent:\n caveMap.append([column, row, 1])\n else:\n caveMap.append([column, row, 0])\n row += 1\n column += 1\n row = 1\n printCaveMap(caveMap)\n return caveMap\n\n\ndef isOutOfBounds(column, row):\n if column < 1 or row < 1:\n return True\n elif column > mapWidth or row > mapHeight:\n return True\n else:\n return False\n\n\ndef isWall(caveMap, column, row):\n for cell in caveMap:\n if cell[0] == column and cell[1] == row and cell[2] == 1:\n return True\n elif cell[0] == column and cell[1] == row and cell[2] == 0:\n return False\n else:\n continue\n\n\ndef findNeighbors(caveMap, column, row):\n neighbors = 0\n if isOutOfBounds(column - 1, row - 1):\n neighbors += 1\n elif isWall(caveMap, column - 1, row - 1):\n neighbors += 1\n if isOutOfBounds(column, row - 1):\n neighbors += 1\n elif isWall(caveMap, column, row - 1):\n neighbors += 1\n if isOutOfBounds(column + 1, row - 1):\n neighbors += 1\n elif isWall(caveMap, column + 1, row - 1):\n neighbors += 1\n if isOutOfBounds(column - 1, row):\n neighbors += 1\n elif isWall(caveMap, column - 1, row):\n neighbors += 1\n if isOutOfBounds(column + 1, row):\n neighbors += 1\n elif isWall(caveMap, column + 1, row):\n neighbors += 1\n if isOutOfBounds(column - 1, row + 1):\n neighbors += 1\n elif isWall(caveMap, column - 1, row + 1):\n neighbors += 1\n if isOutOfBounds(column, row + 1):\n neighbors += 1\n elif isWall(caveMap, column, row + 1):\n neighbors += 1\n if isOutOfBounds(column + 1, row + 1):\n neighbors += 1\n elif isWall(caveMap, column + 1, row + 1):\n neighbors += 1\n return neighbors\n\n\ndef runGeneration(caveMap, generations):\n i = 0\n for i in range(0, generations):\n 
start_time = time.time()\n for cell in caveMap:\n if findNeighbors(caveMap, cell[0], cell[1]) < 3:\n cell[2] = 0\n elif findNeighbors(caveMap, cell[0], cell[1]) > 5:\n cell[2] = 1\n printCaveMap(caveMap)\n end_time = time.time()\n print(end_time - start_time, ' seconds')\n return caveMap\n\n\ndef printCaveMap(caveMap):\n i = 1\n for item in caveMap:\n if i == mapWidth + 1:\n print('\\r')\n i = 1\n if item[2] == 1:\n print(' # ', end='')\n else:\n print(' ', end='')\n i += 1\n print('\\n', '\\n')\n\n\ndef main():\n caveMap = generateNoise()\n runGeneration(caveMap, 2)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#/usr/bin/env python\r\n#v0.2\r\nimport random, time\r\n\r\nmapHeight = 30\r\nmapWidth = 30\r\nfillPercent = 45\r\n\r\ndef generateNoise():\r\n\t#generate a grid of cells with height = mapHeight and width = mapWidth with each cell either \"walls\" (true) or \"floors\" (false)\r\n\t#border is guaranteed to be walls and all other spaces have a fillPercent chance of being walls\r\n\tcaveMap = []\r\n\tcolumn = 1\r\n\trow = 1\r\n\t\r\n\twhile column <= mapWidth:\r\n\t\twhile row <= mapHeight:\r\n\t\t\tif (column == 1) or (column == mapWidth) or (row == 1) or (row == mapHeight):\r\n\t\t\t\tcaveMap.append([column, row, 1])\r\n\t\t\telse:\r\n\t\t\t\tif random.randrange(1,100) <= fillPercent:\r\n\t\t\t\t\tcaveMap.append([column, row, 1])\r\n\t\t\t\telse:\r\n\t\t\t\t\tcaveMap.append([column,row,0])\r\n\t\t\trow += 1\r\n\t\tcolumn += 1\r\n\t\trow = 1\r\n\t\t\r\n\tprintCaveMap(caveMap)\t\t\r\n\treturn caveMap\r\n\r\ndef isOutOfBounds(column, row):\r\n\t#find if a cell is out of bounds based on map size\r\n\t\r\n\tif column < 1 or row < 1:\r\n\t\treturn True\r\n\telif column > mapWidth or row > mapHeight:\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False\r\n\r\ndef isWall(caveMap, column, row):\r\n\t#determine if a cell is a wall or not\r\n\t#very inefficient - might have to loop through entire list\r\n\r\n\tfor cell in caveMap:\r\n\t\tif cell[0] == column and cell[1] == row and cell[2] == 1:\r\n\t\t\treturn True\r\n\t\telif cell[0] == column and cell[1] == row and cell[2] == 0:\r\n\t\t\treturn False\r\n\t\telse:\r\n\t\t\tcontinue\r\n\t\r\ndef findNeighbors(caveMap, column, row):\r\n\t#find the number of walls in a 3x3 pattern around a given cell (determined by column and row)\r\n\t#there must be a more efficient way to do this, but here we are\r\n\r\n\tneighbors = 0\r\n\r\n\tif isOutOfBounds(column -1, row -1):\r\n\t\tneighbors += 1\r\n\telif isWall(caveMap, column -1, row -1):\r\n\t\tneighbors += 1\r\n\t\t\r\n\tif isOutOfBounds(column, row -1):\r\n\t\tneighbors += 
1\r\n\telif isWall(caveMap, column, row -1):\r\n\t\tneighbors += 1\r\n\t\t\r\n\tif isOutOfBounds(column +1, row -1):\r\n\t\tneighbors += 1\r\n\telif isWall(caveMap, column +1, row -1):\r\n\t\tneighbors += 1\r\n\t\t\r\n\tif isOutOfBounds(column -1, row):\r\n\t\tneighbors += 1\r\n\telif isWall(caveMap, column -1, row):\r\n\t\tneighbors += 1\r\n\t\t\r\n\tif isOutOfBounds(column +1, row):\r\n\t\tneighbors += 1\r\n\telif isWall(caveMap, column +1, row):\r\n\t\tneighbors += 1\r\n\t\t\r\n\tif isOutOfBounds(column -1, row +1):\r\n\t\tneighbors += 1\r\n\telif isWall(caveMap, column -1, row +1):\r\n\t\tneighbors += 1\r\n\t\t\r\n\tif isOutOfBounds(column, row +1):\r\n\t\tneighbors += 1\r\n\telif isWall(caveMap, column, row +1):\r\n\t\tneighbors += 1\r\n\t\t\r\n\tif isOutOfBounds(column +1, row +1):\r\n\t\tneighbors += 1\r\n\telif isWall(caveMap, column +1, row +1):\r\n\t\tneighbors += 1\r\n\r\n\treturn neighbors\r\n\t\r\ndef runGeneration (caveMap, generations):\r\n\t#smooth out random noise using modified 4-5 cellular automata rules\r\n\t#the entire process is pretty inefficient - it has to loop through the entire list as many as \r\n\t#(mapWidth * mapHeight * 8) times for potentially millions of comparisons\r\n\ti =0 \r\n\t\r\n\tfor i in range(0, generations):\r\n\t\tstart_time = time.time()\r\n\t\tfor cell in caveMap:\r\n\t\t\tif findNeighbors(caveMap,cell[0],cell[1]) < 3:\r\n\t\t\t\tcell[2] = 0\r\n\t\t\telif findNeighbors(caveMap, cell[0], cell[1]) > 5:\r\n\t\t\t\tcell[2] = 1\r\n\t\tprintCaveMap(caveMap)\r\n\t\tend_time = time.time()\r\n\t\tprint(end_time - start_time, \" seconds\")\r\n\t\r\n\treturn caveMap\r\n\r\n\t\r\ndef printCaveMap(caveMap):\r\n\t#print the map by displaying a grid of characters where # = walls and spaces = floors\r\n\t#just uses mapWidth to insert returns, very agnostic about the column/row of a cell\r\n\t\r\n\ti = 1\r\n\tfor item in caveMap:\r\n\t\tif i == mapWidth + 1:\r\n\t\t\tprint('\\r')\r\n\t\t\ti = 1\r\n\t\tif item[2] == 1:\r\n\t\t\tprint(\" 
# \", end=\"\")\r\n\t\telse:\r\n\t\t\tprint(\" \", end=\"\")\r\n\t\ti += 1\r\n\t\r\n\tprint(\"\\n\", \"\\n\")\r\n\t\r\ndef main():\r\n\t\t\r\n\tcaveMap = generateNoise()\r\n\trunGeneration(caveMap, 2)\r\n\t\t\r\nif __name__ == \"__main__\":\r\n\tmain()",
"step-ids": [
5,
6,
8,
9,
11
]
}
|
[
5,
6,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.figure()
plt.plot(r, g_r, color='black')
plt.xlabel('r')
plt.ylabel('g(r)')
plt.xlim((0, rmax))
plt.ylim((0, 1.05 * g_r.max()))
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
filename = 'C:\\Users\\Maxi\\Desktop\\t\\Ag_HfO2_cat_3.125_222_t.cif'
crystal = read(filename)
corrdinates = crystal.get_positions()
cell_length = crystal.get_cell_lengths_and_angles()
cell_length = cell_length[0:3]
dr = 0.01
min_length_cell = min(cell_length)
rmax = min_length_cell / 10
x = corrdinates[:, 0]
y = corrdinates[:, 1]
z = corrdinates[:, 2]
g_r, r, ref_ind = pairCorrelationFunction_3D(x, y, z, min_length_cell, rmax, dr
)
plt.figure()
plt.plot(r, g_r, color='black')
plt.xlabel('r')
plt.ylabel('g(r)')
plt.xlim((0, rmax))
plt.ylim((0, 1.05 * g_r.max()))
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
from ase.io import read
from RDF_3D import pairCorrelationFunction_3D
import matplotlib.pyplot as plt
filename = 'C:\\Users\\Maxi\\Desktop\\t\\Ag_HfO2_cat_3.125_222_t.cif'
crystal = read(filename)
corrdinates = crystal.get_positions()
cell_length = crystal.get_cell_lengths_and_angles()
cell_length = cell_length[0:3]
dr = 0.01
min_length_cell = min(cell_length)
rmax = min_length_cell / 10
x = corrdinates[:, 0]
y = corrdinates[:, 1]
z = corrdinates[:, 2]
g_r, r, ref_ind = pairCorrelationFunction_3D(x, y, z, min_length_cell, rmax, dr
)
plt.figure()
plt.plot(r, g_r, color='black')
plt.xlabel('r')
plt.ylabel('g(r)')
plt.xlim((0, rmax))
plt.ylim((0, 1.05 * g_r.max()))
plt.show()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 15 10:28:04 2020

@author: Maxi

Read a crystal structure from a CIF file, compute its 3D radial
distribution function g(r), and plot the result.
"""
import numpy as np  # NOTE(review): imported but unused in this script
from ase.io import read
from RDF_3D import pairCorrelationFunction_3D
import matplotlib.pyplot as plt
# Absolute path to the input CIF structure file.
filename = r"C:\Users\Maxi\Desktop\t\Ag_HfO2_cat_3.125_222_t.cif"
crystal = read(filename)
corrdinates = crystal.get_positions()  # Nx3 array of atomic positions
cell_length = crystal.get_cell_lengths_and_angles()
cell_length = cell_length[0:3] # keep only the three cell lengths, drop the angles
dr = 0.01 # spherical shell thickness used when histogramming pair distances
min_length_cell = min(cell_length) # smallest cell edge length
# Sample g(r) only up to one tenth of the smallest cell length.
rmax = min_length_cell / 10
x = corrdinates[:, 0] # split the Nx3 array into x, y, z coordinate columns
y = corrdinates[:, 1]
z = corrdinates[:, 2]
g_r, r, ref_ind = pairCorrelationFunction_3D(x, y, z, min_length_cell, rmax, dr)
plt.figure()
plt.plot(r, g_r, color='black')
plt.xlabel('r')
plt.ylabel('g(r)')
plt.xlim( (0, rmax) )
plt.ylim( (0, 1.05 * g_r.max()) )
plt.show()
|
flexible
|
{
"blob_id": "516d9790f40c021d45302948b7fba0cf3e00da0a",
"index": 6322,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.figure()\nplt.plot(r, g_r, color='black')\nplt.xlabel('r')\nplt.ylabel('g(r)')\nplt.xlim((0, rmax))\nplt.ylim((0, 1.05 * g_r.max()))\nplt.show()\n",
"step-3": "<mask token>\nfilename = 'C:\\\\Users\\\\Maxi\\\\Desktop\\\\t\\\\Ag_HfO2_cat_3.125_222_t.cif'\ncrystal = read(filename)\ncorrdinates = crystal.get_positions()\ncell_length = crystal.get_cell_lengths_and_angles()\ncell_length = cell_length[0:3]\ndr = 0.01\nmin_length_cell = min(cell_length)\nrmax = min_length_cell / 10\nx = corrdinates[:, 0]\ny = corrdinates[:, 1]\nz = corrdinates[:, 2]\ng_r, r, ref_ind = pairCorrelationFunction_3D(x, y, z, min_length_cell, rmax, dr\n )\nplt.figure()\nplt.plot(r, g_r, color='black')\nplt.xlabel('r')\nplt.ylabel('g(r)')\nplt.xlim((0, rmax))\nplt.ylim((0, 1.05 * g_r.max()))\nplt.show()\n",
"step-4": "<mask token>\nimport numpy as np\nfrom ase.io import read\nfrom RDF_3D import pairCorrelationFunction_3D\nimport matplotlib.pyplot as plt\nfilename = 'C:\\\\Users\\\\Maxi\\\\Desktop\\\\t\\\\Ag_HfO2_cat_3.125_222_t.cif'\ncrystal = read(filename)\ncorrdinates = crystal.get_positions()\ncell_length = crystal.get_cell_lengths_and_angles()\ncell_length = cell_length[0:3]\ndr = 0.01\nmin_length_cell = min(cell_length)\nrmax = min_length_cell / 10\nx = corrdinates[:, 0]\ny = corrdinates[:, 1]\nz = corrdinates[:, 2]\ng_r, r, ref_ind = pairCorrelationFunction_3D(x, y, z, min_length_cell, rmax, dr\n )\nplt.figure()\nplt.plot(r, g_r, color='black')\nplt.xlabel('r')\nplt.ylabel('g(r)')\nplt.xlim((0, rmax))\nplt.ylim((0, 1.05 * g_r.max()))\nplt.show()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 15 10:28:04 2020\n\n@author: Maxi\n\"\"\"\nimport numpy as np\nfrom ase.io import read\nfrom RDF_3D import pairCorrelationFunction_3D\nimport matplotlib.pyplot as plt\n \n\nfilename = r\"C:\\Users\\Maxi\\Desktop\\t\\Ag_HfO2_cat_3.125_222_t.cif\"\ncrystal = read(filename)\ncorrdinates = crystal.get_positions()\ncell_length = crystal.get_cell_lengths_and_angles()\ncell_length = cell_length[0:3] # only select the cell length\n\ndr = 0.01 # shperical shell radius dr\nmin_length_cell = min(cell_length) # select the smalles length in cell\nrmax = min_length_cell / 10\nx = corrdinates[:, 0] # split the 2d array into x, y, z coordinates\ny = corrdinates[:, 1]\nz = corrdinates[:, 2]\n\ng_r, r, ref_ind = pairCorrelationFunction_3D(x, y, z, min_length_cell, rmax, dr)\n\nplt.figure()\nplt.plot(r, g_r, color='black')\nplt.xlabel('r')\nplt.ylabel('g(r)')\nplt.xlim( (0, rmax) )\nplt.ylim( (0, 1.05 * g_r.max()) )\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
def sumbelow(n):
    """Return the sum of every natural number below n divisible by 3 or 5."""
    return sum(v for v in range(n) if v % 3 == 0 or v % 5 == 0)
# One-liner alternatives:
# return sum(set(range(0, n, 3)).union(set(range(0, n, 5))))
# or, under Python 2 only (where range returns a list):
# return sum(set(range(0, n, 3) + range(0, n, 5)))
if __name__ == '__main__':
    # Call form works under both Python 2 and Python 3; the original
    # bare `print sumbelow(1000)` statement is a SyntaxError on Python 3.
    # (Also removed the unused module-level `n = 1000`.)
    print(sumbelow(1000))
|
normal
|
{
"blob_id": "8dbc0b9b80aae4cb5c4101007afc50ac54f7a7e7",
"index": 5873,
"step-1": "#!/usr/bin/python\n\ndef sumbelow(n):\n multiples_of_3 = set(range(0,n,3))\n multiples_of_5 = set(range(0,n,5))\n return sum(multiples_of_3.union(multiples_of_5))\n\n#one linear:\n# return sum(set(range(0,n,3)).union(set(range(0,n,5)))),\n# or rather,\n# return sum(set(range(0,n,3) + range(0,n,5)))\n\nif __name__ == '__main__':\n print sumbelow(1000)\n n = 1000\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from __future__ import absolute_import
import unittest
import yaml
import os
from bok_choy.web_app_test import WebAppTest
from .pages.job_config_history_subpage import JobConfigHistorySubPage
class TestJobConfigHistory(WebAppTest):
    """Verify the Jenkins Job Config History plugin settings against the
    values declared in ``job_config_history.yml``.
    """

    def setUp(self):
        """Load expected plugin settings and prepare the config subpage.

        The YAML config file lives under the directory named by the
        CONFIG_PATH environment variable.
        """
        super(TestJobConfigHistory, self).setUp()
        config_path = os.getenv('CONFIG_PATH')
        # Fix: the original swallowed IOError with `pass` and then crashed
        # later with a NameError on the unbound yaml_contents, and never
        # closed the file handle.  Let IOError propagate (the test errors
        # out either way, but with the real cause) and use a context
        # manager so the handle is always closed.
        with open(
            "{}/job_config_history.yml".format(config_path), 'r'
        ) as config_file:
            yaml_contents = config_file.read()
        self.job_config_history = yaml.safe_load(yaml_contents)
        self.config_page = JobConfigHistorySubPage(self.browser)

    def test_job_config_history(self):
        """
        Verify the Jenkins Config History plugin has been configured
        properly.
        """
        self.config_page.visit()
        self.config_page.expand_advanced()
        assert self.job_config_history['HISTORY_ROOT_DIR'] == self.config_page.get_history_root_dir()
        assert self.job_config_history['MAX_HISTORY_ENTRIES'] == self.config_page.get_max_history_entries()
        assert str(self.job_config_history['SKIP_DUPLICATE_HISTORY']).lower() == self.config_page.get_skip_duplicate_history()
        assert self.job_config_history['SHOW_BUILD_BADGES'] == self.config_page.get_show_build_badges()
|
normal
|
{
"blob_id": "51bdbec732bebd73a84b52c6d1d39eead047d29e",
"index": 5349,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestJobConfigHistory(WebAppTest):\n\n def setUp(self):\n super(TestJobConfigHistory, self).setUp()\n config_path = os.getenv('CONFIG_PATH')\n try:\n yaml_contents = open('{}/job_config_history.yml'.format(\n config_path), 'r').read()\n except IOError:\n pass\n self.job_config_history = yaml.safe_load(yaml_contents)\n self.config_page = JobConfigHistorySubPage(self.browser)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestJobConfigHistory(WebAppTest):\n\n def setUp(self):\n super(TestJobConfigHistory, self).setUp()\n config_path = os.getenv('CONFIG_PATH')\n try:\n yaml_contents = open('{}/job_config_history.yml'.format(\n config_path), 'r').read()\n except IOError:\n pass\n self.job_config_history = yaml.safe_load(yaml_contents)\n self.config_page = JobConfigHistorySubPage(self.browser)\n\n def test_job_config_history(self):\n \"\"\"\n Verify the Jenkins Config History plugin has been configured\n properly.\n \"\"\"\n self.config_page.visit()\n self.config_page.expand_advanced()\n assert self.job_config_history['HISTORY_ROOT_DIR'\n ] == self.config_page.get_history_root_dir()\n assert self.job_config_history['MAX_HISTORY_ENTRIES'\n ] == self.config_page.get_max_history_entries()\n assert str(self.job_config_history['SKIP_DUPLICATE_HISTORY']).lower(\n ) == self.config_page.get_skip_duplicate_history()\n assert self.job_config_history['SHOW_BUILD_BADGES'\n ] == self.config_page.get_show_build_badges()\n",
"step-4": "from __future__ import absolute_import\nimport unittest\nimport yaml\nimport os\nfrom bok_choy.web_app_test import WebAppTest\nfrom .pages.job_config_history_subpage import JobConfigHistorySubPage\n\n\nclass TestJobConfigHistory(WebAppTest):\n\n def setUp(self):\n super(TestJobConfigHistory, self).setUp()\n config_path = os.getenv('CONFIG_PATH')\n try:\n yaml_contents = open('{}/job_config_history.yml'.format(\n config_path), 'r').read()\n except IOError:\n pass\n self.job_config_history = yaml.safe_load(yaml_contents)\n self.config_page = JobConfigHistorySubPage(self.browser)\n\n def test_job_config_history(self):\n \"\"\"\n Verify the Jenkins Config History plugin has been configured\n properly.\n \"\"\"\n self.config_page.visit()\n self.config_page.expand_advanced()\n assert self.job_config_history['HISTORY_ROOT_DIR'\n ] == self.config_page.get_history_root_dir()\n assert self.job_config_history['MAX_HISTORY_ENTRIES'\n ] == self.config_page.get_max_history_entries()\n assert str(self.job_config_history['SKIP_DUPLICATE_HISTORY']).lower(\n ) == self.config_page.get_skip_duplicate_history()\n assert self.job_config_history['SHOW_BUILD_BADGES'\n ] == self.config_page.get_show_build_badges()\n",
"step-5": "from __future__ import absolute_import\nimport unittest\nimport yaml\nimport os\nfrom bok_choy.web_app_test import WebAppTest\nfrom .pages.job_config_history_subpage import JobConfigHistorySubPage\n\nclass TestJobConfigHistory(WebAppTest):\n\n def setUp(self):\n super(TestJobConfigHistory, self).setUp()\n config_path = os.getenv('CONFIG_PATH')\n try:\n yaml_contents = open(\n \"{}/job_config_history.yml\".format(config_path), 'r'\n ).read()\n except IOError:\n pass\n self.job_config_history = yaml.safe_load(yaml_contents)\n self.config_page = JobConfigHistorySubPage(self.browser)\n\n def test_job_config_history(self):\n \"\"\"\n Verify the Jenkins Config History plugin has been configured\n properly.\n \"\"\"\n self.config_page.visit()\n self.config_page.expand_advanced()\n assert self.job_config_history['HISTORY_ROOT_DIR'] == self.config_page.get_history_root_dir()\n assert self.job_config_history['MAX_HISTORY_ENTRIES'] == self.config_page.get_max_history_entries()\n assert str(self.job_config_history['SKIP_DUPLICATE_HISTORY']).lower() == self.config_page.get_skip_duplicate_history()\n assert self.job_config_history['SHOW_BUILD_BADGES'] == self.config_page.get_show_build_badges()\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserinfoSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Userinfo
fields = ('fname', 'lname', 'address', 'city', 'state', 'zipcode',
'dob', 'phone', 'email', 'author')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class billsSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = bills
fields = 'bname', 'bamount', 'duedate', 'user_id'
class UserinfoSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Userinfo
fields = ('fname', 'lname', 'address', 'city', 'state', 'zipcode',
'dob', 'phone', 'email', 'author')
<|reserved_special_token_1|>
from rest_framework import serializers
from users.models import bills, Userinfo
class billsSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = bills
fields = 'bname', 'bamount', 'duedate', 'user_id'
class UserinfoSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Userinfo
fields = ('fname', 'lname', 'address', 'city', 'state', 'zipcode',
'dob', 'phone', 'email', 'author')
<|reserved_special_token_1|>
from rest_framework import serializers
from users.models import bills, Userinfo
class billsSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes bills records: bill name, amount, due date, owning user id."""

    class Meta:
        # Django model backing this serializer.
        model = bills
        # Fields exposed through the API.
        fields = ('bname', 'bamount', 'duedate', 'user_id')
class UserinfoSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes Userinfo records: personal, address and contact fields."""

    class Meta:
        # Django model backing this serializer.
        model = Userinfo
        # Fields exposed through the API.
        fields = ('fname', 'lname', 'address', 'city', 'state', 'zipcode', 'dob', 'phone', 'email', 'author')
|
flexible
|
{
"blob_id": "124ece8f2f4ecc53d19657e2463cc608befb1ce7",
"index": 3722,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserinfoSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Userinfo\n fields = ('fname', 'lname', 'address', 'city', 'state', 'zipcode',\n 'dob', 'phone', 'email', 'author')\n",
"step-3": "<mask token>\n\n\nclass billsSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = bills\n fields = 'bname', 'bamount', 'duedate', 'user_id'\n\n\nclass UserinfoSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Userinfo\n fields = ('fname', 'lname', 'address', 'city', 'state', 'zipcode',\n 'dob', 'phone', 'email', 'author')\n",
"step-4": "from rest_framework import serializers\nfrom users.models import bills, Userinfo\n\n\nclass billsSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = bills\n fields = 'bname', 'bamount', 'duedate', 'user_id'\n\n\nclass UserinfoSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Userinfo\n fields = ('fname', 'lname', 'address', 'city', 'state', 'zipcode',\n 'dob', 'phone', 'email', 'author')\n",
"step-5": "from rest_framework import serializers\nfrom users.models import bills, Userinfo\n\nclass billsSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = bills\n fields = ('bname', 'bamount', 'duedate', 'user_id')\n\n\nclass UserinfoSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Userinfo\n fields = ('fname', 'lname', 'address', 'city', 'state', 'zipcode', 'dob', 'phone', 'email', 'author')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import time
import datetime
import math
import os
import random
import logzero
import logging
from logzero import logger
from sense_hat import SenseHat
import ephem
# Flag flipped to True by any exception handler below; logged at end of run.
anyException = False
# Program runtime budget in minutes, kept here at the top for easy access.
programTime = 175
# 175 minutes = 2 h 55 min of runtime.
# ____________________________
# DEFINE FUNCTIONS
# ____________________________
def setLoggingFile():
    """Pick a dataNN.csv log file name and configure logzero to use it.

    Scans the script's own directory and takes the first of
    data01.csv..data04.csv that does not exist yet; if the scan reaches
    data05.csv it wraps around and reuses (overwrites) data01.csv.
    On any error the name falls back to data01.csv.
    """
    global anyException
    # Fix: dirPath and nameOfFile were previously assigned only inside the
    # try block but used after it, so a failure (or an empty directory
    # listing) left them unbound and crashed with a NameError.  Compute the
    # directory and a safe default name up front.
    dirPath = os.path.dirname(os.path.realpath(__file__))
    nameOfFile = 'data01.csv'
    try:
        dirFiles = os.listdir(dirPath)
        for itemNr in range(len(dirFiles)):
            nameOfFile = 'data0' + str(itemNr + 1) + ".csv"
            if nameOfFile == 'data05.csv':
                # All four slots taken: wrap around and overwrite the first.
                nameOfFile = 'data01.csv'
                break
            if nameOfFile in dirFiles:
                print('this file exsist' + str(nameOfFile))
            else:
                # Found a free slot.
                break
    except Exception:
        # Remember that something went wrong; it is logged at end of run.
        anyException = True
        nameOfFile = 'data01.csv'
    # Configure logzero with the chosen file and a custom CSV-ish format.
    logzero.logfile(dirPath + "/" + nameOfFile)
    print(dirPath + "/" + nameOfFile)
    formatter = logging.Formatter('_%(levelname)s_,line: %(lineno)d, %(message)s')
    logzero.formatter(formatter)
def isItOversized():
    """Return True when files in this script's directory total >= 3 GiB.

    Used to keep the logged data under the storage budget.  On any error
    it logs the failure and defaults to False (not oversized).
    """
    global anyException
    try:
        dirPath = os.path.dirname(os.path.realpath(__file__))
        dirFiles = os.listdir(dirPath)
        filesSize = 0
        for file in dirFiles:
            # Fix: the original called os.stat(file) with a bare name,
            # which resolves against the current working directory rather
            # than the script directory and fails (or sizes the wrong
            # files) when run from elsewhere.
            filesSize += os.stat(os.path.join(dirPath, file)).st_size
        # 3 GiB = 3221225472 bytes.
        return filesSize >= 3221225472
    except Exception as e_oversizedFun_ecxeption:
        anyException = True
        logger.error('Time from start: %s,Time is: %s,ERROR: %s',timer1.minsOfRun(),timer1.nowForLog(),str(e_oversizedFun_ecxeption))
        pictures('error')
        # Default to "not oversized" so the caller keeps running.
        return False
def measure(whatToMeasure):
    '''
    Average ten SenseHat readings of one environmental quantity.

    Parameters:
        whatToMeasure: 'temp', 'hum' or 'press'.
    Returns:
        The average of ten readings rounded to 2 decimals, the string
        'ERROR' when five zero readings occur (a reading of exactly 0 is
        treated as corrupted - real sensor values always carry a
        fractional part) or when any sensor call raises, and None for an
        unknown quantity (mirrors the original fall-through behaviour).
    '''
    def _averageOfTen(readFn, label):
        # One sampling run shared by all three quantities: ten samples,
        # each retried while it reads 0, giving up after 5 zero reads.
        print(label + ' MEASURE:')
        total = 0
        failed = 0
        for dummy in range(10):
            # sleep betwen measurements
            time.sleep(0.2)
            value = 0
            while failed < 5:
                value = readFn()
                print(value)
                if value != 0:
                    break
                # corrupted sample - count it and retry
                failed += 1
            if failed >= 5:
                # five corrupted samples - give up on this quantity
                # (the original kept looping with a stale reading but
                # also ended up returning 'ERROR')
                return 'ERROR'
            total += value
        # count the average and round it
        average = round(total / 10, 2)
        print('MEASURED ' + label + ' IS: ' + str(average))
        return average

    try:
        # map the requested quantity to its SenseHat getter and log label
        readers = {
            'temp': (sh.get_temperature, 'TEMP'),
            'hum': (sh.get_humidity, 'HUM'),
            'press': (sh.get_pressure, 'PRESS'),
        }
        if whatToMeasure not in readers:
            # unknown quantity: keep the original implicit-None behaviour
            return None
        readFn, label = readers[whatToMeasure]
        return _averageOfTen(readFn, label)
    # handle exception and log it, display error image on screen
    except Exception as e_measure_eception:
        global anyException
        anyException = True
        logger.error('Time from start: %s,Time is: %s,ERROR: %s',timer1.minsOfRun(),timer1.nowForLog(),str(e_measure_eception))
        pictures('error')
        return "ERROR"
def pictures(idImg):
    '''
    Draw an image / animation on the 8x8 SenseHat LED matrix.

    Parameters:
        idImg: one of 'welcome', 'temp', 'hum', 'press', 'end', 'error',
          'wait', 'reset'.  Any other value just shows the green
          "working" screen (see the final if at the bottom).
    Returns:
        None normally; the string "ERROR" when drawing fails.
    '''
    try:
        # Set display rotation on 0 deg
        rot = 0
        sh.set_rotation(rot)
        # Define some colors - keep brightness low (dim RGB values)
        r = [50,0,0]
        g = [0,50,0]
        b = [0,0,50]
        p = [50,0,50]
        o = [0,0,0]
        w = [50,50,50]
        orientation = [0,90,180,270]
        # Define the images: each list is 64 pixels, 8 rows of 8
        welcome_img = [
        o,o,w,w,w,w,o,o,
        o,w,w,w,w,w,w,o,
        w,w,w,w,w,w,w,w,
        w,w,w,w,w,w,w,w,
        r,r,r,r,r,r,r,r,
        r,o,r,r,r,r,o,r,
        o,r,r,r,r,r,r,o,
        o,o,r,r,r,r,o,o,
        ]
        wait_img = [
        g,g,g,g,g,g,g,g,
        o,g,o,o,o,o,g,o,
        o,o,g,o,o,g,o,o,
        o,o,o,g,g,o,o,o,
        o,o,o,g,g,o,o,o,
        o,o,g,g,g,g,o,o,
        o,g,g,g,g,g,g,o,
        g,g,g,g,g,g,g,g,
        ]
        # temp_img1..6 are frames of a thermometer filling up
        temp_img1 = [
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,r,b,b,b,r,o,
        o,o,r,b,b,b,r,o,
        o,o,o,r,r,r,o,o,
        ]
        temp_img2 = [
        o,o,o,r,r,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,r,b,b,b,r,o,
        o,o,r,b,b,b,r,o,
        o,o,o,r,r,r,o,o,
        ]
        temp_img3 = [
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,r,b,b,b,r,o,
        o,o,r,b,b,b,r,o,
        o,o,o,r,r,r,o,o,
        ]
        temp_img4 = [
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,r,b,b,b,r,o,
        o,o,r,b,b,b,r,o,
        o,o,o,r,r,r,o,o,
        ]
        temp_img5 = [
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,r,b,b,b,r,o,
        o,o,r,b,b,b,r,o,
        o,o,o,r,r,r,o,o,
        ]
        temp_img6 = [
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,r,b,b,b,r,o,
        o,o,r,b,b,b,r,o,
        o,o,o,r,r,r,o,o,
        ]
        # hum_img1..5 are frames of a falling water drop
        hum_img1 = [
        o,o,o,o,b,o,o,o,
        o,o,o,b,b,o,o,o,
        o,o,b,b,b,b,o,o,
        o,o,b,b,b,b,o,o,
        o,b,b,b,b,b,b,o,
        o,b,b,b,b,b,b,o,
        o,o,b,b,b,b,o,o,
        o,o,o,b,b,o,o,o,
        ]
        hum_img2 = [
        o,o,o,o,o,o,o,o,
        o,o,o,o,b,o,o,o,
        o,o,o,b,b,o,o,o,
        o,o,b,b,b,b,o,o,
        o,o,b,b,b,b,o,o,
        o,b,b,b,b,b,b,o,
        o,b,b,b,b,b,b,o,
        o,o,b,b,b,b,o,o,
        ]
        hum_img3 = [
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,b,o,o,o,
        o,o,o,b,b,o,o,o,
        o,o,b,b,b,b,o,o,
        o,o,b,b,b,b,o,o,
        o,b,b,b,b,b,b,o,
        b,b,b,b,b,b,b,b,
        ]
        hum_img4 = [
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,b,o,o,o,
        o,o,o,b,b,o,o,o,
        o,o,b,b,b,b,o,o,
        b,b,b,b,b,b,b,b,
        ]
        hum_img5 = [
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        ]
        # press_img1..14 are frames of an arrow pressing toward a wall
        press_img1 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        p,o,o,o,o,o,o,p,
        p,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img2 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        p,o,o,o,o,o,o,p,
        p,p,o,o,o,o,o,p,
        p,p,o,o,o,o,o,p,
        p,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img3 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,p,o,o,o,o,o,p,
        p,p,p,o,o,o,o,p,
        p,p,p,o,o,o,o,p,
        o,p,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img4 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,p,o,o,o,o,p,
        p,p,p,p,o,o,o,p,
        p,p,p,p,o,o,o,p,
        o,o,p,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img5 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,p,o,o,o,p,
        p,p,p,p,p,o,o,p,
        p,p,p,p,p,o,o,p,
        o,o,o,p,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img6 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,p,o,o,p,
        p,p,p,p,p,p,o,p,
        p,p,p,p,p,p,o,p,
        o,o,o,o,p,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img7 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,p,o,p,
        p,p,p,p,p,p,p,p,
        p,p,p,p,p,p,p,p,
        o,o,o,o,o,p,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img8 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,p,p,
        o,p,p,p,p,p,p,p,
        o,p,p,p,p,p,p,p,
        o,o,o,o,o,o,p,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img9 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,p,p,p,p,p,p,
        o,o,p,p,p,p,p,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img10 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,p,p,p,p,p,
        o,o,o,p,p,p,p,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img11 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,p,p,p,p,
        o,o,o,o,p,p,p,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img12 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,p,p,p,
        o,o,o,o,o,p,p,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img13 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,p,p,
        o,o,o,o,o,o,p,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img14 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        # solid colour screens: green=working, red=error, blue=end, off=reset
        working_array= [
        g,g,g,g,g,g,g,g,
        g,g,g,g,g,g,g,g,
        g,g,g,g,g,g,g,g,
        g,g,g,g,g,g,g,g,
        g,g,g,g,g,g,g,g,
        g,g,g,g,g,g,g,g,
        g,g,g,g,g,g,g,g,
        g,g,g,g,g,g,g,g,
        ]
        error_array=[
        r,r,r,r,r,r,r,r,
        r,r,r,r,r,r,r,r,
        r,r,r,r,r,r,r,r,
        r,r,r,r,r,r,r,r,
        r,r,r,r,r,r,r,r,
        r,r,r,r,r,r,r,r,
        r,r,r,r,r,r,r,r,
        r,r,r,r,r,r,r,r,
        ]
        end_array=[
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        ]
        reset_array=[
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        ]
        # routes to display diffrent images
        if idImg == 'welcome':
            sh.show_message('Welcome to PAPi', text_colour = w, scroll_speed=0.05)
            #This is a quote from one of a 'Country balls comics'
            sh.show_message('Poland can ', text_colour = w, scroll_speed=0.05)
            sh.show_message('into space!', text_colour = r, scroll_speed=0.05)
            #Polish flag is upside down becouse this picture represent one of the 'Country balls'
            #specificaly a 'Poland ball', which have colors of Monako hahahaha!
            sh.set_rotation(180)
            sh.set_pixels(welcome_img)
            time.sleep(2)
            sh.set_rotation(0)
        # play frames forward then backward for a bounce effect
        temp_array = [temp_img1, temp_img2,temp_img3, temp_img4, temp_img5, temp_img6, temp_img5, temp_img4, temp_img3, temp_img2, temp_img1]
        if idImg == 'temp':
            for temp_anim in temp_array:
                sh.set_pixels(temp_anim)
                time.sleep(0.1)
        hum_array = [hum_img1, hum_img2, hum_img3, hum_img4, hum_img5]
        if idImg == 'hum':
            for hum_anim in hum_array:
                sh.set_pixels(hum_anim)
                time.sleep(0.5)
        press_array = [press_img1, press_img2, press_img3, press_img4, press_img5,press_img6, press_img7, press_img8, press_img9, press_img10, press_img11, press_img12, press_img13, press_img14]
        if idImg == 'press':
            for press_anim in press_array:
                sh.set_pixels(press_anim)
                time.sleep(0.1)
        if idImg == 'end':
            sh.set_pixels(end_array)
            time.sleep(0.3)
        if idImg == 'error':
            sh.set_pixels(error_array)
            time.sleep(0.5)
        if idImg == 'wait':
            sh.set_pixels(wait_img)
            # here we have animation by rotating the whole screen
            for rot in orientation:
                sh.set_rotation(rot)
                time.sleep(0.2)
        if idImg == 'reset':
            sh.set_pixels(reset_array)
        # for every idImg except the four below, leave the green screen up
        # until the next call - green means everything is okay
        if (idImg != 'wait' and idImg != 'end' and idImg != 'error' and idImg != 'reset'):
            sh.set_pixels(working_array)
    # Handle the exception
    except Exception as e_display_eception:
        global anyException
        anyException = True
        logger.error('Time from start: %s,Time is: %s,ERROR: %s',timer1.minsOfRun(),timer1.nowForLog(),str(e_display_eception))
        return "ERROR"
def showInfo(measure):
    '''
    Scroll a value across the SenseHat LED matrix in a random colour.

    Parameters:
        measure: any value; it is converted with str() before display.
          (The parameter name shadows the module-level measure() function
          inside this body; kept unchanged for backward compatibility.)
    '''
    try:
        # define RGB colors - keep brightness low
        r = [50,0,0]
        g = [0,50,0]
        b = [0,0,50]
        p = [50,0,50]
        c = [0,50,50]
        u = [50,50,0]
        # store all colors in one array and draw one at random
        textColours = [r,g,b,p,c,u]
        color = random.choice(textColours)
        # set rotation to 0 degrees
        sh.set_rotation(0)
        # show message
        sh.show_message(str(measure), text_colour =color, scroll_speed=0.05)
        # sleep one secound
        time.sleep(1)
    # handle the exception
    except Exception as e_displayText_exception:
        # record the failure like every other handler in this script
        # (the original forgot to set anyException here)
        global anyException
        anyException = True
        logger.error('cannot show message on senseHat: %s',e_displayText_exception)
        pictures('error')
def ephemISS():
    '''
    Report which hemisphere the ISS is currently over.

    Uses a fixed two-line element set (source: celestrak.com, frozen at
    26.01.2019) and the ephem library.  Returns a short English sentence,
    or 'ERROR' when the computation fails.
    '''
    try:
        # TLE data for the station (celestrak.com, 26.01.2019)
        tleData = (
            'ISS (ZARYA)',
            '1 25544U 98067A 19027.58387731 .00001656 00000-0 33287-4 0 9996',
            '2 25544 51.6426 340.5081 0004927 322.6857 20.8029 15.53199695153409',
        )
        station = ephem.readtle(tleData[0], tleData[1], tleData[2])
        station.compute()
        # a negative sub-latitude means the station is south of the equator
        hemisphere = 'Southern' if station.sublat < 0 else 'Northern'
        return 'ISS is in ' + hemisphere + ' hemisphere'
    # handle the exception
    except Exception as e_ISS_eception:
        global anyException
        anyException = True
        logger.error('Cannot get EPHEM resulit eroor:%s',str(e_ISS_eception))
        pictures('error')
        return "ERROR"
class timer:
    '''
    Small helper around datetime for run-time bookkeeping and log stamps.
    '''

    def __init__(self):
        # moment the program started and the planned end of the run
        # (programTime minutes later, defined at the top of the file)
        self.startTime = datetime.datetime.now()
        self.endTime = self.startTime + datetime.timedelta(minutes=programTime)

    def minsOfRun(self):
        '''Elapsed run time as 'H:MM:SS' (string trimmed for log layout).'''
        elapsed = datetime.datetime.now() - self.startTime
        return str(elapsed)[:7]

    def now(self):
        '''Remember and return the current datetime.'''
        self.time = datetime.datetime.now()
        return self.time

    def nowForLog(self):
        '''Current wall-clock time as 'HH:MM:SS' for nicer log entries.'''
        stamp = str(datetime.datetime.now())
        return stamp[11:19]
# ____________________________
# INICIALIZE PROGRAM
# ____________________________
try:
    # create the run timer (fixes start/end time of the experiment)
    timer1 = timer()
    # call function to setup logging
    setLoggingFile()
    # first log
    logger.debug('starting program,time is: %s, program will be running for: %smin',timer1.now(),programTime)
    # log which hemisphere the ISS is over (ephem TLE computation)
    logger.info('EPHEM: %s',ephemISS())
    # connect to SenseHat
    sh = SenseHat()
    # give the hardware a moment before the first draw
    time.sleep(2)
    # show welcome screen
    pictures('welcome')
# handle the exception - note the script still continues after a failed
# init, later code will then raise and be caught per-round
except Exception as e_init_exception:
    anyException = True
    logger.error('INIT ERROR: %s',str(e_init_exception))
# ____________________________
# SET VARIABLES
# ____________________________
# Running totals for the averages and the count of valid rounds.
averageTemp=0
averageHum=0
averagePress=0
rounds = 0
# Extremes start at +/- infinity so the first valid reading always
# replaces them (the original started the maxima at 0, which would hide
# any sub-zero temperature).
lowestTemp = float('inf')
lowestHum = float('inf')
lowestPress = float('inf')
higestTemp = float('-inf')
higestHum = float('-inf')
higestPress = float('-inf')
# ____________________________
# MAIN LOOP OF PROGRAM
# ____________________________
# while timer1 obiect (time now) is smaller than (endTime)
# Main measurement loop: one "round" per iteration until the planned end
# time (timer1.endTime) is reached or storage runs out.
while(timer1.now()<timer1.endTime):
    try:
        # stop early when the script directory approaches the 3 GiB quota
        if(isItOversized()):
            logger.debug('OVERSIZED EXITING')
            break
        # count this round (averages are divided by this later)
        rounds+=1
        # log round and time of start
        logger.debug('Start round: %s,Time from start: %s',rounds,timer1.minsOfRun())
        # show wait image and scroll the active round number
        pictures('wait')
        showInfo('round: '+str(rounds))
        # temperature: animation, measurement, result on screen
        pictures('temp')
        tempNowIs = measure('temp')
        showInfo(str(tempNowIs)+" 'C")
        # same for humidity
        pictures('hum')
        humNowIs = measure('hum')
        showInfo(str(humNowIs)+' %')
        # same for pressure
        pictures('press')
        pressNowIs = measure('press')
        showInfo(str(pressNowIs)+' mbar')
        # log all results (this is the main data record of the run)
        logger.info('Time is: %s,Time from start: %s,Temp: %s,Hum: %s,Press: %s',timer1.nowForLog(),timer1.minsOfRun(),tempNowIs,humNowIs,pressNowIs)
        # only fold fully successful rounds into the running statistics
        if(tempNowIs != 'ERROR' and humNowIs != 'ERROR' and pressNowIs != 'ERROR'):
            averageTemp+=tempNowIs
            averageHum+=humNowIs
            averagePress+=pressNowIs
            if tempNowIs<lowestTemp:
                lowestTemp = tempNowIs
            if tempNowIs>higestTemp:
                higestTemp = tempNowIs
            if humNowIs < lowestHum:
                lowestHum = humNowIs
            if humNowIs>higestHum:
                higestHum = humNowIs
            if pressNowIs<lowestPress:
                lowestPress = pressNowIs
            if pressNowIs>higestPress:
                higestPress = pressNowIs
        else:
            # a failed round does not count toward the averages
            rounds-=1
    # handle main exception: log, show red screen, keep looping
    except Exception as e_main_exception:
        anyException = True
        logger.error('Time from start: %s,Time is: %s,ERROR: %s',timer1.minsOfRun(),timer1.nowForLog(),str(e_main_exception))
        pictures('error')
# ____________________________
# AFTER MAIN LOOP
# ____________________________
try:
    pictures('end')
    # calculate and round average measurements
    # (if no round succeeded, rounds == 0 and the division below raises
    # ZeroDivisionError - it is caught by the except at the bottom)
    averageTemp/=rounds
    averageHum/=rounds
    averagePress/=rounds
    averageTemp = round(averageTemp,2)
    averageHum = round(averageHum,2)
    averagePress = round(averagePress,2)
    lowestTemp = round(lowestTemp,2)
    lowestHum = round(lowestHum,2)
    lowestPress = round(lowestPress,2)
    higestTemp = round(higestTemp,2)
    higestHum = round(higestHum,2)
    higestPress = round(higestPress,2)
    # log the summary statistics of the whole run
    logger.info('average Temp: %s,average hum: %s,average press: %s',averageTemp,averageHum,averagePress)
    logger.info('Temp: highest: %s lowest: %s ,Hum: highest: %s lowest: %s ,Press: highest: %s lowest: %s',higestTemp,lowestTemp,higestHum,lowestHum,higestPress,lowestPress)
    logger.debug('code succesfully exited after: %s,expected time: %smin, time of end is: %s, problems: %s',timer1.minsOfRun(),programTime,timer1.now(),anyException)
    logger.debug('program ended with %s rounds of collecting data',rounds)
    # say goodbye and blank the LED matrix
    time.sleep(3)
    pictures('reset')
# exit on that Exception
except Exception as e_sumUp_exception:
    print('CANNOT SUMUP DATA, EXITING')
    logger.error('SUMUP ERROR: %s',str(e_sumUp_exception))
    exit()
|
normal
|
{
"blob_id": "05e468c2f64e33d6b390f681314ed7961bd4def7",
"index": 2684,
"step-1": "<mask token>\n\n\ndef setLoggingFile():\n \"\"\"\n This function will setup a logger and logfile\n \"\"\"\n try:\n dirPath = os.path.dirname(os.path.realpath(__file__))\n dirFiles = os.listdir(dirPath)\n for itemNr in range(len(dirFiles)):\n nameOfFile = 'data0' + str(itemNr + 1) + '.csv'\n if nameOfFile == 'data05.csv':\n nameOfFile = 'data01.csv'\n break\n if nameOfFile in dirFiles:\n print('this file exsist' + str(nameOfFile))\n else:\n break\n except Exception as dummy:\n global anyException\n anyException = True\n nameOfFile = 'data01.csv'\n logzero.logfile(dirPath + '/' + nameOfFile)\n print(dirPath + '/' + nameOfFile)\n formatter = logging.Formatter(\n '_%(levelname)s_,line: %(lineno)d, %(message)s')\n logzero.formatter(formatter)\n\n\n<mask token>\n\n\ndef measure(whatToMeasure):\n \"\"\"\n This function will measure temperature, humidity and pressure\n \"\"\"\n temp = 0\n hum = 0\n press = 0\n failed = 0\n try:\n if whatToMeasure == 'temp':\n print('TEMP MEASURE:')\n for dummy in range(10):\n time.sleep(0.2)\n while failed < 5:\n tempNow = sh.get_temperature()\n print(tempNow)\n if tempNow != 0:\n break\n else:\n failed += 1\n temp += tempNow\n if failed < 5:\n temp /= 10\n temp = round(temp, 2)\n print('MEASURED TEMP IS: ' + str(temp))\n else:\n temp = 'ERROR'\n return temp\n if whatToMeasure == 'hum':\n print('HUM MEASURE:')\n for dummy in range(10):\n time.sleep(0.2)\n while failed < 5:\n humNow = sh.get_humidity()\n print(humNow)\n if humNow != 0:\n break\n else:\n failed += 1\n hum += humNow\n if failed < 5:\n hum /= 10\n hum = round(hum, 2)\n print('MEASURED HUM IS: ' + str(hum))\n else:\n hum = 'ERROR'\n return hum\n if whatToMeasure == 'press':\n print('PRESS MEASURE:')\n for dummy in range(10):\n time.sleep(0.2)\n while failed < 5:\n pressNow = sh.get_pressure()\n print(pressNow)\n if pressNow != 0:\n break\n else:\n failed += 1\n press += pressNow\n if failed < 5:\n press /= 10\n press = round(press, 2)\n print('MEASURED PRESS IS: ' + 
str(press))\n else:\n press = 'ERROR'\n return press\n except Exception as e_measure_eception:\n global anyException\n anyException = True\n logger.error('Time from start: %s,Time is: %s,ERROR: %s', timer1.\n minsOfRun(), timer1.nowForLog(), str(e_measure_eception))\n pictures('error')\n return 'ERROR'\n\n\n<mask token>\n\n\ndef showInfo(measure):\n \"\"\"\n that function gets random color and displays measure parameter on SenseHat screen\n \"\"\"\n try:\n r = [50, 0, 0]\n g = [0, 50, 0]\n b = [0, 0, 50]\n p = [50, 0, 50]\n c = [0, 50, 50]\n u = [50, 50, 0]\n textColours = [r, g, b, p, c, u]\n color = textColours[random.randint(0, len(textColours) - 1)]\n sh.set_rotation(0)\n sh.show_message(str(measure), text_colour=color, scroll_speed=0.05)\n time.sleep(1)\n except Exception as e_displayText_exception:\n logger.error('cannot show message on senseHat: %s',\n e_displayText_exception)\n pictures('error')\n\n\ndef ephemISS():\n \"\"\"\n Ephem module funciton\n \"\"\"\n try:\n nameOfStation = 'ISS (ZARYA)'\n firstLine = (\n '1 25544U 98067A 19027.58387731 .00001656 00000-0 33287-4 0 9996'\n )\n secondLine = (\n '2 25544 51.6426 340.5081 0004927 322.6857 20.8029 15.53199695153409'\n )\n stationISS = ephem.readtle(nameOfStation, firstLine, secondLine)\n stationISS.compute()\n if stationISS.sublat < 0:\n return 'ISS is in Southern hemisphere'\n else:\n return 'ISS is in Northern hemisphere'\n except Exception as e_ISS_eception:\n global anyException\n anyException = True\n logger.error('Cannot get EPHEM resulit eroor:%s', str(e_ISS_eception))\n pictures('error')\n return 'ERROR'\n\n\nclass timer:\n \"\"\"\n Timer obiect give us easier ability to count time and get better logs\n\n \"\"\"\n\n def __init__(self):\n self.startTime = datetime.datetime.now()\n self.endTime = self.startTime + datetime.timedelta(minutes=programTime)\n\n def minsOfRun(self):\n return str(datetime.datetime.now() - self.startTime)[:7]\n\n def now(self):\n self.time = datetime.datetime.now()\n 
return self.time\n\n def nowForLog(self):\n return str(datetime.datetime.now())[11:19]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef setLoggingFile():\n \"\"\"\n This function will setup a logger and logfile\n \"\"\"\n try:\n dirPath = os.path.dirname(os.path.realpath(__file__))\n dirFiles = os.listdir(dirPath)\n for itemNr in range(len(dirFiles)):\n nameOfFile = 'data0' + str(itemNr + 1) + '.csv'\n if nameOfFile == 'data05.csv':\n nameOfFile = 'data01.csv'\n break\n if nameOfFile in dirFiles:\n print('this file exsist' + str(nameOfFile))\n else:\n break\n except Exception as dummy:\n global anyException\n anyException = True\n nameOfFile = 'data01.csv'\n logzero.logfile(dirPath + '/' + nameOfFile)\n print(dirPath + '/' + nameOfFile)\n formatter = logging.Formatter(\n '_%(levelname)s_,line: %(lineno)d, %(message)s')\n logzero.formatter(formatter)\n\n\ndef isItOversized():\n \"\"\"\n This function will check storage used to be sure files weight are less than 3gb\n \"\"\"\n try:\n dirPath = os.path.dirname(os.path.realpath(__file__))\n dirFiles = os.listdir(dirPath)\n filesSize = 0\n for file in dirFiles:\n filesSize += os.stat(file).st_size\n if filesSize < 3221225472:\n return False\n else:\n return True\n except Exception as e_oversizedFun_ecxeption:\n global anyException\n anyException = True\n logger.error('Time from start: %s,Time is: %s,ERROR: %s', timer1.\n minsOfRun(), timer1.nowForLog(), str(e_oversizedFun_ecxeption))\n pictures('error')\n return False\n\n\ndef measure(whatToMeasure):\n \"\"\"\n This function will measure temperature, humidity and pressure\n \"\"\"\n temp = 0\n hum = 0\n press = 0\n failed = 0\n try:\n if whatToMeasure == 'temp':\n print('TEMP MEASURE:')\n for dummy in range(10):\n time.sleep(0.2)\n while failed < 5:\n tempNow = sh.get_temperature()\n print(tempNow)\n if tempNow != 0:\n break\n else:\n failed += 1\n temp += tempNow\n if failed < 5:\n temp /= 10\n temp = round(temp, 2)\n print('MEASURED TEMP IS: ' + str(temp))\n else:\n temp = 'ERROR'\n return temp\n if whatToMeasure == 'hum':\n print('HUM MEASURE:')\n for dummy in 
range(10):\n time.sleep(0.2)\n while failed < 5:\n humNow = sh.get_humidity()\n print(humNow)\n if humNow != 0:\n break\n else:\n failed += 1\n hum += humNow\n if failed < 5:\n hum /= 10\n hum = round(hum, 2)\n print('MEASURED HUM IS: ' + str(hum))\n else:\n hum = 'ERROR'\n return hum\n if whatToMeasure == 'press':\n print('PRESS MEASURE:')\n for dummy in range(10):\n time.sleep(0.2)\n while failed < 5:\n pressNow = sh.get_pressure()\n print(pressNow)\n if pressNow != 0:\n break\n else:\n failed += 1\n press += pressNow\n if failed < 5:\n press /= 10\n press = round(press, 2)\n print('MEASURED PRESS IS: ' + str(press))\n else:\n press = 'ERROR'\n return press\n except Exception as e_measure_eception:\n global anyException\n anyException = True\n logger.error('Time from start: %s,Time is: %s,ERROR: %s', timer1.\n minsOfRun(), timer1.nowForLog(), str(e_measure_eception))\n pictures('error')\n return 'ERROR'\n\n\ndef pictures(idImg):\n \"\"\"\n This function displays images on SnenseHat pixel matrix scrren\n \"\"\"\n try:\n rot = 0\n sh.set_rotation(rot)\n r = [50, 0, 0]\n g = [0, 50, 0]\n b = [0, 0, 50]\n p = [50, 0, 50]\n o = [0, 0, 0]\n w = [50, 50, 50]\n orientation = [0, 90, 180, 270]\n welcome_img = [o, o, w, w, w, w, o, o, o, w, w, w, w, w, w, o, w, w,\n w, w, w, w, w, w, w, w, w, w, w, w, w, w, r, r, r, r, r, r, r,\n r, r, o, r, r, r, r, o, r, o, r, r, r, r, r, r, o, o, o, r, r,\n r, r, o, o]\n wait_img = [g, g, g, g, g, g, g, g, o, g, o, o, o, o, g, o, o, o, g,\n o, o, g, o, o, o, o, o, g, g, o, o, o, o, o, o, g, g, o, o, o,\n o, o, g, g, g, g, o, o, o, g, g, g, g, g, g, o, g, g, g, g, g,\n g, g, g]\n temp_img1 = [o, o, o, r, b, r, o, o, o, o, o, r, b, r, o, o, o, o,\n o, r, b, r, o, o, o, o, o, r, b, r, o, o, o, o, o, r, b, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n temp_img2 = [o, o, o, r, r, r, o, o, o, o, o, r, b, r, o, o, o, o,\n o, r, b, r, o, o, o, o, o, r, b, r, o, o, o, o, o, r, b, r, o,\n o, o, o, r, b, b, 
b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n temp_img3 = [o, o, o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o,\n o, r, b, r, o, o, o, o, o, r, b, r, o, o, o, o, o, r, b, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n temp_img4 = [o, o, o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o,\n o, r, r, r, o, o, o, o, o, r, b, r, o, o, o, o, o, r, b, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n temp_img5 = [o, o, o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o,\n o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o, o, r, b, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n temp_img6 = [o, o, o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o,\n o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o, o, r, r, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n hum_img1 = [o, o, o, o, b, o, o, o, o, o, o, b, b, o, o, o, o, o, b,\n b, b, b, o, o, o, o, b, b, b, b, o, o, o, b, b, b, b, b, b, o,\n o, b, b, b, b, b, b, o, o, o, b, b, b, b, o, o, o, o, o, b, b,\n o, o, o]\n hum_img2 = [o, o, o, o, o, o, o, o, o, o, o, o, b, o, o, o, o, o, o,\n b, b, o, o, o, o, o, b, b, b, b, o, o, o, o, b, b, b, b, o, o,\n o, b, b, b, b, b, b, o, o, b, b, b, b, b, b, o, o, o, b, b, b,\n b, o, o]\n hum_img3 = [o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, b, o, o, o, o, o, o, b, b, o, o, o, o, o, b, b, b, b, o, o,\n o, o, b, b, b, b, o, o, o, b, b, b, b, b, b, o, b, b, b, b, b,\n b, b, b]\n hum_img4 = [o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, b, o, o, o,\n o, o, o, b, b, o, o, o, o, o, b, b, b, b, o, o, b, b, b, b, b,\n b, b, b]\n hum_img5 = [o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o, o, o, o, o, b, b, b, b, b, b, b, b, b, b, b, b, b,\n b, b, b]\n press_img1 = [o, o, o, o, 
o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, p, o, o, o, o, o, o, p, p, o, o, o, o, o, o,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img2 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, p, o,\n o, o, o, o, o, p, p, p, o, o, o, o, o, p, p, p, o, o, o, o, o,\n p, p, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img3 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, p,\n o, o, o, o, o, p, p, p, p, o, o, o, o, p, p, p, p, o, o, o, o,\n p, o, p, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img4 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n p, o, o, o, o, p, p, p, p, p, o, o, o, p, p, p, p, p, o, o, o,\n p, o, o, p, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img5 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, p, o, o, o, p, p, p, p, p, p, o, o, p, p, p, p, p, p, o, o,\n p, o, o, o, p, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img6 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, p, o, o, p, p, p, p, p, p, p, o, p, p, p, p, p, p, p, o,\n p, o, o, o, o, p, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img7 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, p, o, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p,\n p, o, o, o, o, o, p, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img8 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, p, p, o, p, p, p, p, p, p, p, o, p, p, p, p, p, p,\n p, o, o, o, o, o, o, p, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img9 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, p, p, p, p, p, p, o, o, p, p, p, p, p,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img10 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, o, 
p, p, p, p, p, o, o, o, p, p, p, p,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img11 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, o, o, p, p, p, p, o, o, o, o, p, p, p,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img12 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, o, o, o, p, p, p, o, o, o, o, o, p, p,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img13 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, o, o, o, o, p, p, o, o, o, o, o, o, p,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img14 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n working_array = [g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g,\n g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g,\n g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g,\n g, g, g, g, g]\n error_array = [r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r,\n r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r,\n r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r,\n r, r, r, r]\n end_array = [b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b,\n b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b,\n b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b,\n b, b, b, b]\n reset_array = [o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o]\n if idImg == 'welcome':\n sh.show_message('Welcome to PAPi', text_colour=w, scroll_speed=0.05\n )\n sh.show_message('Poland can ', text_colour=w, 
scroll_speed=0.05)\n sh.show_message('into space!', text_colour=r, scroll_speed=0.05)\n sh.set_rotation(180)\n sh.set_pixels(welcome_img)\n time.sleep(2)\n sh.set_rotation(0)\n temp_array = [temp_img1, temp_img2, temp_img3, temp_img4, temp_img5,\n temp_img6, temp_img5, temp_img4, temp_img3, temp_img2, temp_img1]\n if idImg == 'temp':\n for temp_anim in temp_array:\n sh.set_pixels(temp_anim)\n time.sleep(0.1)\n hum_array = [hum_img1, hum_img2, hum_img3, hum_img4, hum_img5]\n if idImg == 'hum':\n for hum_anim in hum_array:\n sh.set_pixels(hum_anim)\n time.sleep(0.5)\n press_array = [press_img1, press_img2, press_img3, press_img4,\n press_img5, press_img6, press_img7, press_img8, press_img9,\n press_img10, press_img11, press_img12, press_img13, press_img14]\n if idImg == 'press':\n for press_anim in press_array:\n sh.set_pixels(press_anim)\n time.sleep(0.1)\n if idImg == 'end':\n sh.set_pixels(end_array)\n time.sleep(0.3)\n if idImg == 'error':\n sh.set_pixels(error_array)\n time.sleep(0.5)\n if idImg == 'wait':\n sh.set_pixels(wait_img)\n for rot in orientation:\n sh.set_rotation(rot)\n time.sleep(0.2)\n if idImg == 'reset':\n sh.set_pixels(reset_array)\n if (idImg != 'wait' and idImg != 'end' and idImg != 'error' and \n idImg != 'reset'):\n sh.set_pixels(working_array)\n except Exception as e_display_eception:\n global anyException\n anyException = True\n logger.error('Time from start: %s,Time is: %s,ERROR: %s', timer1.\n minsOfRun(), timer1.nowForLog(), str(e_display_eception))\n return 'ERROR'\n\n\ndef showInfo(measure):\n \"\"\"\n that function gets random color and displays measure parameter on SenseHat screen\n \"\"\"\n try:\n r = [50, 0, 0]\n g = [0, 50, 0]\n b = [0, 0, 50]\n p = [50, 0, 50]\n c = [0, 50, 50]\n u = [50, 50, 0]\n textColours = [r, g, b, p, c, u]\n color = textColours[random.randint(0, len(textColours) - 1)]\n sh.set_rotation(0)\n sh.show_message(str(measure), text_colour=color, scroll_speed=0.05)\n time.sleep(1)\n except Exception as 
e_displayText_exception:\n logger.error('cannot show message on senseHat: %s',\n e_displayText_exception)\n pictures('error')\n\n\ndef ephemISS():\n \"\"\"\n Ephem module funciton\n \"\"\"\n try:\n nameOfStation = 'ISS (ZARYA)'\n firstLine = (\n '1 25544U 98067A 19027.58387731 .00001656 00000-0 33287-4 0 9996'\n )\n secondLine = (\n '2 25544 51.6426 340.5081 0004927 322.6857 20.8029 15.53199695153409'\n )\n stationISS = ephem.readtle(nameOfStation, firstLine, secondLine)\n stationISS.compute()\n if stationISS.sublat < 0:\n return 'ISS is in Southern hemisphere'\n else:\n return 'ISS is in Northern hemisphere'\n except Exception as e_ISS_eception:\n global anyException\n anyException = True\n logger.error('Cannot get EPHEM resulit eroor:%s', str(e_ISS_eception))\n pictures('error')\n return 'ERROR'\n\n\nclass timer:\n \"\"\"\n Timer obiect give us easier ability to count time and get better logs\n\n \"\"\"\n\n def __init__(self):\n self.startTime = datetime.datetime.now()\n self.endTime = self.startTime + datetime.timedelta(minutes=programTime)\n\n def minsOfRun(self):\n return str(datetime.datetime.now() - self.startTime)[:7]\n\n def now(self):\n self.time = datetime.datetime.now()\n return self.time\n\n def nowForLog(self):\n return str(datetime.datetime.now())[11:19]\n\n\n<mask token>\n",
"step-3": "<mask token>\nanyException = False\nprogramTime = 175\n\n\ndef setLoggingFile():\n \"\"\"\n This function will setup a logger and logfile\n \"\"\"\n try:\n dirPath = os.path.dirname(os.path.realpath(__file__))\n dirFiles = os.listdir(dirPath)\n for itemNr in range(len(dirFiles)):\n nameOfFile = 'data0' + str(itemNr + 1) + '.csv'\n if nameOfFile == 'data05.csv':\n nameOfFile = 'data01.csv'\n break\n if nameOfFile in dirFiles:\n print('this file exsist' + str(nameOfFile))\n else:\n break\n except Exception as dummy:\n global anyException\n anyException = True\n nameOfFile = 'data01.csv'\n logzero.logfile(dirPath + '/' + nameOfFile)\n print(dirPath + '/' + nameOfFile)\n formatter = logging.Formatter(\n '_%(levelname)s_,line: %(lineno)d, %(message)s')\n logzero.formatter(formatter)\n\n\ndef isItOversized():\n \"\"\"\n This function will check storage used to be sure files weight are less than 3gb\n \"\"\"\n try:\n dirPath = os.path.dirname(os.path.realpath(__file__))\n dirFiles = os.listdir(dirPath)\n filesSize = 0\n for file in dirFiles:\n filesSize += os.stat(file).st_size\n if filesSize < 3221225472:\n return False\n else:\n return True\n except Exception as e_oversizedFun_ecxeption:\n global anyException\n anyException = True\n logger.error('Time from start: %s,Time is: %s,ERROR: %s', timer1.\n minsOfRun(), timer1.nowForLog(), str(e_oversizedFun_ecxeption))\n pictures('error')\n return False\n\n\ndef measure(whatToMeasure):\n \"\"\"\n This function will measure temperature, humidity and pressure\n \"\"\"\n temp = 0\n hum = 0\n press = 0\n failed = 0\n try:\n if whatToMeasure == 'temp':\n print('TEMP MEASURE:')\n for dummy in range(10):\n time.sleep(0.2)\n while failed < 5:\n tempNow = sh.get_temperature()\n print(tempNow)\n if tempNow != 0:\n break\n else:\n failed += 1\n temp += tempNow\n if failed < 5:\n temp /= 10\n temp = round(temp, 2)\n print('MEASURED TEMP IS: ' + str(temp))\n else:\n temp = 'ERROR'\n return temp\n if whatToMeasure == 'hum':\n 
print('HUM MEASURE:')\n for dummy in range(10):\n time.sleep(0.2)\n while failed < 5:\n humNow = sh.get_humidity()\n print(humNow)\n if humNow != 0:\n break\n else:\n failed += 1\n hum += humNow\n if failed < 5:\n hum /= 10\n hum = round(hum, 2)\n print('MEASURED HUM IS: ' + str(hum))\n else:\n hum = 'ERROR'\n return hum\n if whatToMeasure == 'press':\n print('PRESS MEASURE:')\n for dummy in range(10):\n time.sleep(0.2)\n while failed < 5:\n pressNow = sh.get_pressure()\n print(pressNow)\n if pressNow != 0:\n break\n else:\n failed += 1\n press += pressNow\n if failed < 5:\n press /= 10\n press = round(press, 2)\n print('MEASURED PRESS IS: ' + str(press))\n else:\n press = 'ERROR'\n return press\n except Exception as e_measure_eception:\n global anyException\n anyException = True\n logger.error('Time from start: %s,Time is: %s,ERROR: %s', timer1.\n minsOfRun(), timer1.nowForLog(), str(e_measure_eception))\n pictures('error')\n return 'ERROR'\n\n\ndef pictures(idImg):\n \"\"\"\n This function displays images on SnenseHat pixel matrix scrren\n \"\"\"\n try:\n rot = 0\n sh.set_rotation(rot)\n r = [50, 0, 0]\n g = [0, 50, 0]\n b = [0, 0, 50]\n p = [50, 0, 50]\n o = [0, 0, 0]\n w = [50, 50, 50]\n orientation = [0, 90, 180, 270]\n welcome_img = [o, o, w, w, w, w, o, o, o, w, w, w, w, w, w, o, w, w,\n w, w, w, w, w, w, w, w, w, w, w, w, w, w, r, r, r, r, r, r, r,\n r, r, o, r, r, r, r, o, r, o, r, r, r, r, r, r, o, o, o, r, r,\n r, r, o, o]\n wait_img = [g, g, g, g, g, g, g, g, o, g, o, o, o, o, g, o, o, o, g,\n o, o, g, o, o, o, o, o, g, g, o, o, o, o, o, o, g, g, o, o, o,\n o, o, g, g, g, g, o, o, o, g, g, g, g, g, g, o, g, g, g, g, g,\n g, g, g]\n temp_img1 = [o, o, o, r, b, r, o, o, o, o, o, r, b, r, o, o, o, o,\n o, r, b, r, o, o, o, o, o, r, b, r, o, o, o, o, o, r, b, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n temp_img2 = [o, o, o, r, r, r, o, o, o, o, o, r, b, r, o, o, o, o,\n o, r, b, r, o, o, o, o, o, r, b, r, o, o, o, 
o, o, r, b, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n temp_img3 = [o, o, o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o,\n o, r, b, r, o, o, o, o, o, r, b, r, o, o, o, o, o, r, b, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n temp_img4 = [o, o, o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o,\n o, r, r, r, o, o, o, o, o, r, b, r, o, o, o, o, o, r, b, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n temp_img5 = [o, o, o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o,\n o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o, o, r, b, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n temp_img6 = [o, o, o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o,\n o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o, o, r, r, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n hum_img1 = [o, o, o, o, b, o, o, o, o, o, o, b, b, o, o, o, o, o, b,\n b, b, b, o, o, o, o, b, b, b, b, o, o, o, b, b, b, b, b, b, o,\n o, b, b, b, b, b, b, o, o, o, b, b, b, b, o, o, o, o, o, b, b,\n o, o, o]\n hum_img2 = [o, o, o, o, o, o, o, o, o, o, o, o, b, o, o, o, o, o, o,\n b, b, o, o, o, o, o, b, b, b, b, o, o, o, o, b, b, b, b, o, o,\n o, b, b, b, b, b, b, o, o, b, b, b, b, b, b, o, o, o, b, b, b,\n b, o, o]\n hum_img3 = [o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, b, o, o, o, o, o, o, b, b, o, o, o, o, o, b, b, b, b, o, o,\n o, o, b, b, b, b, o, o, o, b, b, b, b, b, b, o, b, b, b, b, b,\n b, b, b]\n hum_img4 = [o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, b, o, o, o,\n o, o, o, b, b, o, o, o, o, o, b, b, b, b, o, o, b, b, b, b, b,\n b, b, b]\n hum_img5 = [o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o, o, o, o, o, b, b, b, b, b, b, b, b, b, b, b, b, b,\n 
b, b, b]\n press_img1 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, p, o, o, o, o, o, o, p, p, o, o, o, o, o, o,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img2 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, p, o,\n o, o, o, o, o, p, p, p, o, o, o, o, o, p, p, p, o, o, o, o, o,\n p, p, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img3 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, p,\n o, o, o, o, o, p, p, p, p, o, o, o, o, p, p, p, p, o, o, o, o,\n p, o, p, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img4 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n p, o, o, o, o, p, p, p, p, p, o, o, o, p, p, p, p, p, o, o, o,\n p, o, o, p, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img5 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, p, o, o, o, p, p, p, p, p, p, o, o, p, p, p, p, p, p, o, o,\n p, o, o, o, p, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img6 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, p, o, o, p, p, p, p, p, p, p, o, p, p, p, p, p, p, p, o,\n p, o, o, o, o, p, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img7 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, p, o, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p,\n p, o, o, o, o, o, p, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img8 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, p, p, o, p, p, p, p, p, p, p, o, p, p, p, p, p, p,\n p, o, o, o, o, o, o, p, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img9 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, p, p, p, p, p, p, o, o, p, p, p, p, p,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img10 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, 
o, o,\n o, o, o, o, o, p, o, o, o, p, p, p, p, p, o, o, o, p, p, p, p,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img11 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, o, o, p, p, p, p, o, o, o, o, p, p, p,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img12 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, o, o, o, p, p, p, o, o, o, o, o, p, p,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img13 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, o, o, o, o, p, p, o, o, o, o, o, o, p,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img14 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n working_array = [g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g,\n g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g,\n g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g,\n g, g, g, g, g]\n error_array = [r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r,\n r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r,\n r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r,\n r, r, r, r]\n end_array = [b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b,\n b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b,\n b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b,\n b, b, b, b]\n reset_array = [o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o]\n if idImg == 'welcome':\n sh.show_message('Welcome to PAPi', text_colour=w, scroll_speed=0.05\n )\n sh.show_message('Poland can 
', text_colour=w, scroll_speed=0.05)\n sh.show_message('into space!', text_colour=r, scroll_speed=0.05)\n sh.set_rotation(180)\n sh.set_pixels(welcome_img)\n time.sleep(2)\n sh.set_rotation(0)\n temp_array = [temp_img1, temp_img2, temp_img3, temp_img4, temp_img5,\n temp_img6, temp_img5, temp_img4, temp_img3, temp_img2, temp_img1]\n if idImg == 'temp':\n for temp_anim in temp_array:\n sh.set_pixels(temp_anim)\n time.sleep(0.1)\n hum_array = [hum_img1, hum_img2, hum_img3, hum_img4, hum_img5]\n if idImg == 'hum':\n for hum_anim in hum_array:\n sh.set_pixels(hum_anim)\n time.sleep(0.5)\n press_array = [press_img1, press_img2, press_img3, press_img4,\n press_img5, press_img6, press_img7, press_img8, press_img9,\n press_img10, press_img11, press_img12, press_img13, press_img14]\n if idImg == 'press':\n for press_anim in press_array:\n sh.set_pixels(press_anim)\n time.sleep(0.1)\n if idImg == 'end':\n sh.set_pixels(end_array)\n time.sleep(0.3)\n if idImg == 'error':\n sh.set_pixels(error_array)\n time.sleep(0.5)\n if idImg == 'wait':\n sh.set_pixels(wait_img)\n for rot in orientation:\n sh.set_rotation(rot)\n time.sleep(0.2)\n if idImg == 'reset':\n sh.set_pixels(reset_array)\n if (idImg != 'wait' and idImg != 'end' and idImg != 'error' and \n idImg != 'reset'):\n sh.set_pixels(working_array)\n except Exception as e_display_eception:\n global anyException\n anyException = True\n logger.error('Time from start: %s,Time is: %s,ERROR: %s', timer1.\n minsOfRun(), timer1.nowForLog(), str(e_display_eception))\n return 'ERROR'\n\n\ndef showInfo(measure):\n \"\"\"\n that function gets random color and displays measure parameter on SenseHat screen\n \"\"\"\n try:\n r = [50, 0, 0]\n g = [0, 50, 0]\n b = [0, 0, 50]\n p = [50, 0, 50]\n c = [0, 50, 50]\n u = [50, 50, 0]\n textColours = [r, g, b, p, c, u]\n color = textColours[random.randint(0, len(textColours) - 1)]\n sh.set_rotation(0)\n sh.show_message(str(measure), text_colour=color, scroll_speed=0.05)\n time.sleep(1)\n except 
Exception as e_displayText_exception:\n logger.error('cannot show message on senseHat: %s',\n e_displayText_exception)\n pictures('error')\n\n\ndef ephemISS():\n \"\"\"\n Ephem module funciton\n \"\"\"\n try:\n nameOfStation = 'ISS (ZARYA)'\n firstLine = (\n '1 25544U 98067A 19027.58387731 .00001656 00000-0 33287-4 0 9996'\n )\n secondLine = (\n '2 25544 51.6426 340.5081 0004927 322.6857 20.8029 15.53199695153409'\n )\n stationISS = ephem.readtle(nameOfStation, firstLine, secondLine)\n stationISS.compute()\n if stationISS.sublat < 0:\n return 'ISS is in Southern hemisphere'\n else:\n return 'ISS is in Northern hemisphere'\n except Exception as e_ISS_eception:\n global anyException\n anyException = True\n logger.error('Cannot get EPHEM resulit eroor:%s', str(e_ISS_eception))\n pictures('error')\n return 'ERROR'\n\n\nclass timer:\n \"\"\"\n Timer obiect give us easier ability to count time and get better logs\n\n \"\"\"\n\n def __init__(self):\n self.startTime = datetime.datetime.now()\n self.endTime = self.startTime + datetime.timedelta(minutes=programTime)\n\n def minsOfRun(self):\n return str(datetime.datetime.now() - self.startTime)[:7]\n\n def now(self):\n self.time = datetime.datetime.now()\n return self.time\n\n def nowForLog(self):\n return str(datetime.datetime.now())[11:19]\n\n\ntry:\n timer1 = timer()\n setLoggingFile()\n logger.debug(\n 'starting program,time is: %s, program will be running for: %smin',\n timer1.now(), programTime)\n logger.info('EPHEM: %s', ephemISS())\n sh = SenseHat()\n time.sleep(2)\n pictures('welcome')\nexcept Exception as e_init_exception:\n anyException = True\n logger.error('INIT ERROR: %s', str(e_init_exception))\naverageTemp = 0\naverageHum = 0\naveragePress = 0\nrounds = 0\nlowestTemp = 9999999999\nlowestHum = 9999999999\nlowestPress = 9999999999\nhigestTemp = 0\nhigestHum = 0\nhigestPress = 0\nwhile timer1.now() < timer1.endTime:\n try:\n if isItOversized():\n logger.debug('OVERSIZED EXITING')\n break\n rounds += 1\n 
logger.debug('Start round: %s,Time from start: %s', rounds, timer1.\n minsOfRun())\n pictures('wait')\n showInfo('round: ' + str(rounds))\n pictures('temp')\n tempNowIs = measure('temp')\n showInfo(str(tempNowIs) + \" 'C\")\n pictures('hum')\n humNowIs = measure('hum')\n showInfo(str(humNowIs) + ' %')\n pictures('press')\n pressNowIs = measure('press')\n showInfo(str(pressNowIs) + ' mbar')\n logger.info(\n 'Time is: %s,Time from start: %s,Temp: %s,Hum: %s,Press: %s',\n timer1.nowForLog(), timer1.minsOfRun(), tempNowIs, humNowIs,\n pressNowIs)\n if (tempNowIs != 'ERROR' and humNowIs != 'ERROR' and pressNowIs !=\n 'ERROR'):\n averageTemp += tempNowIs\n averageHum += humNowIs\n averagePress += pressNowIs\n if tempNowIs < lowestTemp:\n lowestTemp = tempNowIs\n if tempNowIs > higestTemp:\n higestTemp = tempNowIs\n if humNowIs < lowestHum:\n lowestHum = humNowIs\n if humNowIs > higestHum:\n higestHum = humNowIs\n if pressNowIs < lowestPress:\n lowestPress = pressNowIs\n if pressNowIs > higestPress:\n higestPress = pressNowIs\n else:\n rounds -= 1\n except Exception as e_main_exception:\n anyException = True\n logger.error('Time from start: %s,Time is: %s,ERROR: %s', timer1.\n minsOfRun(), timer1.nowForLog(), str(e_main_exception))\n pictures('error')\ntry:\n pictures('end')\n averageTemp /= rounds\n averageHum /= rounds\n averagePress /= rounds\n averageTemp = round(averageTemp, 2)\n averageHum = round(averageHum, 2)\n averagePress = round(averagePress, 2)\n lowestTemp = round(lowestTemp, 2)\n lowestHum = round(lowestHum, 2)\n lowestPress = round(lowestPress, 2)\n higestTemp = round(higestTemp, 2)\n higestHum = round(higestHum, 2)\n higestPress = round(higestPress, 2)\n logger.info('average Temp: %s,average hum: %s,average press: %s',\n averageTemp, averageHum, averagePress)\n logger.info(\n 'Temp: highest: %s lowest: %s ,Hum: highest: %s lowest: %s ,Press: highest: %s lowest: %s'\n , higestTemp, lowestTemp, higestHum, lowestHum, higestPress,\n lowestPress)\n 
logger.debug(\n 'code succesfully exited after: %s,expected time: %smin, time of end is: %s, problems: %s'\n , timer1.minsOfRun(), programTime, timer1.now(), anyException)\n logger.debug('program ended with %s rounds of collecting data', rounds)\n time.sleep(3)\n pictures('reset')\nexcept Exception as e_sumUp_exception:\n print('CANNOT SUMUP DATA, EXITING')\n logger.error('SUMUP ERROR: %s', str(e_sumUp_exception))\n exit()\n",
"step-4": "import time\nimport datetime\nimport math\nimport os\nimport random\nimport logzero\nimport logging\nfrom logzero import logger\nfrom sense_hat import SenseHat\nimport ephem\nanyException = False\nprogramTime = 175\n\n\ndef setLoggingFile():\n \"\"\"\n This function will setup a logger and logfile\n \"\"\"\n try:\n dirPath = os.path.dirname(os.path.realpath(__file__))\n dirFiles = os.listdir(dirPath)\n for itemNr in range(len(dirFiles)):\n nameOfFile = 'data0' + str(itemNr + 1) + '.csv'\n if nameOfFile == 'data05.csv':\n nameOfFile = 'data01.csv'\n break\n if nameOfFile in dirFiles:\n print('this file exsist' + str(nameOfFile))\n else:\n break\n except Exception as dummy:\n global anyException\n anyException = True\n nameOfFile = 'data01.csv'\n logzero.logfile(dirPath + '/' + nameOfFile)\n print(dirPath + '/' + nameOfFile)\n formatter = logging.Formatter(\n '_%(levelname)s_,line: %(lineno)d, %(message)s')\n logzero.formatter(formatter)\n\n\ndef isItOversized():\n \"\"\"\n This function will check storage used to be sure files weight are less than 3gb\n \"\"\"\n try:\n dirPath = os.path.dirname(os.path.realpath(__file__))\n dirFiles = os.listdir(dirPath)\n filesSize = 0\n for file in dirFiles:\n filesSize += os.stat(file).st_size\n if filesSize < 3221225472:\n return False\n else:\n return True\n except Exception as e_oversizedFun_ecxeption:\n global anyException\n anyException = True\n logger.error('Time from start: %s,Time is: %s,ERROR: %s', timer1.\n minsOfRun(), timer1.nowForLog(), str(e_oversizedFun_ecxeption))\n pictures('error')\n return False\n\n\ndef measure(whatToMeasure):\n \"\"\"\n This function will measure temperature, humidity and pressure\n \"\"\"\n temp = 0\n hum = 0\n press = 0\n failed = 0\n try:\n if whatToMeasure == 'temp':\n print('TEMP MEASURE:')\n for dummy in range(10):\n time.sleep(0.2)\n while failed < 5:\n tempNow = sh.get_temperature()\n print(tempNow)\n if tempNow != 0:\n break\n else:\n failed += 1\n temp += tempNow\n if 
failed < 5:\n temp /= 10\n temp = round(temp, 2)\n print('MEASURED TEMP IS: ' + str(temp))\n else:\n temp = 'ERROR'\n return temp\n if whatToMeasure == 'hum':\n print('HUM MEASURE:')\n for dummy in range(10):\n time.sleep(0.2)\n while failed < 5:\n humNow = sh.get_humidity()\n print(humNow)\n if humNow != 0:\n break\n else:\n failed += 1\n hum += humNow\n if failed < 5:\n hum /= 10\n hum = round(hum, 2)\n print('MEASURED HUM IS: ' + str(hum))\n else:\n hum = 'ERROR'\n return hum\n if whatToMeasure == 'press':\n print('PRESS MEASURE:')\n for dummy in range(10):\n time.sleep(0.2)\n while failed < 5:\n pressNow = sh.get_pressure()\n print(pressNow)\n if pressNow != 0:\n break\n else:\n failed += 1\n press += pressNow\n if failed < 5:\n press /= 10\n press = round(press, 2)\n print('MEASURED PRESS IS: ' + str(press))\n else:\n press = 'ERROR'\n return press\n except Exception as e_measure_eception:\n global anyException\n anyException = True\n logger.error('Time from start: %s,Time is: %s,ERROR: %s', timer1.\n minsOfRun(), timer1.nowForLog(), str(e_measure_eception))\n pictures('error')\n return 'ERROR'\n\n\ndef pictures(idImg):\n \"\"\"\n This function displays images on SnenseHat pixel matrix scrren\n \"\"\"\n try:\n rot = 0\n sh.set_rotation(rot)\n r = [50, 0, 0]\n g = [0, 50, 0]\n b = [0, 0, 50]\n p = [50, 0, 50]\n o = [0, 0, 0]\n w = [50, 50, 50]\n orientation = [0, 90, 180, 270]\n welcome_img = [o, o, w, w, w, w, o, o, o, w, w, w, w, w, w, o, w, w,\n w, w, w, w, w, w, w, w, w, w, w, w, w, w, r, r, r, r, r, r, r,\n r, r, o, r, r, r, r, o, r, o, r, r, r, r, r, r, o, o, o, r, r,\n r, r, o, o]\n wait_img = [g, g, g, g, g, g, g, g, o, g, o, o, o, o, g, o, o, o, g,\n o, o, g, o, o, o, o, o, g, g, o, o, o, o, o, o, g, g, o, o, o,\n o, o, g, g, g, g, o, o, o, g, g, g, g, g, g, o, g, g, g, g, g,\n g, g, g]\n temp_img1 = [o, o, o, r, b, r, o, o, o, o, o, r, b, r, o, o, o, o,\n o, r, b, r, o, o, o, o, o, r, b, r, o, o, o, o, o, r, b, r, o,\n o, o, o, r, b, b, b, r, o, o, o, 
r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n temp_img2 = [o, o, o, r, r, r, o, o, o, o, o, r, b, r, o, o, o, o,\n o, r, b, r, o, o, o, o, o, r, b, r, o, o, o, o, o, r, b, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n temp_img3 = [o, o, o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o,\n o, r, b, r, o, o, o, o, o, r, b, r, o, o, o, o, o, r, b, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n temp_img4 = [o, o, o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o,\n o, r, r, r, o, o, o, o, o, r, b, r, o, o, o, o, o, r, b, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n temp_img5 = [o, o, o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o,\n o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o, o, r, b, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n temp_img6 = [o, o, o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o,\n o, r, r, r, o, o, o, o, o, r, r, r, o, o, o, o, o, r, r, r, o,\n o, o, o, r, b, b, b, r, o, o, o, r, b, b, b, r, o, o, o, o, r,\n r, r, o, o]\n hum_img1 = [o, o, o, o, b, o, o, o, o, o, o, b, b, o, o, o, o, o, b,\n b, b, b, o, o, o, o, b, b, b, b, o, o, o, b, b, b, b, b, b, o,\n o, b, b, b, b, b, b, o, o, o, b, b, b, b, o, o, o, o, o, b, b,\n o, o, o]\n hum_img2 = [o, o, o, o, o, o, o, o, o, o, o, o, b, o, o, o, o, o, o,\n b, b, o, o, o, o, o, b, b, b, b, o, o, o, o, b, b, b, b, o, o,\n o, b, b, b, b, b, b, o, o, b, b, b, b, b, b, o, o, o, b, b, b,\n b, o, o]\n hum_img3 = [o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, b, o, o, o, o, o, o, b, b, o, o, o, o, o, b, b, b, b, o, o,\n o, o, b, b, b, b, o, o, o, b, b, b, b, b, b, o, b, b, b, b, b,\n b, b, b]\n hum_img4 = [o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, b, o, o, o,\n o, o, o, b, b, o, o, o, o, o, b, b, b, b, o, o, b, b, b, b, b,\n b, b, b]\n hum_img5 = [o, o, o, o, o, o, o, o, o, 
o, o, o, o, o, o, o, o, o, o,\n o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o, o, o, o, o, b, b, b, b, b, b, b, b, b, b, b, b, b,\n b, b, b]\n press_img1 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, p, o, o, o, o, o, o, p, p, o, o, o, o, o, o,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img2 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, p, o,\n o, o, o, o, o, p, p, p, o, o, o, o, o, p, p, p, o, o, o, o, o,\n p, p, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img3 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, p,\n o, o, o, o, o, p, p, p, p, o, o, o, o, p, p, p, p, o, o, o, o,\n p, o, p, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img4 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n p, o, o, o, o, p, p, p, p, p, o, o, o, p, p, p, p, p, o, o, o,\n p, o, o, p, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img5 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, p, o, o, o, p, p, p, p, p, p, o, o, p, p, p, p, p, p, o, o,\n p, o, o, o, p, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img6 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, p, o, o, p, p, p, p, p, p, p, o, p, p, p, p, p, p, p, o,\n p, o, o, o, o, p, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img7 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, p, o, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p, p,\n p, o, o, o, o, o, p, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img8 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, p, p, o, p, p, p, p, p, p, p, o, p, p, p, p, p, p,\n p, o, o, o, o, o, o, p, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img9 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, p, p, p, p, p, p, o, 
o, p, p, p, p, p,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img10 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, o, p, p, p, p, p, o, o, o, p, p, p, p,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img11 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, o, o, p, p, p, p, o, o, o, o, p, p, p,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img12 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, o, o, o, p, p, p, o, o, o, o, o, p, p,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img13 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, o, o, o, o, p, p, o, o, o, o, o, o, p,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n press_img14 = [o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o,\n o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o,\n p, o, o, o, o, o, o, o, p, o, o, o, o, o, o, o, p, o, o, o, o,\n o, o, o, p]\n working_array = [g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g,\n g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g,\n g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g, g,\n g, g, g, g, g]\n error_array = [r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r,\n r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r,\n r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r, r,\n r, r, r, r]\n end_array = [b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b,\n b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b,\n b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b,\n b, b, b, b]\n reset_array = [o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, o,\n o, o, o, o, o, o, o, o, o, o, o, o, o, o, o, 
o, o, o, o, o, o,\n o, o, o, o]\n if idImg == 'welcome':\n sh.show_message('Welcome to PAPi', text_colour=w, scroll_speed=0.05\n )\n sh.show_message('Poland can ', text_colour=w, scroll_speed=0.05)\n sh.show_message('into space!', text_colour=r, scroll_speed=0.05)\n sh.set_rotation(180)\n sh.set_pixels(welcome_img)\n time.sleep(2)\n sh.set_rotation(0)\n temp_array = [temp_img1, temp_img2, temp_img3, temp_img4, temp_img5,\n temp_img6, temp_img5, temp_img4, temp_img3, temp_img2, temp_img1]\n if idImg == 'temp':\n for temp_anim in temp_array:\n sh.set_pixels(temp_anim)\n time.sleep(0.1)\n hum_array = [hum_img1, hum_img2, hum_img3, hum_img4, hum_img5]\n if idImg == 'hum':\n for hum_anim in hum_array:\n sh.set_pixels(hum_anim)\n time.sleep(0.5)\n press_array = [press_img1, press_img2, press_img3, press_img4,\n press_img5, press_img6, press_img7, press_img8, press_img9,\n press_img10, press_img11, press_img12, press_img13, press_img14]\n if idImg == 'press':\n for press_anim in press_array:\n sh.set_pixels(press_anim)\n time.sleep(0.1)\n if idImg == 'end':\n sh.set_pixels(end_array)\n time.sleep(0.3)\n if idImg == 'error':\n sh.set_pixels(error_array)\n time.sleep(0.5)\n if idImg == 'wait':\n sh.set_pixels(wait_img)\n for rot in orientation:\n sh.set_rotation(rot)\n time.sleep(0.2)\n if idImg == 'reset':\n sh.set_pixels(reset_array)\n if (idImg != 'wait' and idImg != 'end' and idImg != 'error' and \n idImg != 'reset'):\n sh.set_pixels(working_array)\n except Exception as e_display_eception:\n global anyException\n anyException = True\n logger.error('Time from start: %s,Time is: %s,ERROR: %s', timer1.\n minsOfRun(), timer1.nowForLog(), str(e_display_eception))\n return 'ERROR'\n\n\ndef showInfo(measure):\n \"\"\"\n that function gets random color and displays measure parameter on SenseHat screen\n \"\"\"\n try:\n r = [50, 0, 0]\n g = [0, 50, 0]\n b = [0, 0, 50]\n p = [50, 0, 50]\n c = [0, 50, 50]\n u = [50, 50, 0]\n textColours = [r, g, b, p, c, u]\n color = 
textColours[random.randint(0, len(textColours) - 1)]\n sh.set_rotation(0)\n sh.show_message(str(measure), text_colour=color, scroll_speed=0.05)\n time.sleep(1)\n except Exception as e_displayText_exception:\n logger.error('cannot show message on senseHat: %s',\n e_displayText_exception)\n pictures('error')\n\n\ndef ephemISS():\n \"\"\"\n Ephem module funciton\n \"\"\"\n try:\n nameOfStation = 'ISS (ZARYA)'\n firstLine = (\n '1 25544U 98067A 19027.58387731 .00001656 00000-0 33287-4 0 9996'\n )\n secondLine = (\n '2 25544 51.6426 340.5081 0004927 322.6857 20.8029 15.53199695153409'\n )\n stationISS = ephem.readtle(nameOfStation, firstLine, secondLine)\n stationISS.compute()\n if stationISS.sublat < 0:\n return 'ISS is in Southern hemisphere'\n else:\n return 'ISS is in Northern hemisphere'\n except Exception as e_ISS_eception:\n global anyException\n anyException = True\n logger.error('Cannot get EPHEM resulit eroor:%s', str(e_ISS_eception))\n pictures('error')\n return 'ERROR'\n\n\nclass timer:\n \"\"\"\n Timer obiect give us easier ability to count time and get better logs\n\n \"\"\"\n\n def __init__(self):\n self.startTime = datetime.datetime.now()\n self.endTime = self.startTime + datetime.timedelta(minutes=programTime)\n\n def minsOfRun(self):\n return str(datetime.datetime.now() - self.startTime)[:7]\n\n def now(self):\n self.time = datetime.datetime.now()\n return self.time\n\n def nowForLog(self):\n return str(datetime.datetime.now())[11:19]\n\n\ntry:\n timer1 = timer()\n setLoggingFile()\n logger.debug(\n 'starting program,time is: %s, program will be running for: %smin',\n timer1.now(), programTime)\n logger.info('EPHEM: %s', ephemISS())\n sh = SenseHat()\n time.sleep(2)\n pictures('welcome')\nexcept Exception as e_init_exception:\n anyException = True\n logger.error('INIT ERROR: %s', str(e_init_exception))\naverageTemp = 0\naverageHum = 0\naveragePress = 0\nrounds = 0\nlowestTemp = 9999999999\nlowestHum = 9999999999\nlowestPress = 9999999999\nhigestTemp = 
0\nhigestHum = 0\nhigestPress = 0\nwhile timer1.now() < timer1.endTime:\n try:\n if isItOversized():\n logger.debug('OVERSIZED EXITING')\n break\n rounds += 1\n logger.debug('Start round: %s,Time from start: %s', rounds, timer1.\n minsOfRun())\n pictures('wait')\n showInfo('round: ' + str(rounds))\n pictures('temp')\n tempNowIs = measure('temp')\n showInfo(str(tempNowIs) + \" 'C\")\n pictures('hum')\n humNowIs = measure('hum')\n showInfo(str(humNowIs) + ' %')\n pictures('press')\n pressNowIs = measure('press')\n showInfo(str(pressNowIs) + ' mbar')\n logger.info(\n 'Time is: %s,Time from start: %s,Temp: %s,Hum: %s,Press: %s',\n timer1.nowForLog(), timer1.minsOfRun(), tempNowIs, humNowIs,\n pressNowIs)\n if (tempNowIs != 'ERROR' and humNowIs != 'ERROR' and pressNowIs !=\n 'ERROR'):\n averageTemp += tempNowIs\n averageHum += humNowIs\n averagePress += pressNowIs\n if tempNowIs < lowestTemp:\n lowestTemp = tempNowIs\n if tempNowIs > higestTemp:\n higestTemp = tempNowIs\n if humNowIs < lowestHum:\n lowestHum = humNowIs\n if humNowIs > higestHum:\n higestHum = humNowIs\n if pressNowIs < lowestPress:\n lowestPress = pressNowIs\n if pressNowIs > higestPress:\n higestPress = pressNowIs\n else:\n rounds -= 1\n except Exception as e_main_exception:\n anyException = True\n logger.error('Time from start: %s,Time is: %s,ERROR: %s', timer1.\n minsOfRun(), timer1.nowForLog(), str(e_main_exception))\n pictures('error')\ntry:\n pictures('end')\n averageTemp /= rounds\n averageHum /= rounds\n averagePress /= rounds\n averageTemp = round(averageTemp, 2)\n averageHum = round(averageHum, 2)\n averagePress = round(averagePress, 2)\n lowestTemp = round(lowestTemp, 2)\n lowestHum = round(lowestHum, 2)\n lowestPress = round(lowestPress, 2)\n higestTemp = round(higestTemp, 2)\n higestHum = round(higestHum, 2)\n higestPress = round(higestPress, 2)\n logger.info('average Temp: %s,average hum: %s,average press: %s',\n averageTemp, averageHum, averagePress)\n logger.info(\n 'Temp: highest: %s 
lowest: %s ,Hum: highest: %s lowest: %s ,Press: highest: %s lowest: %s'\n , higestTemp, lowestTemp, higestHum, lowestHum, higestPress,\n lowestPress)\n logger.debug(\n 'code succesfully exited after: %s,expected time: %smin, time of end is: %s, problems: %s'\n , timer1.minsOfRun(), programTime, timer1.now(), anyException)\n logger.debug('program ended with %s rounds of collecting data', rounds)\n time.sleep(3)\n pictures('reset')\nexcept Exception as e_sumUp_exception:\n print('CANNOT SUMUP DATA, EXITING')\n logger.error('SUMUP ERROR: %s', str(e_sumUp_exception))\n exit()\n",
"step-5": "import time\r\nimport datetime\r\nimport math\r\nimport os\r\nimport random\r\nimport logzero\r\nimport logging\r\nfrom logzero import logger\r\nfrom sense_hat import SenseHat\r\nimport ephem\r\nanyException = False\r\n# program Time is here for easy acces (in minutes)\r\nprogramTime = 175\r\n# 2:55 min of runtime\r\n\r\n# ____________________________\r\n# DEFINE FUNCTIONS\r\n# ____________________________\r\n\r\ndef setLoggingFile():\r\n '''\r\n This function will setup a logger and logfile\r\n '''\r\n # It will create a data01.csv file if it does not exist, data02.csv if previous exist etc\r\n # but when data01.csv data02.csv data03.csv data04.csv data05.csv exist it will overwrite the data01.csv file \r\n\r\n try:\r\n # set dirPath\r\n dirPath = os.path.dirname(os.path.realpath(__file__))\r\n # set dir filenames \r\n dirFiles = os.listdir(dirPath)\r\n for itemNr in range(len(dirFiles)):\r\n nameOfFile = 'data0'+str(itemNr+1)+\".csv\"\r\n if nameOfFile =='data05.csv':\r\n nameOfFile ='data01.csv'\r\n break\r\n if nameOfFile in dirFiles:\r\n print('this file exsist' + str(nameOfFile))\r\n else:\r\n break\r\n\r\n # Handle the Exception\r\n except Exception as dummy:\r\n # change global variable anyException to True, it will be logged at the end of run\r\n global anyException\r\n anyException = True \r\n # set namefile to default one\r\n nameOfFile = 'data01.csv'\r\n # set logfile and custom formatter\r\n logzero.logfile(dirPath+\"/\"+nameOfFile)\r\n print(dirPath+\"/\"+nameOfFile)\r\n formatter = logging.Formatter('_%(levelname)s_,line: %(lineno)d, %(message)s')\r\n logzero.formatter(formatter)\r\n\r\ndef isItOversized():\r\n '''\r\n This function will check storage used to be sure files weight are less than 3gb\r\n '''\r\n try:\r\n # set dirPath\r\n dirPath = os.path.dirname(os.path.realpath(__file__))\r\n # set dir filenames \r\n dirFiles = os.listdir(dirPath)\r\n # check files name\r\n filesSize = 0\r\n # add all files size to variable\r\n for file in 
dirFiles:\r\n filesSize+=os.stat(file).st_size\r\n # check that filesSize variable is less than 3221225472 bites which is 3gb\r\n # return False if it is smaller and return True when it is oversized\r\n if filesSize < 3221225472:\r\n return False\r\n else:\r\n return True\r\n # Handle the exception as default => not oversized \r\n except Exception as e_oversizedFun_ecxeption:\r\n global anyException\r\n anyException = True \r\n logger.error('Time from start: %s,Time is: %s,ERROR: %s',timer1.minsOfRun(),timer1.nowForLog(),str(e_oversizedFun_ecxeption))\r\n pictures('error')\r\n return False\r\n\r\ndef measure(whatToMeasure):\r\n '''\r\n This function will measure temperature, humidity and pressure\r\n '''\r\n\r\n # reset variables\r\n temp = 0\r\n hum = 0\r\n press = 0\r\n failed = 0\r\n \r\n try:\r\n # TEMPERATURE\r\n\r\n # Our code does 10 measurements\r\n # ignore the 0 - corrupted\r\n # and return the average result of measurements\r\n # if 5 measurements will be corrupted it will return an error\r\n\r\n if whatToMeasure == 'temp':\r\n print('TEMP MEASURE:')\r\n for dummy in range(10):\r\n # sleep betwen measurements\r\n time.sleep(0.2)\r\n while failed<5:\r\n # get Temperature from SenseHat\r\n tempNow=sh.get_temperature()\r\n print(tempNow)\r\n # if measured temp is okay break (while filed<5) loop\r\n # always measured temp give us floats with decimals\r\n # so if temp will be close to 0 measured temp will be for example 0.2312\r\n # so if measured temp is equal to 0 without decimals measured temp is corrupted\r\n if(tempNow != 0):\r\n break\r\n else:\r\n # if cant get temperature (temp=0) add 1 to failed variable\r\n failed+=1\r\n temp+=tempNow\r\n if(failed<5):\r\n # count the average data and round\r\n temp/=10\r\n temp=round(temp,2)\r\n print('MEASURED TEMP IS: '+str(temp))\r\n else:\r\n # error is returned as string becouse it will be displayed on screen (showInfo function)\r\n temp = 'ERROR'\r\n # return measured temp\r\n return temp\r\n\r\n # 
HUMidITY\r\n # same as temp but we measure humidity\r\n if whatToMeasure == 'hum':\r\n print('HUM MEASURE:')\r\n for dummy in range(10):\r\n time.sleep(0.2)\r\n while failed<5:\r\n humNow=sh.get_humidity()\r\n print(humNow)\r\n if(humNow != 0):\r\n break\r\n else:\r\n failed+=1\r\n hum+=humNow\r\n if(failed<5):\r\n hum/=10\r\n hum=round(hum,2)\r\n print('MEASURED HUM IS: '+str(hum))\r\n else:\r\n hum = 'ERROR'\r\n return hum\r\n # PRESSURE\r\n # same as temp but we measure pressure\r\n if whatToMeasure == 'press':\r\n print('PRESS MEASURE:')\r\n for dummy in range(10):\r\n time.sleep(0.2)\r\n while failed<5:\r\n pressNow=sh.get_pressure()\r\n print(pressNow)\r\n if(pressNow != 0):\r\n break\r\n else:\r\n failed+=1\r\n press+=pressNow\r\n if(failed<5):\r\n press/=10 \r\n press=round(press,2)\r\n print('MEASURED PRESS IS: '+str(press))\r\n else:\r\n press = 'ERROR'\r\n return press\r\n\r\n # handle exception and log it, display error image on screen\r\n except Exception as e_measure_eception:\r\n global anyException\r\n anyException = True\r\n logger.error('Time from start: %s,Time is: %s,ERROR: %s',timer1.minsOfRun(),timer1.nowForLog(),str(e_measure_eception))\r\n pictures('error')\r\n return \"ERROR\"\r\n\r\ndef pictures(idImg):\r\n '''\r\n This function displays images on SnenseHat pixel matrix scrren\r\n '''\r\n\r\n try:\r\n # Set display rotation on 0 deg\r\n rot = 0\r\n sh.set_rotation(rot)\r\n\r\n # Define some colors - keep brightness low \r\n r = [50,0,0]\r\n g = [0,50,0]\r\n b = [0,0,50]\r\n p = [50,0,50]\r\n o = [0,0,0]\r\n w = [50,50,50]\r\n orientation = [0,90,180,270]\r\n # Define an images \r\n \r\n welcome_img = [\r\n o,o,w,w,w,w,o,o,\r\n o,w,w,w,w,w,w,o,\r\n w,w,w,w,w,w,w,w,\r\n w,w,w,w,w,w,w,w,\r\n r,r,r,r,r,r,r,r,\r\n r,o,r,r,r,r,o,r,\r\n o,r,r,r,r,r,r,o,\r\n o,o,r,r,r,r,o,o,\r\n ]\r\n\r\n wait_img = [ \r\n g,g,g,g,g,g,g,g, \r\n o,g,o,o,o,o,g,o, \r\n o,o,g,o,o,g,o,o, \r\n o,o,o,g,g,o,o,o, \r\n o,o,o,g,g,o,o,o, \r\n o,o,g,g,g,g,o,o,\r\n 
o,g,g,g,g,g,g,o,\r\n g,g,g,g,g,g,g,g,\r\n ]\r\n\r\n temp_img1 = [\r\n o,o,o,r,b,r,o,o,\r\n o,o,o,r,b,r,o,o,\r\n o,o,o,r,b,r,o,o,\r\n o,o,o,r,b,r,o,o,\r\n o,o,o,r,b,r,o,o,\r\n o,o,r,b,b,b,r,o,\r\n o,o,r,b,b,b,r,o,\r\n o,o,o,r,r,r,o,o,\r\n ]\r\n \r\n temp_img2 = [\r\n o,o,o,r,r,r,o,o,\r\n o,o,o,r,b,r,o,o,\r\n o,o,o,r,b,r,o,o,\r\n o,o,o,r,b,r,o,o,\r\n o,o,o,r,b,r,o,o,\r\n o,o,r,b,b,b,r,o,\r\n o,o,r,b,b,b,r,o,\r\n o,o,o,r,r,r,o,o,\r\n ]\r\n\r\n temp_img3 = [\r\n o,o,o,r,r,r,o,o,\r\n o,o,o,r,r,r,o,o,\r\n o,o,o,r,b,r,o,o,\r\n o,o,o,r,b,r,o,o,\r\n o,o,o,r,b,r,o,o,\r\n o,o,r,b,b,b,r,o,\r\n o,o,r,b,b,b,r,o,\r\n o,o,o,r,r,r,o,o,\r\n ]\r\n\r\n temp_img4 = [\r\n o,o,o,r,r,r,o,o,\r\n o,o,o,r,r,r,o,o,\r\n o,o,o,r,r,r,o,o,\r\n o,o,o,r,b,r,o,o,\r\n o,o,o,r,b,r,o,o,\r\n o,o,r,b,b,b,r,o,\r\n o,o,r,b,b,b,r,o,\r\n o,o,o,r,r,r,o,o,\r\n ]\r\n \r\n temp_img5 = [\r\n o,o,o,r,r,r,o,o,\r\n o,o,o,r,r,r,o,o,\r\n o,o,o,r,r,r,o,o,\r\n o,o,o,r,r,r,o,o,\r\n o,o,o,r,b,r,o,o,\r\n o,o,r,b,b,b,r,o,\r\n o,o,r,b,b,b,r,o,\r\n o,o,o,r,r,r,o,o,\r\n ]\r\n\r\n temp_img6 = [\r\n o,o,o,r,r,r,o,o,\r\n o,o,o,r,r,r,o,o,\r\n o,o,o,r,r,r,o,o,\r\n o,o,o,r,r,r,o,o,\r\n o,o,o,r,r,r,o,o,\r\n o,o,r,b,b,b,r,o,\r\n o,o,r,b,b,b,r,o,\r\n o,o,o,r,r,r,o,o,\r\n ]\r\n\r\n hum_img1 = [\r\n o,o,o,o,b,o,o,o,\r\n o,o,o,b,b,o,o,o,\r\n o,o,b,b,b,b,o,o,\r\n o,o,b,b,b,b,o,o,\r\n o,b,b,b,b,b,b,o,\r\n o,b,b,b,b,b,b,o,\r\n o,o,b,b,b,b,o,o,\r\n o,o,o,b,b,o,o,o,\r\n ]\r\n\r\n hum_img2 = [\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,b,o,o,o,\r\n o,o,o,b,b,o,o,o,\r\n o,o,b,b,b,b,o,o,\r\n o,o,b,b,b,b,o,o,\r\n o,b,b,b,b,b,b,o,\r\n o,b,b,b,b,b,b,o,\r\n o,o,b,b,b,b,o,o,\r\n ]\r\n \r\n hum_img3 = [\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,b,o,o,o,\r\n o,o,o,b,b,o,o,o,\r\n o,o,b,b,b,b,o,o,\r\n o,o,b,b,b,b,o,o,\r\n o,b,b,b,b,b,b,o,\r\n b,b,b,b,b,b,b,b,\r\n ]\r\n \r\n hum_img4 = [\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,b,o,o,o,\r\n o,o,o,b,b,o,o,o,\r\n o,o,b,b,b,b,o,o,\r\n 
b,b,b,b,b,b,b,b,\r\n ]\r\n \r\n hum_img5 = [\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n b,b,b,b,b,b,b,b,\r\n b,b,b,b,b,b,b,b,\r\n ]\r\n\r\n press_img1 = [\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n p,o,o,o,o,o,o,p,\r\n p,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n ]\r\n\r\n press_img2 = [\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n p,o,o,o,o,o,o,p,\r\n p,p,o,o,o,o,o,p,\r\n p,p,o,o,o,o,o,p,\r\n p,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n ]\r\n\r\n press_img3 = [\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,p,o,o,o,o,o,p,\r\n p,p,p,o,o,o,o,p,\r\n p,p,p,o,o,o,o,p,\r\n o,p,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n ]\r\n\r\n press_img4 = [\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,p,o,o,o,o,p,\r\n p,p,p,p,o,o,o,p,\r\n p,p,p,p,o,o,o,p,\r\n o,o,p,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n ]\r\n\r\n press_img5 = [\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,p,o,o,o,p,\r\n p,p,p,p,p,o,o,p,\r\n p,p,p,p,p,o,o,p,\r\n o,o,o,p,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n ]\r\n\r\n press_img6 = [\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,p,o,o,p,\r\n p,p,p,p,p,p,o,p,\r\n p,p,p,p,p,p,o,p,\r\n o,o,o,o,p,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n ]\r\n\r\n press_img7 = [\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,p,o,p,\r\n p,p,p,p,p,p,p,p,\r\n p,p,p,p,p,p,p,p,\r\n o,o,o,o,o,p,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n ]\r\n\r\n press_img8 = [\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,p,p,\r\n o,p,p,p,p,p,p,p,\r\n o,p,p,p,p,p,p,p,\r\n o,o,o,o,o,o,p,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n ]\r\n\r\n press_img9 = [\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,p,p,p,p,p,p,\r\n o,o,p,p,p,p,p,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n ]\r\n\r\n 
press_img10 = [\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,p,p,p,p,p,\r\n o,o,o,p,p,p,p,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n ]\r\n\r\n press_img11 = [\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,p,p,p,p,\r\n o,o,o,o,p,p,p,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n ]\r\n\r\n press_img12 = [\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,p,p,p,\r\n o,o,o,o,o,p,p,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n ]\r\n\r\n press_img13 = [\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,p,p,\r\n o,o,o,o,o,o,p,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n ]\r\n\r\n press_img14 = [\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n o,o,o,o,o,o,o,p,\r\n ]\r\n working_array= [\r\n g,g,g,g,g,g,g,g,\r\n g,g,g,g,g,g,g,g,\r\n g,g,g,g,g,g,g,g,\r\n g,g,g,g,g,g,g,g,\r\n g,g,g,g,g,g,g,g,\r\n g,g,g,g,g,g,g,g,\r\n g,g,g,g,g,g,g,g,\r\n g,g,g,g,g,g,g,g,\r\n ]\r\n error_array=[\r\n r,r,r,r,r,r,r,r,\r\n r,r,r,r,r,r,r,r,\r\n r,r,r,r,r,r,r,r,\r\n r,r,r,r,r,r,r,r,\r\n r,r,r,r,r,r,r,r,\r\n r,r,r,r,r,r,r,r,\r\n r,r,r,r,r,r,r,r,\r\n r,r,r,r,r,r,r,r,\r\n ]\r\n end_array=[\r\n b,b,b,b,b,b,b,b,\r\n b,b,b,b,b,b,b,b,\r\n b,b,b,b,b,b,b,b,\r\n b,b,b,b,b,b,b,b,\r\n b,b,b,b,b,b,b,b,\r\n b,b,b,b,b,b,b,b,\r\n b,b,b,b,b,b,b,b,\r\n b,b,b,b,b,b,b,b,\r\n ]\r\n\r\n reset_array=[\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n o,o,o,o,o,o,o,o,\r\n ]\r\n\r\n # routes to display diffrent images\r\n if idImg == 'welcome':\r\n sh.show_message('Welcome to PAPi', text_colour = w, scroll_speed=0.05)\r\n #This is a quote from one of a 'Country balls comics' \r\n sh.show_message('Poland can ', 
text_colour = w, scroll_speed=0.05)\r\n sh.show_message('into space!', text_colour = r, scroll_speed=0.05)\r\n #Polish flag is upside down becouse this picture represent one of the 'Country balls'\r\n #specificaly a 'Poland ball', which have colors of Monako hahahaha!\r\n sh.set_rotation(180)\r\n sh.set_pixels(welcome_img)\r\n time.sleep(2)\r\n sh.set_rotation(0)\r\n # store multiple arrays (images) in one array for do an animation\r\n temp_array = [temp_img1, temp_img2,temp_img3, temp_img4, temp_img5, temp_img6, temp_img5, temp_img4, temp_img3, temp_img2, temp_img1]\r\n if idImg == 'temp':\r\n for temp_anim in temp_array:\r\n sh.set_pixels(temp_anim)\r\n time.sleep(0.1)\r\n \r\n hum_array = [hum_img1, hum_img2, hum_img3, hum_img4, hum_img5]\r\n if idImg == 'hum':\r\n for hum_anim in hum_array:\r\n sh.set_pixels(hum_anim)\r\n time.sleep(0.5)\r\n\r\n press_array = [press_img1, press_img2, press_img3, press_img4, press_img5,press_img6, press_img7, press_img8, press_img9, press_img10, press_img11, press_img12, press_img13, press_img14]\r\n if idImg == 'press':\r\n for press_anim in press_array:\r\n sh.set_pixels(press_anim)\r\n time.sleep(0.1)\r\n\r\n if idImg == 'end':\r\n sh.set_pixels(end_array)\r\n time.sleep(0.3)\r\n\r\n if idImg == 'error':\r\n sh.set_pixels(error_array)\r\n time.sleep(0.5)\r\n if idImg == 'wait':\r\n sh.set_pixels(wait_img)\r\n # here we have animation by rotate the screen\r\n for rot in orientation:\r\n sh.set_rotation(rot)\r\n time.sleep(0.2)\r\n\r\n if idImg == 'reset':\r\n sh.set_pixels(reset_array)\r\n\r\n # if given parameter was not equal to wait and or error show us a greeen image until next function call\r\n # green image means that everything is okay\r\n if (idImg != 'wait' and idImg != 'end' and idImg != 'error' and idImg != 'reset'):\r\n sh.set_pixels(working_array)\r\n\r\n # Handle the exception\r\n except Exception as e_display_eception:\r\n global anyException\r\n anyException = True \r\n logger.error('Time from start: %s,Time 
is: %s,ERROR: %s',timer1.minsOfRun(),timer1.nowForLog(),str(e_display_eception))\r\n return \"ERROR\"\r\n\r\ndef showInfo(measure):\r\n '''\r\n that function gets random color and displays measure parameter on SenseHat screen\r\n '''\r\n try:\r\n # define RGB colors\r\n r = [50,0,0]\r\n g = [0,50,0]\r\n b = [0,0,50]\r\n p = [50,0,50]\r\n c = [0,50,50]\r\n u = [50,50,0]\r\n # store all colors in one array for easier draw\r\n textColours = [r,g,b,p,c,u]\r\n # get random color\r\n color = textColours[random.randint(0,len(textColours)-1)]\r\n # set rotation to 0 degrees\r\n sh.set_rotation(0)\r\n # show message\r\n sh.show_message(str(measure), text_colour =color, scroll_speed=0.05)\r\n # sleep one secound\r\n time.sleep(1)\r\n\r\n # handle the exception\r\n except Exception as e_displayText_exception:\r\n logger.error('cannot show message on senseHat: %s',e_displayText_exception)\r\n pictures('error')\r\ndef ephemISS():\r\n '''\r\n Ephem module funciton\r\n '''\r\n try:\r\n # ehem computing for logs\r\n # SOURCE = CELESTREAK.COM \r\n # DAY = 26.01.2019\r\n nameOfStation = 'ISS (ZARYA)'\r\n firstLine = '1 25544U 98067A 19027.58387731 .00001656 00000-0 33287-4 0 9996'\r\n secondLine = '2 25544 51.6426 340.5081 0004927 322.6857 20.8029 15.53199695153409'\r\n stationISS = ephem.readtle(nameOfStation,firstLine,secondLine)\r\n stationISS.compute()\r\n\r\n # calculate result\r\n if(stationISS.sublat)<0:\r\n return 'ISS is in Southern hemisphere'\r\n else:\r\n return 'ISS is in Northern hemisphere'\r\n # handle the exception\r\n except Exception as e_ISS_eception:\r\n global anyException\r\n anyException = True \r\n logger.error('Cannot get EPHEM resulit eroor:%s',str(e_ISS_eception))\r\n pictures('error')\r\n return \"ERROR\"\r\n\r\n\r\nclass timer:\r\n '''\r\n Timer obiect give us easier ability to count time and get better logs\r\n\r\n '''\r\n\r\n # set start time and end time of obiect\r\n def __init__(self):\r\n # get start time from datatime module\r\n self.startTime = 
datetime.datetime.now()\r\n # count the endtime, programTime was defined at the top\r\n self.endTime = self.startTime + datetime.timedelta(minutes=programTime)\r\n\r\n def minsOfRun(self):\r\n # cuted output for logs\r\n # example '0:00:27' - how much time program is already running\r\n return str(datetime.datetime.now()-self.startTime)[:7] \r\n def now(self):\r\n # return datetime obiect for check actual time\r\n self.time = datetime.datetime.now()\r\n return self.time\r\n def nowForLog(self):\r\n # cuted output for nicer logs:\r\n # example '14:52:32'\r\n return str(datetime.datetime.now())[11:19]\r\n\r\n# ____________________________\r\n# INICIALIZE PROGRAM\r\n# ____________________________\r\n\r\ntry:\r\n # set timer obiect\r\n timer1 = timer()\r\n # call function to setup logging\r\n setLoggingFile()\r\n # first log\r\n logger.debug('starting program,time is: %s, program will be running for: %smin',timer1.now(),programTime)\r\n # log ephem return\r\n logger.info('EPHEM: %s',ephemISS())\r\n # connect to SenseHat\r\n sh = SenseHat()\r\n # sleep 2 sec\r\n time.sleep(2)\r\n # show welcome screen\r\n pictures('welcome')# \r\n\r\n# handle the exception\r\nexcept Exception as e_init_exception:\r\n anyException = True \r\n logger.error('INIT ERROR: %s',str(e_init_exception))\r\n\r\n# ____________________________\r\n# SET VARIABLES\r\n# ____________________________\r\n\r\naverageTemp=0\r\naverageHum=0\r\naveragePress=0\r\nrounds = 0\r\nlowestTemp = 9999999999\r\nlowestHum = 9999999999\r\nlowestPress = 9999999999\r\nhigestTemp = 0\r\nhigestHum = 0\r\nhigestPress = 0\r\n\r\n# ____________________________\r\n# MAIN LOOP OF PROGRAM\r\n# ____________________________\r\n\r\n# while timer1 obiect (time now) is smaller than (endTime)\r\nwhile(timer1.now()<timer1.endTime):\r\n try:\r\n # check oversize function return\r\n if(isItOversized()):\r\n logger.debug('OVERSIZED EXITING')\r\n break\r\n\r\n\r\n # add rounds (average measurements will be calculate on this variable)\r\n 
rounds+=1\r\n # log round and time for start\r\n logger.debug('Start round: %s,Time from start: %s',rounds,timer1.minsOfRun())\r\n # show wait image and show active round on screen\r\n pictures('wait')\r\n showInfo('round: '+str(rounds))\r\n\r\n # show temperature animation, measure temperature and display result on screen \r\n pictures('temp')\r\n tempNowIs = measure('temp')\r\n showInfo(str(tempNowIs)+\" 'C\")\r\n\r\n # same to humidity\r\n pictures('hum')\r\n humNowIs = measure('hum')\r\n showInfo(str(humNowIs)+' %')\r\n\r\n # same to pressure\r\n pictures('press')\r\n pressNowIs = measure('press')\r\n showInfo(str(pressNowIs)+' mbar')\r\n\r\n # log all results\r\n logger.info('Time is: %s,Time from start: %s,Temp: %s,Hum: %s,Press: %s',timer1.nowForLog(),timer1.minsOfRun(),tempNowIs,humNowIs,pressNowIs)\r\n\r\n\r\n # if there was not any error count lower, higest and average measurements\r\n if(tempNowIs != 'ERROR' and humNowIs != 'ERROR' and pressNowIs != 'ERROR'):\r\n averageTemp+=tempNowIs\r\n averageHum+=humNowIs\r\n averagePress+=pressNowIs\r\n\r\n if tempNowIs<lowestTemp:\r\n lowestTemp = tempNowIs\r\n if tempNowIs>higestTemp:\r\n higestTemp = tempNowIs\r\n\r\n if humNowIs < lowestHum:\r\n lowestHum = humNowIs\r\n if humNowIs>higestHum:\r\n higestHum = humNowIs\r\n\r\n if pressNowIs<lowestPress:\r\n lowestPress = pressNowIs\r\n if pressNowIs>higestPress:\r\n higestPress = pressNowIs\r\n else:\r\n # if there was an error dont count this round\r\n rounds-=1\r\n\r\n # handle main exception\r\n except Exception as e_main_exception:\r\n anyException = True \r\n logger.error('Time from start: %s,Time is: %s,ERROR: %s',timer1.minsOfRun(),timer1.nowForLog(),str(e_main_exception))\r\n pictures('error')\r\n\r\n\r\n# ____________________________\r\n# AFTER MAIN LOOP\r\n# ____________________________\r\n\r\n\r\ntry:\r\n pictures('end')\r\n # calculate and round average measurements\r\n averageTemp/=rounds\r\n averageHum/=rounds\r\n averagePress/=rounds\r\n 
averageTemp = round(averageTemp,2)\r\n averageHum = round(averageHum,2)\r\n averagePress = round(averagePress,2)\r\n lowestTemp = round(lowestTemp,2)\r\n lowestHum = round(lowestHum,2)\r\n lowestPress = round(lowestPress,2)\r\n higestTemp = round(higestTemp,2)\r\n higestHum = round(higestHum,2)\r\n higestPress = round(higestPress,2)\r\n\r\n # log all\r\n logger.info('average Temp: %s,average hum: %s,average press: %s',averageTemp,averageHum,averagePress)\r\n logger.info('Temp: highest: %s lowest: %s ,Hum: highest: %s lowest: %s ,Press: highest: %s lowest: %s',higestTemp,lowestTemp,higestHum,lowestHum,higestPress,lowestPress)\r\n logger.debug('code succesfully exited after: %s,expected time: %smin, time of end is: %s, problems: %s',timer1.minsOfRun(),programTime,timer1.now(),anyException)\r\n logger.debug('program ended with %s rounds of collecting data',rounds)\r\n # say goodbye\r\n time.sleep(3)\r\n pictures('reset')\r\n\r\n\r\n# exit on that Exception\r\nexcept Exception as e_sumUp_exception:\r\n print('CANNOT SUMUP DATA, EXITING')\r\n logger.error('SUMUP ERROR: %s',str(e_sumUp_exception))\r\n exit()",
"step-ids": [
10,
12,
14,
15,
16
]
}
|
[
10,
12,
14,
15,
16
] |
<|reserved_special_token_0|>
class TestRandomSelectNode(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TestRandomSample(unittest.TestCase):
def setUp(self):
np.random.seed(127)
def test_okay(self):
graph = BELGraph()
nodes = make_nodes(50)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
n_edges = 500
for u, v in edges[:n_edges]:
graph.add_qualified_edge(u, v, relation=INCREASES, citation=str
(uuid4()), evidence=str(uuid4()))
self.assertEqual(n_edges, graph.number_of_edges())
sg = get_random_subgraph(graph, number_edges=250, number_seed_edges
=5, seed=127)
self.assertEqual(250, sg.number_of_edges())
def test_too_small(self):
graph = BELGraph()
nodes = make_nodes(11)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
n_edges = 25
for u, v in edges[:n_edges]:
graph.add_qualified_edge(u, v, relation=INCREASES, citation=str
(uuid4()), evidence=str(uuid4()))
self.assertEqual(n_edges, graph.number_of_edges())
sg = get_random_subgraph(graph, number_edges=250, number_seed_edges
=5, seed=127)
self.assertEqual(graph.number_of_edges(), sg.number_of_edges(), msg
=
'since graph is too small, the subgraph should contain the whole thing'
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestRandomSelectNode(unittest.TestCase):
<|reserved_special_token_0|>
def setUp(self):
self.random_state = np.random.RandomState(seed=127)
self.trials = 30000
def test_randomly_select_node_1(self):
"""Tests that randomly selecting nodes works"""
a, b, c, d = (n() for _ in range(4))
g = BELGraph()
g.add_edge(a, b)
g.add_edge(b, c)
g.add_edge(b, d)
self.assertEqual(1, g.degree(a))
self.assertEqual(3, g.degree(b))
self.assertEqual(1, g.degree(c))
self.assertEqual(1, g.degree(d))
no_grow = set()
node_counter = Counter(randomly_select_node(g, no_grow, self.
random_state) for _ in range(self.trials))
self.assertIn(a, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[a] / self.trials, places=2)
self.assertIn(b, node_counter)
self.assertAlmostEqual(3 / 6, node_counter[b] / self.trials, places=2)
self.assertIn(c, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[c] / self.trials, places=2)
self.assertIn(d, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[d] / self.trials, places=2)
def test_randomly_select_node_2(self):
"""Tests that randomly selecting nodes works, but disallow C"""
a, b, c, d = (n() for _ in range(4))
g = BELGraph()
g.add_edge(a, b)
g.add_edge(b, c)
g.add_edge(b, d)
self.assertEqual(1, g.degree(a))
self.assertEqual(3, g.degree(b))
self.assertEqual(1, g.degree(c))
self.assertEqual(1, g.degree(d))
no_grow = {c}
node_counter = Counter(randomly_select_node(g, no_grow, self.
random_state) for _ in range(self.trials))
self.assertIn(a, node_counter)
self.assertAlmostEqual(1 / 5, node_counter[a] / self.trials, places=2)
self.assertIn(b, node_counter)
self.assertAlmostEqual(3 / 5, node_counter[b] / self.trials, places=2)
self.assertNotIn(c, node_counter)
self.assertIn(d, node_counter)
self.assertAlmostEqual(1 / 5, node_counter[d] / self.trials, places=2)
<|reserved_special_token_0|>
class TestRandomSample(unittest.TestCase):
def setUp(self):
np.random.seed(127)
def test_okay(self):
graph = BELGraph()
nodes = make_nodes(50)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
n_edges = 500
for u, v in edges[:n_edges]:
graph.add_qualified_edge(u, v, relation=INCREASES, citation=str
(uuid4()), evidence=str(uuid4()))
self.assertEqual(n_edges, graph.number_of_edges())
sg = get_random_subgraph(graph, number_edges=250, number_seed_edges
=5, seed=127)
self.assertEqual(250, sg.number_of_edges())
def test_too_small(self):
graph = BELGraph()
nodes = make_nodes(11)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
n_edges = 25
for u, v in edges[:n_edges]:
graph.add_qualified_edge(u, v, relation=INCREASES, citation=str
(uuid4()), evidence=str(uuid4()))
self.assertEqual(n_edges, graph.number_of_edges())
sg = get_random_subgraph(graph, number_edges=250, number_seed_edges
=5, seed=127)
self.assertEqual(graph.number_of_edges(), sg.number_of_edges(), msg
=
'since graph is too small, the subgraph should contain the whole thing'
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestRandomSelectNode(unittest.TestCase):
"""Test random node selection"""
def setUp(self):
self.random_state = np.random.RandomState(seed=127)
self.trials = 30000
def test_randomly_select_node_1(self):
"""Tests that randomly selecting nodes works"""
a, b, c, d = (n() for _ in range(4))
g = BELGraph()
g.add_edge(a, b)
g.add_edge(b, c)
g.add_edge(b, d)
self.assertEqual(1, g.degree(a))
self.assertEqual(3, g.degree(b))
self.assertEqual(1, g.degree(c))
self.assertEqual(1, g.degree(d))
no_grow = set()
node_counter = Counter(randomly_select_node(g, no_grow, self.
random_state) for _ in range(self.trials))
self.assertIn(a, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[a] / self.trials, places=2)
self.assertIn(b, node_counter)
self.assertAlmostEqual(3 / 6, node_counter[b] / self.trials, places=2)
self.assertIn(c, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[c] / self.trials, places=2)
self.assertIn(d, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[d] / self.trials, places=2)
def test_randomly_select_node_2(self):
"""Tests that randomly selecting nodes works, but disallow C"""
a, b, c, d = (n() for _ in range(4))
g = BELGraph()
g.add_edge(a, b)
g.add_edge(b, c)
g.add_edge(b, d)
self.assertEqual(1, g.degree(a))
self.assertEqual(3, g.degree(b))
self.assertEqual(1, g.degree(c))
self.assertEqual(1, g.degree(d))
no_grow = {c}
node_counter = Counter(randomly_select_node(g, no_grow, self.
random_state) for _ in range(self.trials))
self.assertIn(a, node_counter)
self.assertAlmostEqual(1 / 5, node_counter[a] / self.trials, places=2)
self.assertIn(b, node_counter)
self.assertAlmostEqual(3 / 5, node_counter[b] / self.trials, places=2)
self.assertNotIn(c, node_counter)
self.assertIn(d, node_counter)
self.assertAlmostEqual(1 / 5, node_counter[d] / self.trials, places=2)
<|reserved_special_token_0|>
class TestRandomSample(unittest.TestCase):
def setUp(self):
np.random.seed(127)
def test_okay(self):
graph = BELGraph()
nodes = make_nodes(50)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
n_edges = 500
for u, v in edges[:n_edges]:
graph.add_qualified_edge(u, v, relation=INCREASES, citation=str
(uuid4()), evidence=str(uuid4()))
self.assertEqual(n_edges, graph.number_of_edges())
sg = get_random_subgraph(graph, number_edges=250, number_seed_edges
=5, seed=127)
self.assertEqual(250, sg.number_of_edges())
def test_too_small(self):
graph = BELGraph()
nodes = make_nodes(11)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
n_edges = 25
for u, v in edges[:n_edges]:
graph.add_qualified_edge(u, v, relation=INCREASES, citation=str
(uuid4()), evidence=str(uuid4()))
self.assertEqual(n_edges, graph.number_of_edges())
sg = get_random_subgraph(graph, number_edges=250, number_seed_edges
=5, seed=127)
self.assertEqual(graph.number_of_edges(), sg.number_of_edges(), msg
=
'since graph is too small, the subgraph should contain the whole thing'
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def n():
"""Generates a PyBEL node tuple
:rtype: tuple
"""
return PROTEIN, 'TEST', str(uuid4())
class TestRandomSelectNode(unittest.TestCase):
"""Test random node selection"""
def setUp(self):
self.random_state = np.random.RandomState(seed=127)
self.trials = 30000
def test_randomly_select_node_1(self):
"""Tests that randomly selecting nodes works"""
a, b, c, d = (n() for _ in range(4))
g = BELGraph()
g.add_edge(a, b)
g.add_edge(b, c)
g.add_edge(b, d)
self.assertEqual(1, g.degree(a))
self.assertEqual(3, g.degree(b))
self.assertEqual(1, g.degree(c))
self.assertEqual(1, g.degree(d))
no_grow = set()
node_counter = Counter(randomly_select_node(g, no_grow, self.
random_state) for _ in range(self.trials))
self.assertIn(a, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[a] / self.trials, places=2)
self.assertIn(b, node_counter)
self.assertAlmostEqual(3 / 6, node_counter[b] / self.trials, places=2)
self.assertIn(c, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[c] / self.trials, places=2)
self.assertIn(d, node_counter)
self.assertAlmostEqual(1 / 6, node_counter[d] / self.trials, places=2)
def test_randomly_select_node_2(self):
"""Tests that randomly selecting nodes works, but disallow C"""
a, b, c, d = (n() for _ in range(4))
g = BELGraph()
g.add_edge(a, b)
g.add_edge(b, c)
g.add_edge(b, d)
self.assertEqual(1, g.degree(a))
self.assertEqual(3, g.degree(b))
self.assertEqual(1, g.degree(c))
self.assertEqual(1, g.degree(d))
no_grow = {c}
node_counter = Counter(randomly_select_node(g, no_grow, self.
random_state) for _ in range(self.trials))
self.assertIn(a, node_counter)
self.assertAlmostEqual(1 / 5, node_counter[a] / self.trials, places=2)
self.assertIn(b, node_counter)
self.assertAlmostEqual(3 / 5, node_counter[b] / self.trials, places=2)
self.assertNotIn(c, node_counter)
self.assertIn(d, node_counter)
self.assertAlmostEqual(1 / 5, node_counter[d] / self.trials, places=2)
<|reserved_special_token_0|>
class TestRandomSample(unittest.TestCase):
def setUp(self):
np.random.seed(127)
def test_okay(self):
graph = BELGraph()
nodes = make_nodes(50)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
n_edges = 500
for u, v in edges[:n_edges]:
graph.add_qualified_edge(u, v, relation=INCREASES, citation=str
(uuid4()), evidence=str(uuid4()))
self.assertEqual(n_edges, graph.number_of_edges())
sg = get_random_subgraph(graph, number_edges=250, number_seed_edges
=5, seed=127)
self.assertEqual(250, sg.number_of_edges())
def test_too_small(self):
graph = BELGraph()
nodes = make_nodes(11)
edges = list(itt.combinations(nodes, r=2))
random.shuffle(edges)
n_edges = 25
for u, v in edges[:n_edges]:
graph.add_qualified_edge(u, v, relation=INCREASES, citation=str
(uuid4()), evidence=str(uuid4()))
self.assertEqual(n_edges, graph.number_of_edges())
sg = get_random_subgraph(graph, number_edges=250, number_seed_edges
=5, seed=127)
self.assertEqual(graph.number_of_edges(), sg.number_of_edges(), msg
=
'since graph is too small, the subgraph should contain the whole thing'
)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import itertools as itt
import random
import unittest
from collections import Counter
from uuid import uuid4
import numpy as np
from pybel import BELGraph
from pybel.constants import INCREASES, PROTEIN
from pybel.dsl import protein
from pybel_tools.selection import get_random_subgraph
from pybel_tools.selection.random_subgraph import randomly_select_node
def n():
    """Generate a PyBEL protein node tuple with a unique name.

    :rtype: tuple
    """
    identifier = str(uuid4())
    return PROTEIN, 'TEST', identifier
class TestRandomSelectNode(unittest.TestCase):
    """Tests for degree-weighted random node selection."""

    def setUp(self):
        # A fixed seed keeps the measured selection frequencies reproducible.
        self.random_state = np.random.RandomState(seed=127)
        self.trials = 30000

    def test_randomly_select_node_1(self):
        """Selection frequency should be proportional to node degree."""
        a, b, c, d = (n() for _ in range(4))

        g = BELGraph()
        g.add_edge(a, b)
        g.add_edge(b, c)
        g.add_edge(b, d)

        for node, degree in ((a, 1), (b, 3), (c, 1), (d, 1)):
            self.assertEqual(degree, g.degree(node))

        node_counter = Counter(
            randomly_select_node(g, set(), self.random_state)
            for _ in range(self.trials)
        )

        # Total degree mass is 1 + 3 + 1 + 1 = 6, so each node should be
        # drawn with probability degree / 6.
        for node, degree in ((a, 1), (b, 3), (c, 1), (d, 1)):
            self.assertIn(node, node_counter)
            self.assertAlmostEqual(degree / 6, node_counter[node] / self.trials, places=2)

    def test_randomly_select_node_2(self):
        """Nodes in the no-grow set must never be selected."""
        a, b, c, d = (n() for _ in range(4))

        g = BELGraph()
        g.add_edge(a, b)
        g.add_edge(b, c)
        g.add_edge(b, d)

        for node, degree in ((a, 1), (b, 3), (c, 1), (d, 1)):
            self.assertEqual(degree, g.degree(node))

        node_counter = Counter(
            randomly_select_node(g, {c}, self.random_state)
            for _ in range(self.trials)
        )

        self.assertNotIn(c, node_counter)

        # With c excluded, the remaining degree mass is 1 + 3 + 1 = 5.
        for node, degree in ((a, 1), (b, 3), (d, 1)):
            self.assertIn(node, node_counter)
            self.assertAlmostEqual(degree / 5, node_counter[node] / self.trials, places=2)
def make_nodes(n):
    """Return a list of PyBEL protein node data dictionaries.

    :param int n: number of nodes to create
    :rtype: list[protein]
    """
    # Bug fix: range(1, n) produced only n - 1 nodes; use an inclusive upper
    # bound so the function returns exactly ``n`` nodes as documented.
    return [
        protein(namespace='NS', name=str(i))
        for i in range(1, n + 1)
    ]
class TestRandomSample(unittest.TestCase):
    """Tests for :func:`get_random_subgraph`."""

    def setUp(self):
        """Seed both random number generators used by the tests.

        ``random.shuffle`` draws from the stdlib ``random`` module, which was
        previously never seeded, so the generated edge order varied between
        runs even though NumPy was seeded.
        """
        np.random.seed(127)
        random.seed(127)

    def _make_graph(self, n_nodes, n_edges):
        """Build a BELGraph with *n_edges* randomly chosen qualified edges."""
        graph = BELGraph()
        nodes = make_nodes(n_nodes)

        edges = list(itt.combinations(nodes, r=2))
        random.shuffle(edges)

        for u, v in edges[:n_edges]:
            graph.add_qualified_edge(
                u, v,
                relation=INCREASES,
                citation=str(uuid4()),
                evidence=str(uuid4()),
            )

        self.assertEqual(n_edges, graph.number_of_edges())
        return graph

    def test_okay(self):
        """A large enough graph yields exactly the requested number of edges."""
        graph = self._make_graph(50, 500)

        sg = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=127)
        self.assertEqual(250, sg.number_of_edges())

    def test_too_small(self):
        """Requesting more edges than exist returns the whole graph."""
        graph = self._make_graph(11, 25)

        sg = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=127)

        self.assertEqual(graph.number_of_edges(), sg.number_of_edges(),
                         msg='since graph is too small, the subgraph should contain the whole thing')
|
flexible
|
{
"blob_id": "3a88ff479e3b01518d79e9930c29514863f96f9b",
"index": 1568,
"step-1": "<mask token>\n\n\nclass TestRandomSelectNode(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass TestRandomSample(unittest.TestCase):\n\n def setUp(self):\n np.random.seed(127)\n\n def test_okay(self):\n graph = BELGraph()\n nodes = make_nodes(50)\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n n_edges = 500\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(250, sg.number_of_edges())\n\n def test_too_small(self):\n graph = BELGraph()\n nodes = make_nodes(11)\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n n_edges = 25\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(graph.number_of_edges(), sg.number_of_edges(), msg\n =\n 'since graph is too small, the subgraph should contain the whole thing'\n )\n",
"step-2": "<mask token>\n\n\nclass TestRandomSelectNode(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.random_state = np.random.RandomState(seed=127)\n self.trials = 30000\n\n def test_randomly_select_node_1(self):\n \"\"\"Tests that randomly selecting nodes works\"\"\"\n a, b, c, d = (n() for _ in range(4))\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n no_grow = set()\n node_counter = Counter(randomly_select_node(g, no_grow, self.\n random_state) for _ in range(self.trials))\n self.assertIn(a, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[a] / self.trials, places=2)\n self.assertIn(b, node_counter)\n self.assertAlmostEqual(3 / 6, node_counter[b] / self.trials, places=2)\n self.assertIn(c, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[c] / self.trials, places=2)\n self.assertIn(d, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[d] / self.trials, places=2)\n\n def test_randomly_select_node_2(self):\n \"\"\"Tests that randomly selecting nodes works, but disallow C\"\"\"\n a, b, c, d = (n() for _ in range(4))\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n no_grow = {c}\n node_counter = Counter(randomly_select_node(g, no_grow, self.\n random_state) for _ in range(self.trials))\n self.assertIn(a, node_counter)\n self.assertAlmostEqual(1 / 5, node_counter[a] / self.trials, places=2)\n self.assertIn(b, node_counter)\n self.assertAlmostEqual(3 / 5, node_counter[b] / self.trials, places=2)\n self.assertNotIn(c, node_counter)\n self.assertIn(d, node_counter)\n self.assertAlmostEqual(1 / 5, node_counter[d] / self.trials, places=2)\n\n\n<mask token>\n\n\nclass 
TestRandomSample(unittest.TestCase):\n\n def setUp(self):\n np.random.seed(127)\n\n def test_okay(self):\n graph = BELGraph()\n nodes = make_nodes(50)\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n n_edges = 500\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(250, sg.number_of_edges())\n\n def test_too_small(self):\n graph = BELGraph()\n nodes = make_nodes(11)\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n n_edges = 25\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(graph.number_of_edges(), sg.number_of_edges(), msg\n =\n 'since graph is too small, the subgraph should contain the whole thing'\n )\n",
"step-3": "<mask token>\n\n\nclass TestRandomSelectNode(unittest.TestCase):\n \"\"\"Test random node selection\"\"\"\n\n def setUp(self):\n self.random_state = np.random.RandomState(seed=127)\n self.trials = 30000\n\n def test_randomly_select_node_1(self):\n \"\"\"Tests that randomly selecting nodes works\"\"\"\n a, b, c, d = (n() for _ in range(4))\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n no_grow = set()\n node_counter = Counter(randomly_select_node(g, no_grow, self.\n random_state) for _ in range(self.trials))\n self.assertIn(a, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[a] / self.trials, places=2)\n self.assertIn(b, node_counter)\n self.assertAlmostEqual(3 / 6, node_counter[b] / self.trials, places=2)\n self.assertIn(c, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[c] / self.trials, places=2)\n self.assertIn(d, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[d] / self.trials, places=2)\n\n def test_randomly_select_node_2(self):\n \"\"\"Tests that randomly selecting nodes works, but disallow C\"\"\"\n a, b, c, d = (n() for _ in range(4))\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n no_grow = {c}\n node_counter = Counter(randomly_select_node(g, no_grow, self.\n random_state) for _ in range(self.trials))\n self.assertIn(a, node_counter)\n self.assertAlmostEqual(1 / 5, node_counter[a] / self.trials, places=2)\n self.assertIn(b, node_counter)\n self.assertAlmostEqual(3 / 5, node_counter[b] / self.trials, places=2)\n self.assertNotIn(c, node_counter)\n self.assertIn(d, node_counter)\n self.assertAlmostEqual(1 / 5, node_counter[d] / self.trials, places=2)\n\n\n<mask token>\n\n\nclass 
TestRandomSample(unittest.TestCase):\n\n def setUp(self):\n np.random.seed(127)\n\n def test_okay(self):\n graph = BELGraph()\n nodes = make_nodes(50)\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n n_edges = 500\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(250, sg.number_of_edges())\n\n def test_too_small(self):\n graph = BELGraph()\n nodes = make_nodes(11)\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n n_edges = 25\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(graph.number_of_edges(), sg.number_of_edges(), msg\n =\n 'since graph is too small, the subgraph should contain the whole thing'\n )\n",
"step-4": "<mask token>\n\n\ndef n():\n \"\"\"Generates a PyBEL node tuple\n\n :rtype: tuple\n \"\"\"\n return PROTEIN, 'TEST', str(uuid4())\n\n\nclass TestRandomSelectNode(unittest.TestCase):\n \"\"\"Test random node selection\"\"\"\n\n def setUp(self):\n self.random_state = np.random.RandomState(seed=127)\n self.trials = 30000\n\n def test_randomly_select_node_1(self):\n \"\"\"Tests that randomly selecting nodes works\"\"\"\n a, b, c, d = (n() for _ in range(4))\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n no_grow = set()\n node_counter = Counter(randomly_select_node(g, no_grow, self.\n random_state) for _ in range(self.trials))\n self.assertIn(a, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[a] / self.trials, places=2)\n self.assertIn(b, node_counter)\n self.assertAlmostEqual(3 / 6, node_counter[b] / self.trials, places=2)\n self.assertIn(c, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[c] / self.trials, places=2)\n self.assertIn(d, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[d] / self.trials, places=2)\n\n def test_randomly_select_node_2(self):\n \"\"\"Tests that randomly selecting nodes works, but disallow C\"\"\"\n a, b, c, d = (n() for _ in range(4))\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n no_grow = {c}\n node_counter = Counter(randomly_select_node(g, no_grow, self.\n random_state) for _ in range(self.trials))\n self.assertIn(a, node_counter)\n self.assertAlmostEqual(1 / 5, node_counter[a] / self.trials, places=2)\n self.assertIn(b, node_counter)\n self.assertAlmostEqual(3 / 5, node_counter[b] / self.trials, places=2)\n self.assertNotIn(c, node_counter)\n self.assertIn(d, 
node_counter)\n self.assertAlmostEqual(1 / 5, node_counter[d] / self.trials, places=2)\n\n\n<mask token>\n\n\nclass TestRandomSample(unittest.TestCase):\n\n def setUp(self):\n np.random.seed(127)\n\n def test_okay(self):\n graph = BELGraph()\n nodes = make_nodes(50)\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n n_edges = 500\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(250, sg.number_of_edges())\n\n def test_too_small(self):\n graph = BELGraph()\n nodes = make_nodes(11)\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n n_edges = 25\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(graph.number_of_edges(), sg.number_of_edges(), msg\n =\n 'since graph is too small, the subgraph should contain the whole thing'\n )\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport itertools as itt\nimport random\nimport unittest\nfrom collections import Counter\nfrom uuid import uuid4\n\nimport numpy as np\n\nfrom pybel import BELGraph\nfrom pybel.constants import INCREASES, PROTEIN\nfrom pybel.dsl import protein\nfrom pybel_tools.selection import get_random_subgraph\nfrom pybel_tools.selection.random_subgraph import randomly_select_node\n\n\ndef n():\n \"\"\"Generates a PyBEL node tuple\n\n :rtype: tuple\n \"\"\"\n return PROTEIN, 'TEST', str(uuid4())\n\n\nclass TestRandomSelectNode(unittest.TestCase):\n \"\"\"Test random node selection\"\"\"\n\n def setUp(self):\n self.random_state = np.random.RandomState(seed=127)\n self.trials = 30000\n\n def test_randomly_select_node_1(self):\n \"\"\"Tests that randomly selecting nodes works\"\"\"\n a, b, c, d = (n() for _ in range(4))\n\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n\n no_grow = set()\n\n node_counter = Counter(\n randomly_select_node(g, no_grow, self.random_state)\n for _ in range(self.trials)\n )\n\n self.assertIn(a, node_counter)\n self.assertAlmostEqual((1 / 6), node_counter[a] / self.trials, places=2)\n\n self.assertIn(b, node_counter)\n self.assertAlmostEqual((3 / 6), node_counter[b] / self.trials, places=2)\n\n self.assertIn(c, node_counter)\n self.assertAlmostEqual((1 / 6), node_counter[c] / self.trials, places=2)\n\n self.assertIn(d, node_counter)\n self.assertAlmostEqual((1 / 6), node_counter[d] / self.trials, places=2)\n\n def test_randomly_select_node_2(self):\n \"\"\"Tests that randomly selecting nodes works, but disallow C\"\"\"\n a, b, c, d = (n() for _ in range(4))\n\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n 
self.assertEqual(1, g.degree(d))\n\n no_grow = {c}\n\n node_counter = Counter(\n randomly_select_node(g, no_grow, self.random_state)\n for _ in range(self.trials)\n )\n\n self.assertIn(a, node_counter)\n self.assertAlmostEqual((1 / 5), node_counter[a] / self.trials, places=2)\n\n self.assertIn(b, node_counter)\n self.assertAlmostEqual((3 / 5), node_counter[b] / self.trials, places=2)\n\n self.assertNotIn(c, node_counter)\n\n self.assertIn(d, node_counter)\n self.assertAlmostEqual((1 / 5), node_counter[d] / self.trials, places=2)\n\n\ndef make_nodes(n):\n \"\"\"Returns a list of PyBEL node data dictionaries\n\n :param int n: number nodes\n :rtype: list[protein]\n \"\"\"\n return [\n protein(namespace='NS', name=str(i))\n for i in range(1, n)\n ]\n\n\nclass TestRandomSample(unittest.TestCase):\n def setUp(self):\n np.random.seed(127)\n\n def test_okay(self):\n graph = BELGraph()\n nodes = make_nodes(50)\n\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n\n n_edges = 500\n\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(\n u, v,\n relation=INCREASES,\n citation=str(uuid4()),\n evidence=str(uuid4()),\n )\n\n self.assertEqual(n_edges, graph.number_of_edges())\n\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=127)\n self.assertEqual(250, sg.number_of_edges())\n\n def test_too_small(self):\n graph = BELGraph()\n nodes = make_nodes(11)\n\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n\n n_edges = 25\n\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(\n u, v,\n relation=INCREASES,\n citation=str(uuid4()),\n evidence=str(uuid4()),\n )\n\n self.assertEqual(n_edges, graph.number_of_edges())\n\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=127)\n\n self.assertEqual(graph.number_of_edges(), sg.number_of_edges(),\n msg='since graph is too small, the subgraph should contain the whole thing')\n",
"step-ids": [
5,
8,
9,
10,
13
]
}
|
[
5,
8,
9,
10,
13
] |
<|reserved_special_token_0|>
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Describe(base.DescribeCommand):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def Run(self, args):
guest_policy_ref = args.CONCEPTS.guest_policy.Parse()
release_track = self.ReleaseTrack()
client = osconfig_utils.GetClientInstance(release_track)
messages = osconfig_utils.GetClientMessages(release_track)
guest_policy_type = guest_policy_ref.type_
guest_policy_name = guest_policy_ref.result.RelativeName()
if guest_policy_type == type(guest_policy_type
).organization_guest_policy:
request = messages.OsconfigOrganizationsGuestPoliciesGetRequest(
name=guest_policy_name)
service = client.organizations_guestPolicies
elif guest_policy_type == type(guest_policy_type).folder_guest_policy:
request = messages.OsconfigFoldersGuestPoliciesGetRequest(name=
guest_policy_name)
service = client.folders_guestPolicies
else:
request = messages.OsconfigProjectsGuestPoliciesGetRequest(name
=guest_policy_name)
service = client.projects_guestPolicies
return service.Get(request)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Describe(base.DescribeCommand):
<|reserved_special_token_0|>
@staticmethod
def Args(parser):
resource_args.AddGuestPolicyResourceArg(parser, 'to describe.')
def Run(self, args):
guest_policy_ref = args.CONCEPTS.guest_policy.Parse()
release_track = self.ReleaseTrack()
client = osconfig_utils.GetClientInstance(release_track)
messages = osconfig_utils.GetClientMessages(release_track)
guest_policy_type = guest_policy_ref.type_
guest_policy_name = guest_policy_ref.result.RelativeName()
if guest_policy_type == type(guest_policy_type
).organization_guest_policy:
request = messages.OsconfigOrganizationsGuestPoliciesGetRequest(
name=guest_policy_name)
service = client.organizations_guestPolicies
elif guest_policy_type == type(guest_policy_type).folder_guest_policy:
request = messages.OsconfigFoldersGuestPoliciesGetRequest(name=
guest_policy_name)
service = client.folders_guestPolicies
else:
request = messages.OsconfigProjectsGuestPoliciesGetRequest(name
=guest_policy_name)
service = client.projects_guestPolicies
return service.Get(request)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Describe(base.DescribeCommand):
"""Describe the given guest policy.
## EXAMPLES
To describe the guest policy 'policy1' in the project 'project1', run:
$ {command} policy1 --project=project1
To describe the guest policy 'policy1' in the organization '12345', run:
$ {command} policy1 --organization=12345
"""
@staticmethod
def Args(parser):
resource_args.AddGuestPolicyResourceArg(parser, 'to describe.')
def Run(self, args):
guest_policy_ref = args.CONCEPTS.guest_policy.Parse()
release_track = self.ReleaseTrack()
client = osconfig_utils.GetClientInstance(release_track)
messages = osconfig_utils.GetClientMessages(release_track)
guest_policy_type = guest_policy_ref.type_
guest_policy_name = guest_policy_ref.result.RelativeName()
if guest_policy_type == type(guest_policy_type
).organization_guest_policy:
request = messages.OsconfigOrganizationsGuestPoliciesGetRequest(
name=guest_policy_name)
service = client.organizations_guestPolicies
elif guest_policy_type == type(guest_policy_type).folder_guest_policy:
request = messages.OsconfigFoldersGuestPoliciesGetRequest(name=
guest_policy_name)
service = client.folders_guestPolicies
else:
request = messages.OsconfigProjectsGuestPoliciesGetRequest(name
=guest_policy_name)
service = client.projects_guestPolicies
return service.Get(request)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute.os_config import osconfig_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.os_config import resource_args
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Describe(base.DescribeCommand):
"""Describe the given guest policy.
## EXAMPLES
To describe the guest policy 'policy1' in the project 'project1', run:
$ {command} policy1 --project=project1
To describe the guest policy 'policy1' in the organization '12345', run:
$ {command} policy1 --organization=12345
"""
@staticmethod
def Args(parser):
resource_args.AddGuestPolicyResourceArg(parser, 'to describe.')
def Run(self, args):
guest_policy_ref = args.CONCEPTS.guest_policy.Parse()
release_track = self.ReleaseTrack()
client = osconfig_utils.GetClientInstance(release_track)
messages = osconfig_utils.GetClientMessages(release_track)
guest_policy_type = guest_policy_ref.type_
guest_policy_name = guest_policy_ref.result.RelativeName()
if guest_policy_type == type(guest_policy_type
).organization_guest_policy:
request = messages.OsconfigOrganizationsGuestPoliciesGetRequest(
name=guest_policy_name)
service = client.organizations_guestPolicies
elif guest_policy_type == type(guest_policy_type).folder_guest_policy:
request = messages.OsconfigFoldersGuestPoliciesGetRequest(name=
guest_policy_name)
service = client.folders_guestPolicies
else:
request = messages.OsconfigProjectsGuestPoliciesGetRequest(name
=guest_policy_name)
service = client.projects_guestPolicies
return service.Get(request)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements command to describe a given guest policy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute.os_config import osconfig_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.os_config import resource_args
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Describe(base.DescribeCommand):
  """Describe the given guest policy.

  ## EXAMPLES

  To describe the guest policy 'policy1' in the project 'project1', run:

    $ {command} policy1 --project=project1

  To describe the guest policy 'policy1' in the organization '12345', run:

    $ {command} policy1 --organization=12345

  """

  @staticmethod
  def Args(parser):
    resource_args.AddGuestPolicyResourceArg(parser, 'to describe.')

  def Run(self, args):
    guest_policy_ref = args.CONCEPTS.guest_policy.Parse()

    release_track = self.ReleaseTrack()
    client = osconfig_utils.GetClientInstance(release_track)
    messages = osconfig_utils.GetClientMessages(release_track)

    guest_policy_name = guest_policy_ref.result.RelativeName()
    guest_policy_type = guest_policy_ref.type_
    type_enum = type(guest_policy_type)

    # Pick the service and request type matching the resource hierarchy
    # level (organization, folder, or project) the policy lives under.
    if guest_policy_type == type_enum.organization_guest_policy:
      service = client.organizations_guestPolicies
      request = messages.OsconfigOrganizationsGuestPoliciesGetRequest(
          name=guest_policy_name)
    elif guest_policy_type == type_enum.folder_guest_policy:
      service = client.folders_guestPolicies
      request = messages.OsconfigFoldersGuestPoliciesGetRequest(
          name=guest_policy_name)
    else:
      service = client.projects_guestPolicies
      request = messages.OsconfigProjectsGuestPoliciesGetRequest(
          name=guest_policy_name)

    return service.Get(request)
|
flexible
|
{
"blob_id": "d6a677ed537f6493bb43bd893f3096dc058e27da",
"index": 507,
"step-1": "<mask token>\n\n\n@base.ReleaseTracks(base.ReleaseTrack.ALPHA)\nclass Describe(base.DescribeCommand):\n <mask token>\n <mask token>\n\n def Run(self, args):\n guest_policy_ref = args.CONCEPTS.guest_policy.Parse()\n release_track = self.ReleaseTrack()\n client = osconfig_utils.GetClientInstance(release_track)\n messages = osconfig_utils.GetClientMessages(release_track)\n guest_policy_type = guest_policy_ref.type_\n guest_policy_name = guest_policy_ref.result.RelativeName()\n if guest_policy_type == type(guest_policy_type\n ).organization_guest_policy:\n request = messages.OsconfigOrganizationsGuestPoliciesGetRequest(\n name=guest_policy_name)\n service = client.organizations_guestPolicies\n elif guest_policy_type == type(guest_policy_type).folder_guest_policy:\n request = messages.OsconfigFoldersGuestPoliciesGetRequest(name=\n guest_policy_name)\n service = client.folders_guestPolicies\n else:\n request = messages.OsconfigProjectsGuestPoliciesGetRequest(name\n =guest_policy_name)\n service = client.projects_guestPolicies\n return service.Get(request)\n",
"step-2": "<mask token>\n\n\n@base.ReleaseTracks(base.ReleaseTrack.ALPHA)\nclass Describe(base.DescribeCommand):\n <mask token>\n\n @staticmethod\n def Args(parser):\n resource_args.AddGuestPolicyResourceArg(parser, 'to describe.')\n\n def Run(self, args):\n guest_policy_ref = args.CONCEPTS.guest_policy.Parse()\n release_track = self.ReleaseTrack()\n client = osconfig_utils.GetClientInstance(release_track)\n messages = osconfig_utils.GetClientMessages(release_track)\n guest_policy_type = guest_policy_ref.type_\n guest_policy_name = guest_policy_ref.result.RelativeName()\n if guest_policy_type == type(guest_policy_type\n ).organization_guest_policy:\n request = messages.OsconfigOrganizationsGuestPoliciesGetRequest(\n name=guest_policy_name)\n service = client.organizations_guestPolicies\n elif guest_policy_type == type(guest_policy_type).folder_guest_policy:\n request = messages.OsconfigFoldersGuestPoliciesGetRequest(name=\n guest_policy_name)\n service = client.folders_guestPolicies\n else:\n request = messages.OsconfigProjectsGuestPoliciesGetRequest(name\n =guest_policy_name)\n service = client.projects_guestPolicies\n return service.Get(request)\n",
"step-3": "<mask token>\n\n\n@base.ReleaseTracks(base.ReleaseTrack.ALPHA)\nclass Describe(base.DescribeCommand):\n \"\"\"Describe the given guest policy.\n\n ## EXAMPLES\n\n To describe the guest policy 'policy1' in the project 'project1', run:\n\n $ {command} policy1 --project=project1\n\n To describe the guest policy 'policy1' in the organization '12345', run:\n\n $ {command} policy1 --organization=12345\n\n \"\"\"\n\n @staticmethod\n def Args(parser):\n resource_args.AddGuestPolicyResourceArg(parser, 'to describe.')\n\n def Run(self, args):\n guest_policy_ref = args.CONCEPTS.guest_policy.Parse()\n release_track = self.ReleaseTrack()\n client = osconfig_utils.GetClientInstance(release_track)\n messages = osconfig_utils.GetClientMessages(release_track)\n guest_policy_type = guest_policy_ref.type_\n guest_policy_name = guest_policy_ref.result.RelativeName()\n if guest_policy_type == type(guest_policy_type\n ).organization_guest_policy:\n request = messages.OsconfigOrganizationsGuestPoliciesGetRequest(\n name=guest_policy_name)\n service = client.organizations_guestPolicies\n elif guest_policy_type == type(guest_policy_type).folder_guest_policy:\n request = messages.OsconfigFoldersGuestPoliciesGetRequest(name=\n guest_policy_name)\n service = client.folders_guestPolicies\n else:\n request = messages.OsconfigProjectsGuestPoliciesGetRequest(name\n =guest_policy_name)\n service = client.projects_guestPolicies\n return service.Get(request)\n",
"step-4": "<mask token>\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom googlecloudsdk.api_lib.compute.os_config import osconfig_utils\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.command_lib.compute.os_config import resource_args\n\n\n@base.ReleaseTracks(base.ReleaseTrack.ALPHA)\nclass Describe(base.DescribeCommand):\n \"\"\"Describe the given guest policy.\n\n ## EXAMPLES\n\n To describe the guest policy 'policy1' in the project 'project1', run:\n\n $ {command} policy1 --project=project1\n\n To describe the guest policy 'policy1' in the organization '12345', run:\n\n $ {command} policy1 --organization=12345\n\n \"\"\"\n\n @staticmethod\n def Args(parser):\n resource_args.AddGuestPolicyResourceArg(parser, 'to describe.')\n\n def Run(self, args):\n guest_policy_ref = args.CONCEPTS.guest_policy.Parse()\n release_track = self.ReleaseTrack()\n client = osconfig_utils.GetClientInstance(release_track)\n messages = osconfig_utils.GetClientMessages(release_track)\n guest_policy_type = guest_policy_ref.type_\n guest_policy_name = guest_policy_ref.result.RelativeName()\n if guest_policy_type == type(guest_policy_type\n ).organization_guest_policy:\n request = messages.OsconfigOrganizationsGuestPoliciesGetRequest(\n name=guest_policy_name)\n service = client.organizations_guestPolicies\n elif guest_policy_type == type(guest_policy_type).folder_guest_policy:\n request = messages.OsconfigFoldersGuestPoliciesGetRequest(name=\n guest_policy_name)\n service = client.folders_guestPolicies\n else:\n request = messages.OsconfigProjectsGuestPoliciesGetRequest(name\n =guest_policy_name)\n service = client.projects_guestPolicies\n return service.Get(request)\n",
"step-5": "# -*- coding: utf-8 -*- #\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Implements command to describe a given guest policy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.api_lib.compute.os_config import osconfig_utils\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.command_lib.compute.os_config import resource_args\n\n\n@base.ReleaseTracks(base.ReleaseTrack.ALPHA)\nclass Describe(base.DescribeCommand):\n \"\"\"Describe the given guest policy.\n\n ## EXAMPLES\n\n To describe the guest policy 'policy1' in the project 'project1', run:\n\n $ {command} policy1 --project=project1\n\n To describe the guest policy 'policy1' in the organization '12345', run:\n\n $ {command} policy1 --organization=12345\n\n \"\"\"\n\n @staticmethod\n def Args(parser):\n resource_args.AddGuestPolicyResourceArg(parser, 'to describe.')\n\n def Run(self, args):\n guest_policy_ref = args.CONCEPTS.guest_policy.Parse()\n\n release_track = self.ReleaseTrack()\n client = osconfig_utils.GetClientInstance(release_track)\n messages = osconfig_utils.GetClientMessages(release_track)\n\n guest_policy_type = guest_policy_ref.type_\n guest_policy_name = guest_policy_ref.result.RelativeName()\n\n if guest_policy_type == type(guest_policy_type).organization_guest_policy:\n request = 
messages.OsconfigOrganizationsGuestPoliciesGetRequest(\n name=guest_policy_name)\n service = client.organizations_guestPolicies\n elif guest_policy_type == type(guest_policy_type).folder_guest_policy:\n request = messages.OsconfigFoldersGuestPoliciesGetRequest(\n name=guest_policy_name)\n service = client.folders_guestPolicies\n else:\n request = messages.OsconfigProjectsGuestPoliciesGetRequest(\n name=guest_policy_name)\n service = client.projects_guestPolicies\n\n return service.Get(request)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Post(db.Model):
post_id = db.Column(db.Integer, primary_key=True, nullable=False)
title = db.Column(db.String(50))
body = db.Column(db.String(200))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user_id = db.relationship(db.Integer, db.ForeignKey('User.user_id'))
def __repr__(self):
return '<Post: {} authoured by {}>'.format(self.title, self.user_id)
class Following(db.Model):
follower_id = db.Column(db.Integer, primary_key=True, nullable=False)
following_id = db.Column(db.Integer, primary_key=True, nullable=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(UserMixin, db.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __repr__(self):
return '<User {}>'.format(self.username)
<|reserved_special_token_0|>
def check_pass(self, password):
return check_password_hash(self.pass_hash, password)
class Post(db.Model):
post_id = db.Column(db.Integer, primary_key=True, nullable=False)
title = db.Column(db.String(50))
body = db.Column(db.String(200))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user_id = db.relationship(db.Integer, db.ForeignKey('User.user_id'))
def __repr__(self):
return '<Post: {} authoured by {}>'.format(self.title, self.user_id)
class Following(db.Model):
follower_id = db.Column(db.Integer, primary_key=True, nullable=False)
following_id = db.Column(db.Integer, primary_key=True, nullable=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(UserMixin, db.Model):
user_id = db.Column(db.Integer, primary_key=True, nullable=False)
username = db.Column(db.String(50), unique=True, nullable=False)
email = db.Column(db.String(100))
pass_hash = db.Column(db.String(128))
posts = db.relationship('Post', backref='user.id', lazy='dynamic')
def __repr__(self):
return '<User {}>'.format(self.username)
def set_pass(self, password):
self.pass_hash = generate_password_hash(password)
def check_pass(self, password):
return check_password_hash(self.pass_hash, password)
class Post(db.Model):
post_id = db.Column(db.Integer, primary_key=True, nullable=False)
title = db.Column(db.String(50))
body = db.Column(db.String(200))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user_id = db.relationship(db.Integer, db.ForeignKey('User.user_id'))
def __repr__(self):
return '<Post: {} authoured by {}>'.format(self.title, self.user_id)
class Following(db.Model):
follower_id = db.Column(db.Integer, primary_key=True, nullable=False)
following_id = db.Column(db.Integer, primary_key=True, nullable=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@login.user_loader
def load_user(id):
return User.query.get(int(id))
class User(UserMixin, db.Model):
user_id = db.Column(db.Integer, primary_key=True, nullable=False)
username = db.Column(db.String(50), unique=True, nullable=False)
email = db.Column(db.String(100))
pass_hash = db.Column(db.String(128))
posts = db.relationship('Post', backref='user.id', lazy='dynamic')
def __repr__(self):
return '<User {}>'.format(self.username)
def set_pass(self, password):
self.pass_hash = generate_password_hash(password)
def check_pass(self, password):
return check_password_hash(self.pass_hash, password)
class Post(db.Model):
post_id = db.Column(db.Integer, primary_key=True, nullable=False)
title = db.Column(db.String(50))
body = db.Column(db.String(200))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user_id = db.relationship(db.Integer, db.ForeignKey('User.user_id'))
def __repr__(self):
return '<Post: {} authoured by {}>'.format(self.title, self.user_id)
class Following(db.Model):
follower_id = db.Column(db.Integer, primary_key=True, nullable=False)
following_id = db.Column(db.Integer, primary_key=True, nullable=False)
<|reserved_special_token_1|>
from werkzeug.security import check_password_hash, generate_password_hash
from datetime import datetime
from app import db
from app import login
from flask_login import UserMixin
@login.user_loader
def load_user(id):
return User.query.get(int(id))
class User(UserMixin, db.Model):
user_id = db.Column(db.Integer, primary_key=True, nullable=False)
username = db.Column(db.String(50), unique=True, nullable=False)
email = db.Column(db.String(100))
pass_hash = db.Column(db.String(128))
posts = db.relationship('Post', backref='user.id', lazy='dynamic')
def __repr__(self):
return "<User {}>".format(self.username)
def set_pass(self, password):
self.pass_hash = generate_password_hash(password)
def check_pass(self, password):
return check_password_hash(self.pass_hash, password)
class Post(db.Model):
post_id = db.Column(db.Integer, primary_key=True, nullable=False)
title = db.Column(db.String(50))
body = db.Column(db.String(200))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user_id = db.relationship(db.Integer, db.ForeignKey('User.user_id'))
def __repr__(self):
return "<Post: {} authoured by {}>".format(self.title, self.user_id)
class Following(db.Model):
follower_id = db.Column(db.Integer, primary_key=True, nullable=False)
following_id = db.Column(db.Integer, primary_key=True, nullable=False)
|
flexible
|
{
"blob_id": "5cfdb1f6b99f59a83a9bd42b7daf3e016eee94a8",
"index": 2898,
"step-1": "<mask token>\n\n\nclass Post(db.Model):\n post_id = db.Column(db.Integer, primary_key=True, nullable=False)\n title = db.Column(db.String(50))\n body = db.Column(db.String(200))\n timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)\n user_id = db.relationship(db.Integer, db.ForeignKey('User.user_id'))\n\n def __repr__(self):\n return '<Post: {} authoured by {}>'.format(self.title, self.user_id)\n\n\nclass Following(db.Model):\n follower_id = db.Column(db.Integer, primary_key=True, nullable=False)\n following_id = db.Column(db.Integer, primary_key=True, nullable=False)\n",
"step-2": "<mask token>\n\n\nclass User(UserMixin, db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '<User {}>'.format(self.username)\n <mask token>\n\n def check_pass(self, password):\n return check_password_hash(self.pass_hash, password)\n\n\nclass Post(db.Model):\n post_id = db.Column(db.Integer, primary_key=True, nullable=False)\n title = db.Column(db.String(50))\n body = db.Column(db.String(200))\n timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)\n user_id = db.relationship(db.Integer, db.ForeignKey('User.user_id'))\n\n def __repr__(self):\n return '<Post: {} authoured by {}>'.format(self.title, self.user_id)\n\n\nclass Following(db.Model):\n follower_id = db.Column(db.Integer, primary_key=True, nullable=False)\n following_id = db.Column(db.Integer, primary_key=True, nullable=False)\n",
"step-3": "<mask token>\n\n\nclass User(UserMixin, db.Model):\n user_id = db.Column(db.Integer, primary_key=True, nullable=False)\n username = db.Column(db.String(50), unique=True, nullable=False)\n email = db.Column(db.String(100))\n pass_hash = db.Column(db.String(128))\n posts = db.relationship('Post', backref='user.id', lazy='dynamic')\n\n def __repr__(self):\n return '<User {}>'.format(self.username)\n\n def set_pass(self, password):\n self.pass_hash = generate_password_hash(password)\n\n def check_pass(self, password):\n return check_password_hash(self.pass_hash, password)\n\n\nclass Post(db.Model):\n post_id = db.Column(db.Integer, primary_key=True, nullable=False)\n title = db.Column(db.String(50))\n body = db.Column(db.String(200))\n timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)\n user_id = db.relationship(db.Integer, db.ForeignKey('User.user_id'))\n\n def __repr__(self):\n return '<Post: {} authoured by {}>'.format(self.title, self.user_id)\n\n\nclass Following(db.Model):\n follower_id = db.Column(db.Integer, primary_key=True, nullable=False)\n following_id = db.Column(db.Integer, primary_key=True, nullable=False)\n",
"step-4": "<mask token>\n\n\n@login.user_loader\ndef load_user(id):\n return User.query.get(int(id))\n\n\nclass User(UserMixin, db.Model):\n user_id = db.Column(db.Integer, primary_key=True, nullable=False)\n username = db.Column(db.String(50), unique=True, nullable=False)\n email = db.Column(db.String(100))\n pass_hash = db.Column(db.String(128))\n posts = db.relationship('Post', backref='user.id', lazy='dynamic')\n\n def __repr__(self):\n return '<User {}>'.format(self.username)\n\n def set_pass(self, password):\n self.pass_hash = generate_password_hash(password)\n\n def check_pass(self, password):\n return check_password_hash(self.pass_hash, password)\n\n\nclass Post(db.Model):\n post_id = db.Column(db.Integer, primary_key=True, nullable=False)\n title = db.Column(db.String(50))\n body = db.Column(db.String(200))\n timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)\n user_id = db.relationship(db.Integer, db.ForeignKey('User.user_id'))\n\n def __repr__(self):\n return '<Post: {} authoured by {}>'.format(self.title, self.user_id)\n\n\nclass Following(db.Model):\n follower_id = db.Column(db.Integer, primary_key=True, nullable=False)\n following_id = db.Column(db.Integer, primary_key=True, nullable=False)\n",
"step-5": "from werkzeug.security import check_password_hash, generate_password_hash\nfrom datetime import datetime\nfrom app import db\nfrom app import login\nfrom flask_login import UserMixin\n\n\n@login.user_loader\ndef load_user(id):\n\treturn User.query.get(int(id))\n\n\nclass User(UserMixin, db.Model):\n\tuser_id = db.Column(db.Integer, primary_key=True, nullable=False)\n\tusername = db.Column(db.String(50), unique=True, nullable=False)\n\temail = db.Column(db.String(100))\n\tpass_hash = db.Column(db.String(128))\n\tposts = db.relationship('Post', backref='user.id', lazy='dynamic')\n\n\tdef __repr__(self):\n\t\treturn \"<User {}>\".format(self.username)\n\n\tdef set_pass(self, password):\n\t\tself.pass_hash = generate_password_hash(password)\n\n\tdef check_pass(self, password):\n\t\treturn check_password_hash(self.pass_hash, password)\n\n\nclass Post(db.Model):\n\tpost_id = db.Column(db.Integer, primary_key=True, nullable=False)\n\ttitle = db.Column(db.String(50))\n\tbody = db.Column(db.String(200))\n\ttimestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)\n\tuser_id = db.relationship(db.Integer, db.ForeignKey('User.user_id'))\n\n\tdef __repr__(self):\n\t\treturn \"<Post: {} authoured by {}>\".format(self.title, self.user_id)\n\n\nclass Following(db.Model):\n\tfollower_id = db.Column(db.Integer, primary_key=True, nullable=False)\n\tfollowing_id = db.Column(db.Integer, primary_key=True, nullable=False)\n",
"step-ids": [
5,
8,
10,
11,
13
]
}
|
[
5,
8,
10,
11,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(sys.argv) <= 3:
print('Not enough args usage: anova.py <*.csv> <rv1,rv2> <target to beat>')
print('ex: best-mean.py testdata.csv nicdrop 95000')
print('<rv> is response variable')
exit()
<|reserved_special_token_0|>
print('Re-run factor means')
print(response_var.groupby('code')[rv[1]].mean())
print('Lowest observed sample mean (target to beat)')
print(response_var.groupby('code')[rv[1]].mean().min())
<|reserved_special_token_0|>
for y in candidiate_factors_index:
if improved_factors_bools[i]:
all = all + y + ','
i = i + 1
print('Effects')
if len(all) == 0:
print('NONE')
exit()
print(all.rstrip(','))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(sys.argv) <= 3:
print('Not enough args usage: anova.py <*.csv> <rv1,rv2> <target to beat>')
print('ex: best-mean.py testdata.csv nicdrop 95000')
print('<rv> is response variable')
exit()
target_to_beat = int(sys.argv[3])
rv = sys.argv[2].split(',')
data = pd.read_csv(sys.argv[1], header=[0, 1])
response_var = data[[rv[0], 'factors']]
response_var.columns = response_var.columns.get_level_values(1)
print('Re-run factor means')
print(response_var.groupby('code')[rv[1]].mean())
print('Lowest observed sample mean (target to beat)')
print(response_var.groupby('code')[rv[1]].mean().min())
candidiate_factors_index = response_var.groupby('code')[rv[1]].mean(
).index.array.to_numpy()
improved_factors_bools = (response_var.groupby('code')[rv[1]].mean() <
target_to_beat).to_numpy()
all = ''
i = 0
for y in candidiate_factors_index:
if improved_factors_bools[i]:
all = all + y + ','
i = i + 1
print('Effects')
if len(all) == 0:
print('NONE')
exit()
print(all.rstrip(','))
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import sys
if len(sys.argv) <= 3:
print('Not enough args usage: anova.py <*.csv> <rv1,rv2> <target to beat>')
print('ex: best-mean.py testdata.csv nicdrop 95000')
print('<rv> is response variable')
exit()
target_to_beat = int(sys.argv[3])
rv = sys.argv[2].split(',')
data = pd.read_csv(sys.argv[1], header=[0, 1])
response_var = data[[rv[0], 'factors']]
response_var.columns = response_var.columns.get_level_values(1)
print('Re-run factor means')
print(response_var.groupby('code')[rv[1]].mean())
print('Lowest observed sample mean (target to beat)')
print(response_var.groupby('code')[rv[1]].mean().min())
candidiate_factors_index = response_var.groupby('code')[rv[1]].mean(
).index.array.to_numpy()
improved_factors_bools = (response_var.groupby('code')[rv[1]].mean() <
target_to_beat).to_numpy()
all = ''
i = 0
for y in candidiate_factors_index:
if improved_factors_bools[i]:
all = all + y + ','
i = i + 1
print('Effects')
if len(all) == 0:
print('NONE')
exit()
print(all.rstrip(','))
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import sys
#Best Mean Test
if len(sys.argv) <= 3:
print("Not enough args usage: anova.py <*.csv> <rv1,rv2> <target to beat>")
print("ex: best-mean.py testdata.csv nicdrop 95000")
print("<rv> is response variable")
exit()
target_to_beat = int(sys.argv[3]) #factors
rv = sys.argv[2].split(',')
data = pd.read_csv(sys.argv[1], header=[0,1])
response_var = data[[rv[0],'factors']]
response_var.columns = response_var.columns.get_level_values(1)
print("Re-run factor means")
print(response_var.groupby('code')[rv[1]].mean())
print("Lowest observed sample mean (target to beat)")
print(response_var.groupby('code')[rv[1]].mean().min())
#print factors still remaining as viable
candidiate_factors_index = response_var.groupby('code')[rv[1]].mean().index.array.to_numpy() #all factors from csv
improved_factors_bools = (response_var.groupby('code')[rv[1]].mean() < target_to_beat).to_numpy() #boolean series
all = ""
i=0
for y in candidiate_factors_index:
if improved_factors_bools[i]:
all = all + y + ","
i=i+1
print("Effects")
if len(all) == 0:
print("NONE")
exit()
print(all.rstrip(','))
|
flexible
|
{
"blob_id": "b9e78629fe094d933fdc0ffa2f9d9d1880e78c12",
"index": 9078,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) <= 3:\n print('Not enough args usage: anova.py <*.csv> <rv1,rv2> <target to beat>')\n print('ex: best-mean.py testdata.csv nicdrop 95000')\n print('<rv> is response variable')\n exit()\n<mask token>\nprint('Re-run factor means')\nprint(response_var.groupby('code')[rv[1]].mean())\nprint('Lowest observed sample mean (target to beat)')\nprint(response_var.groupby('code')[rv[1]].mean().min())\n<mask token>\nfor y in candidiate_factors_index:\n if improved_factors_bools[i]:\n all = all + y + ','\n i = i + 1\nprint('Effects')\nif len(all) == 0:\n print('NONE')\n exit()\nprint(all.rstrip(','))\n",
"step-3": "<mask token>\nif len(sys.argv) <= 3:\n print('Not enough args usage: anova.py <*.csv> <rv1,rv2> <target to beat>')\n print('ex: best-mean.py testdata.csv nicdrop 95000')\n print('<rv> is response variable')\n exit()\ntarget_to_beat = int(sys.argv[3])\nrv = sys.argv[2].split(',')\ndata = pd.read_csv(sys.argv[1], header=[0, 1])\nresponse_var = data[[rv[0], 'factors']]\nresponse_var.columns = response_var.columns.get_level_values(1)\nprint('Re-run factor means')\nprint(response_var.groupby('code')[rv[1]].mean())\nprint('Lowest observed sample mean (target to beat)')\nprint(response_var.groupby('code')[rv[1]].mean().min())\ncandidiate_factors_index = response_var.groupby('code')[rv[1]].mean(\n ).index.array.to_numpy()\nimproved_factors_bools = (response_var.groupby('code')[rv[1]].mean() <\n target_to_beat).to_numpy()\nall = ''\ni = 0\nfor y in candidiate_factors_index:\n if improved_factors_bools[i]:\n all = all + y + ','\n i = i + 1\nprint('Effects')\nif len(all) == 0:\n print('NONE')\n exit()\nprint(all.rstrip(','))\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport sys\nif len(sys.argv) <= 3:\n print('Not enough args usage: anova.py <*.csv> <rv1,rv2> <target to beat>')\n print('ex: best-mean.py testdata.csv nicdrop 95000')\n print('<rv> is response variable')\n exit()\ntarget_to_beat = int(sys.argv[3])\nrv = sys.argv[2].split(',')\ndata = pd.read_csv(sys.argv[1], header=[0, 1])\nresponse_var = data[[rv[0], 'factors']]\nresponse_var.columns = response_var.columns.get_level_values(1)\nprint('Re-run factor means')\nprint(response_var.groupby('code')[rv[1]].mean())\nprint('Lowest observed sample mean (target to beat)')\nprint(response_var.groupby('code')[rv[1]].mean().min())\ncandidiate_factors_index = response_var.groupby('code')[rv[1]].mean(\n ).index.array.to_numpy()\nimproved_factors_bools = (response_var.groupby('code')[rv[1]].mean() <\n target_to_beat).to_numpy()\nall = ''\ni = 0\nfor y in candidiate_factors_index:\n if improved_factors_bools[i]:\n all = all + y + ','\n i = i + 1\nprint('Effects')\nif len(all) == 0:\n print('NONE')\n exit()\nprint(all.rstrip(','))\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport sys\n\n#Best Mean Test\nif len(sys.argv) <= 3:\n\tprint(\"Not enough args usage: anova.py <*.csv> <rv1,rv2> <target to beat>\")\n\tprint(\"ex: best-mean.py testdata.csv nicdrop 95000\")\n\tprint(\"<rv> is response variable\")\n\texit()\n\ntarget_to_beat = int(sys.argv[3]) #factors\nrv = sys.argv[2].split(',')\n\ndata = pd.read_csv(sys.argv[1], header=[0,1])\nresponse_var = data[[rv[0],'factors']]\nresponse_var.columns = response_var.columns.get_level_values(1)\n\nprint(\"Re-run factor means\")\nprint(response_var.groupby('code')[rv[1]].mean())\n\nprint(\"Lowest observed sample mean (target to beat)\")\nprint(response_var.groupby('code')[rv[1]].mean().min())\n\n#print factors still remaining as viable\ncandidiate_factors_index = response_var.groupby('code')[rv[1]].mean().index.array.to_numpy() #all factors from csv\nimproved_factors_bools = (response_var.groupby('code')[rv[1]].mean() < target_to_beat).to_numpy() #boolean series\nall = \"\"\ni=0\nfor y in candidiate_factors_index:\n\tif improved_factors_bools[i]:\n\t\tall = all + y + \",\"\n\ti=i+1\nprint(\"Effects\")\nif len(all) == 0:\n\tprint(\"NONE\")\n\texit()\nprint(all.rstrip(','))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
seenum = dict()
for idx, val in enumerate(nums):
if target - val in seenum:
return [seenum[target - val], idx]
seenum[val] = idx
return [-1, -1]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
seenum = dict()
for idx, val in enumerate(nums):
if target - val in seenum:
return [seenum[target - val], idx]
seenum[val] = idx
return [-1, -1]
if __name__ == '__main__':
nums = [2, 7, 11, 15]
target = 9
sol = Solution()
print(sol.twoSum(nums, target))
<|reserved_special_token_1|>
'''
O(n) time complexity
O(n) space complexity
'''
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
seenum = dict()
for idx, val in enumerate(nums):
if target - val in seenum:
return [seenum[target-val], idx]
seenum[val] = idx
return [-1, -1]
if __name__ == "__main__":
nums = [2,7,11,15]
target = 9
sol = Solution()
print(sol.twoSum(nums, target))
|
flexible
|
{
"blob_id": "b3f62c331ff4ae9f909fc90cc7303997b32daceb",
"index": 1876,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n seenum = dict()\n for idx, val in enumerate(nums):\n if target - val in seenum:\n return [seenum[target - val], idx]\n seenum[val] = idx\n return [-1, -1]\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution:\n\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n seenum = dict()\n for idx, val in enumerate(nums):\n if target - val in seenum:\n return [seenum[target - val], idx]\n seenum[val] = idx\n return [-1, -1]\n\n\nif __name__ == '__main__':\n nums = [2, 7, 11, 15]\n target = 9\n sol = Solution()\n print(sol.twoSum(nums, target))\n",
"step-5": "'''\nO(n) time complexity\nO(n) space complexity\n'''\n\nclass Solution:\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n seenum = dict()\n for idx, val in enumerate(nums):\n if target - val in seenum:\n return [seenum[target-val], idx]\n seenum[val] = idx\n return [-1, -1]\n\nif __name__ == \"__main__\":\n nums = [2,7,11,15]\n target = 9\n sol = Solution()\n print(sol.twoSum(nums, target))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print("We're going to speak anything you type in a different accent")
<|reserved_special_token_0|>
print(language_code)
<|reserved_special_token_0|>
myobj.save('texty.mp3')
os.system('mpg321 texty.mp3')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
language_code = """
Language Code
-------- ----
Afrikaans af
Albanian sq
Arabic ar
Belarusian be
Bulgarian bg
Catalan ca
Chinese Simplified zh-CN
Chinese Traditional zh-TW
Croatian hr
Czech cs
Danish da
Dutch nl
English en
Estonian et
Filipino tl
Finnish fi
French fr
Galician gl
German de
Greek el
Hebrew iw
Hindi hi
Hungarian hu
Icelandic is
Indonesian id
Irish ga
Italian it
Japanese ja
Korean ko
Latvian lv
Lithuanian lt
Macedonian mk
Malay ms
Maltese mt
Norwegian no
Persian fa
Polish pl
Portuguese pt
Romanian ro
Russian ru
Serbian sr
Slovak sk
Slovenian sl
Spanish es
Swahili sw
Swedish sv
Thai th
Turkish tr
Ukrainian uk
Vietnamese vi
Welsh cy
Yiddish yi
"""
print("We're going to speak anything you type in a different accent")
mytext = input('Please enter some text: ')
print(language_code)
language = input('Please select the accent: ')
myobj = gTTS(text=mytext, lang=language, slow=True)
myobj.save('texty.mp3')
os.system('mpg321 texty.mp3')
<|reserved_special_token_1|>
from gtts import gTTS
import os
language_code = """
Language Code
-------- ----
Afrikaans af
Albanian sq
Arabic ar
Belarusian be
Bulgarian bg
Catalan ca
Chinese Simplified zh-CN
Chinese Traditional zh-TW
Croatian hr
Czech cs
Danish da
Dutch nl
English en
Estonian et
Filipino tl
Finnish fi
French fr
Galician gl
German de
Greek el
Hebrew iw
Hindi hi
Hungarian hu
Icelandic is
Indonesian id
Irish ga
Italian it
Japanese ja
Korean ko
Latvian lv
Lithuanian lt
Macedonian mk
Malay ms
Maltese mt
Norwegian no
Persian fa
Polish pl
Portuguese pt
Romanian ro
Russian ru
Serbian sr
Slovak sk
Slovenian sl
Spanish es
Swahili sw
Swedish sv
Thai th
Turkish tr
Ukrainian uk
Vietnamese vi
Welsh cy
Yiddish yi
"""
print("We're going to speak anything you type in a different accent")
mytext = input('Please enter some text: ')
print(language_code)
language = input('Please select the accent: ')
myobj = gTTS(text=mytext, lang=language, slow=True)
myobj.save('texty.mp3')
os.system('mpg321 texty.mp3')
<|reserved_special_token_1|>
# Converts text to speech in different accents. Requires pip3 install gTTS
from gtts import gTTS
import os
language_code = """
Language Code
-------- ----
Afrikaans af
Albanian sq
Arabic ar
Belarusian be
Bulgarian bg
Catalan ca
Chinese Simplified zh-CN
Chinese Traditional zh-TW
Croatian hr
Czech cs
Danish da
Dutch nl
English en
Estonian et
Filipino tl
Finnish fi
French fr
Galician gl
German de
Greek el
Hebrew iw
Hindi hi
Hungarian hu
Icelandic is
Indonesian id
Irish ga
Italian it
Japanese ja
Korean ko
Latvian lv
Lithuanian lt
Macedonian mk
Malay ms
Maltese mt
Norwegian no
Persian fa
Polish pl
Portuguese pt
Romanian ro
Russian ru
Serbian sr
Slovak sk
Slovenian sl
Spanish es
Swahili sw
Swedish sv
Thai th
Turkish tr
Ukrainian uk
Vietnamese vi
Welsh cy
Yiddish yi
"""
print("We're going to speak anything you type in a different accent")
mytext = input("Please enter some text: ")
print(language_code)
language = input("Please select the accent: ")
# Passing the text and language to the engine
myobj = gTTS(text=mytext, lang=language, slow=True)
# Saving the converted audio in a mp3 file named texty
myobj.save("texty.mp3")
# It does create the file but doesnt play.
# Also, I wanted it to actually translate to a different language, but all it does is say it in a different accent!
os.system("mpg321 texty.mp3")
|
flexible
|
{
"blob_id": "545053bc2b7c8687622d747673f2ad37b978014c",
"index": 3403,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\"We're going to speak anything you type in a different accent\")\n<mask token>\nprint(language_code)\n<mask token>\nmyobj.save('texty.mp3')\nos.system('mpg321 texty.mp3')\n",
"step-3": "<mask token>\nlanguage_code = \"\"\"\nLanguage Code\n-------- ----\nAfrikaans af\nAlbanian sq\nArabic ar\nBelarusian be\nBulgarian bg\nCatalan ca\nChinese Simplified zh-CN\nChinese Traditional zh-TW\nCroatian hr\nCzech cs\nDanish da\nDutch nl\nEnglish en\nEstonian et\nFilipino tl\nFinnish fi\nFrench fr\nGalician gl\nGerman de\nGreek el\nHebrew iw\nHindi hi\nHungarian hu\nIcelandic is\nIndonesian id\nIrish ga\nItalian it\nJapanese ja\nKorean ko\nLatvian lv\nLithuanian lt\nMacedonian mk\nMalay ms\nMaltese mt\nNorwegian no\nPersian fa\nPolish pl\nPortuguese pt\nRomanian ro\nRussian ru\nSerbian sr\nSlovak sk\nSlovenian sl\nSpanish es\nSwahili sw\nSwedish sv\nThai th\nTurkish tr\nUkrainian uk\nVietnamese vi\nWelsh cy\nYiddish yi\n\"\"\"\nprint(\"We're going to speak anything you type in a different accent\")\nmytext = input('Please enter some text: ')\nprint(language_code)\nlanguage = input('Please select the accent: ')\nmyobj = gTTS(text=mytext, lang=language, slow=True)\nmyobj.save('texty.mp3')\nos.system('mpg321 texty.mp3')\n",
"step-4": "from gtts import gTTS\nimport os\nlanguage_code = \"\"\"\nLanguage Code\n-------- ----\nAfrikaans af\nAlbanian sq\nArabic ar\nBelarusian be\nBulgarian bg\nCatalan ca\nChinese Simplified zh-CN\nChinese Traditional zh-TW\nCroatian hr\nCzech cs\nDanish da\nDutch nl\nEnglish en\nEstonian et\nFilipino tl\nFinnish fi\nFrench fr\nGalician gl\nGerman de\nGreek el\nHebrew iw\nHindi hi\nHungarian hu\nIcelandic is\nIndonesian id\nIrish ga\nItalian it\nJapanese ja\nKorean ko\nLatvian lv\nLithuanian lt\nMacedonian mk\nMalay ms\nMaltese mt\nNorwegian no\nPersian fa\nPolish pl\nPortuguese pt\nRomanian ro\nRussian ru\nSerbian sr\nSlovak sk\nSlovenian sl\nSpanish es\nSwahili sw\nSwedish sv\nThai th\nTurkish tr\nUkrainian uk\nVietnamese vi\nWelsh cy\nYiddish yi\n\"\"\"\nprint(\"We're going to speak anything you type in a different accent\")\nmytext = input('Please enter some text: ')\nprint(language_code)\nlanguage = input('Please select the accent: ')\nmyobj = gTTS(text=mytext, lang=language, slow=True)\nmyobj.save('texty.mp3')\nos.system('mpg321 texty.mp3')\n",
"step-5": "# Converts text to speech in different accents. Requires pip3 install gTTS\nfrom gtts import gTTS\nimport os\n\nlanguage_code = \"\"\"\nLanguage Code\n-------- ----\nAfrikaans af\nAlbanian sq\nArabic ar\nBelarusian be\nBulgarian bg\nCatalan ca\nChinese Simplified zh-CN\nChinese Traditional zh-TW\nCroatian hr\nCzech cs\nDanish da\nDutch nl\nEnglish en\nEstonian et\nFilipino tl\nFinnish fi\nFrench fr\nGalician gl\nGerman de\nGreek el\nHebrew iw\nHindi hi\nHungarian hu\nIcelandic is\nIndonesian id\nIrish ga\nItalian it\nJapanese ja\nKorean ko\nLatvian lv\nLithuanian lt\nMacedonian mk\nMalay ms\nMaltese mt\nNorwegian no\nPersian fa\nPolish pl\nPortuguese pt\nRomanian ro\nRussian ru\nSerbian sr\nSlovak sk\nSlovenian sl\nSpanish es\nSwahili sw\nSwedish sv\nThai th\nTurkish tr\nUkrainian uk\nVietnamese vi\nWelsh cy\nYiddish yi\n\"\"\"\n\nprint(\"We're going to speak anything you type in a different accent\")\nmytext = input(\"Please enter some text: \")\nprint(language_code)\nlanguage = input(\"Please select the accent: \")\n\n# Passing the text and language to the engine\nmyobj = gTTS(text=mytext, lang=language, slow=True)\n\n# Saving the converted audio in a mp3 file named texty\nmyobj.save(\"texty.mp3\")\n\n# It does create the file but doesnt play. \n# Also, I wanted it to actually translate to a different language, but all it does is say it in a different accent!\nos.system(\"mpg321 texty.mp3\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
print('I am main!')
else:
print(__name__)
for i in range(0, 6):
print(i)
<|reserved_special_token_0|>
print(mylist)
<|reserved_special_token_0|>
while value not in range(0, 6):
try:
value = int(input('Enter #test runs [0-5]:'))
except ValueError:
print('Invalid value entered, retry')
print('Final value entered {}'.format(value))
dir(sys)
print('done!')
for i in mylist:
utilities.myfct(i, 'hi')
utilities.myfct1(i)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
print('I am main!')
else:
print(__name__)
for i in range(0, 6):
print(i)
mylist = [12, 13, 14, 13, 12]
print(mylist)
value = 3
while value not in range(0, 6):
try:
value = int(input('Enter #test runs [0-5]:'))
except ValueError:
print('Invalid value entered, retry')
print('Final value entered {}'.format(value))
dir(sys)
print('done!')
for i in mylist:
utilities.myfct(i, 'hi')
utilities.myfct1(i)
<|reserved_special_token_1|>
import utilities
import sys
if __name__ == '__main__':
print('I am main!')
else:
print(__name__)
for i in range(0, 6):
print(i)
mylist = [12, 13, 14, 13, 12]
print(mylist)
value = 3
while value not in range(0, 6):
try:
value = int(input('Enter #test runs [0-5]:'))
except ValueError:
print('Invalid value entered, retry')
print('Final value entered {}'.format(value))
dir(sys)
print('done!')
for i in mylist:
utilities.myfct(i, 'hi')
utilities.myfct1(i)
<|reserved_special_token_1|>
import utilities
import sys
if __name__ == "__main__":
print('I am main!')
else:
print(__name__)
for i in range(0,6):
print(i)
mylist = [12, 13, 14, 13, 12]
print(mylist)
#Enter iterations to run [0-5]
#value = -1
value = 3
while (value not in range(0,6)):
try:
value = int(input('Enter #test runs [0-5]:'))
except ValueError:
print('Invalid value entered, retry')
print('Final value entered {}'.format(value))
dir(sys)
print('done!')
for i in mylist:
utilities.myfct(i, 'hi')
utilities.myfct1(i)
|
flexible
|
{
"blob_id": "f218f47acfb078877645de26c64e57f92dbcd953",
"index": 8003,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n print('I am main!')\nelse:\n print(__name__)\nfor i in range(0, 6):\n print(i)\n<mask token>\nprint(mylist)\n<mask token>\nwhile value not in range(0, 6):\n try:\n value = int(input('Enter #test runs [0-5]:'))\n except ValueError:\n print('Invalid value entered, retry')\nprint('Final value entered {}'.format(value))\ndir(sys)\nprint('done!')\nfor i in mylist:\n utilities.myfct(i, 'hi')\n utilities.myfct1(i)\n",
"step-3": "<mask token>\nif __name__ == '__main__':\n print('I am main!')\nelse:\n print(__name__)\nfor i in range(0, 6):\n print(i)\nmylist = [12, 13, 14, 13, 12]\nprint(mylist)\nvalue = 3\nwhile value not in range(0, 6):\n try:\n value = int(input('Enter #test runs [0-5]:'))\n except ValueError:\n print('Invalid value entered, retry')\nprint('Final value entered {}'.format(value))\ndir(sys)\nprint('done!')\nfor i in mylist:\n utilities.myfct(i, 'hi')\n utilities.myfct1(i)\n",
"step-4": "import utilities\nimport sys\nif __name__ == '__main__':\n print('I am main!')\nelse:\n print(__name__)\nfor i in range(0, 6):\n print(i)\nmylist = [12, 13, 14, 13, 12]\nprint(mylist)\nvalue = 3\nwhile value not in range(0, 6):\n try:\n value = int(input('Enter #test runs [0-5]:'))\n except ValueError:\n print('Invalid value entered, retry')\nprint('Final value entered {}'.format(value))\ndir(sys)\nprint('done!')\nfor i in mylist:\n utilities.myfct(i, 'hi')\n utilities.myfct1(i)\n",
"step-5": "import utilities\r\nimport sys\r\n\r\nif __name__ == \"__main__\":\r\n print('I am main!')\r\nelse:\r\n print(__name__)\r\n\r\nfor i in range(0,6):\r\n print(i)\r\n \r\nmylist = [12, 13, 14, 13, 12]\r\nprint(mylist)\r\n\r\n#Enter iterations to run [0-5]\r\n#value = -1\r\nvalue = 3\r\nwhile (value not in range(0,6)):\r\n try:\r\n value = int(input('Enter #test runs [0-5]:'))\r\n except ValueError:\r\n print('Invalid value entered, retry')\r\nprint('Final value entered {}'.format(value))\r\n\r\ndir(sys)\r\nprint('done!')\r\nfor i in mylist:\r\n utilities.myfct(i, 'hi')\r\n utilities.myfct1(i)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def parse_cr(cr):
binary = cr.value
string = binary.decode('utf-8')
return string.split(',')
def get_title(cr):
get = parse_cr(cr)[2]
head = get[5:9]
if head == 'data':
trunc = get[12:]
return trunc.split('/')[0]
else:
trunc = get[10:]
return trunc.split('=')[0]
<|reserved_special_token_0|>
def gather_popularity():
first = None
popularity = dict()
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-stream', enable_auto_commit=True, auto_commit_interval_ms=1000
)
duration = 0
max_duration = 500000000
for message in consumer:
if duration > max_duration:
break
else:
duration += 1
if duration % (max_duration / 100) == 0:
print(duration / (max_duration / 100), '% complete')
if first is None:
first = message
elif message == first:
print('repeat')
break
parsed = parse_cr(message)
r_block = parsed[2]
head = r_block[5:9]
if head == 'data':
trunc = r_block[12:]
title = trunc.split('/')[0]
minutes = r_block.split('/')[4][:-4]
else:
continue
if int(minutes) == 0:
date = parsed[0][5:10]
if title in popularity:
count = popularity[title]
popularity[title] = count + 1
else:
popularity[title] = 1
dates.add(date)
return popularity
def gather_titles():
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-new', enable_auto_commit=True, auto_commit_interval_ms=1000)
f = open('movie_titles.txt', 'r')
fl = f.readlines()
f.close()
s = set(fl)
i = len(s)
f = open('movie_titles.txt', 'a')
for message in consumer:
if i > 27000:
break
title = get_title(message) + '\n'
if title in s:
continue
else:
s.add(title)
f.write(title)
i = i + 1
f.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse_cr(cr):
binary = cr.value
string = binary.decode('utf-8')
return string.split(',')
def get_title(cr):
get = parse_cr(cr)[2]
head = get[5:9]
if head == 'data':
trunc = get[12:]
return trunc.split('/')[0]
else:
trunc = get[10:]
return trunc.split('=')[0]
<|reserved_special_token_0|>
def gather_popularity():
first = None
popularity = dict()
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-stream', enable_auto_commit=True, auto_commit_interval_ms=1000
)
duration = 0
max_duration = 500000000
for message in consumer:
if duration > max_duration:
break
else:
duration += 1
if duration % (max_duration / 100) == 0:
print(duration / (max_duration / 100), '% complete')
if first is None:
first = message
elif message == first:
print('repeat')
break
parsed = parse_cr(message)
r_block = parsed[2]
head = r_block[5:9]
if head == 'data':
trunc = r_block[12:]
title = trunc.split('/')[0]
minutes = r_block.split('/')[4][:-4]
else:
continue
if int(minutes) == 0:
date = parsed[0][5:10]
if title in popularity:
count = popularity[title]
popularity[title] = count + 1
else:
popularity[title] = 1
dates.add(date)
return popularity
def gather_titles():
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-new', enable_auto_commit=True, auto_commit_interval_ms=1000)
f = open('movie_titles.txt', 'r')
fl = f.readlines()
f.close()
s = set(fl)
i = len(s)
f = open('movie_titles.txt', 'a')
for message in consumer:
if i > 27000:
break
title = get_title(message) + '\n'
if title in s:
continue
else:
s.add(title)
f.write(title)
i = i + 1
f.close()
<|reserved_special_token_0|>
with open('views3.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
for key, value in results.items():
writer.writerow([key, value / num_days])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
users = set()
def parse_cr(cr):
binary = cr.value
string = binary.decode('utf-8')
return string.split(',')
def get_title(cr):
get = parse_cr(cr)[2]
head = get[5:9]
if head == 'data':
trunc = get[12:]
return trunc.split('/')[0]
else:
trunc = get[10:]
return trunc.split('=')[0]
dates = set()
def gather_popularity():
first = None
popularity = dict()
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-stream', enable_auto_commit=True, auto_commit_interval_ms=1000
)
duration = 0
max_duration = 500000000
for message in consumer:
if duration > max_duration:
break
else:
duration += 1
if duration % (max_duration / 100) == 0:
print(duration / (max_duration / 100), '% complete')
if first is None:
first = message
elif message == first:
print('repeat')
break
parsed = parse_cr(message)
r_block = parsed[2]
head = r_block[5:9]
if head == 'data':
trunc = r_block[12:]
title = trunc.split('/')[0]
minutes = r_block.split('/')[4][:-4]
else:
continue
if int(minutes) == 0:
date = parsed[0][5:10]
if title in popularity:
count = popularity[title]
popularity[title] = count + 1
else:
popularity[title] = 1
dates.add(date)
return popularity
def gather_titles():
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-new', enable_auto_commit=True, auto_commit_interval_ms=1000)
f = open('movie_titles.txt', 'r')
fl = f.readlines()
f.close()
s = set(fl)
i = len(s)
f = open('movie_titles.txt', 'a')
for message in consumer:
if i > 27000:
break
title = get_title(message) + '\n'
if title in s:
continue
else:
s.add(title)
f.write(title)
i = i + 1
f.close()
results = gather_popularity()
num_days = len(dates)
with open('views3.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
for key, value in results.items():
writer.writerow([key, value / num_days])
<|reserved_special_token_1|>
from kafka import KafkaConsumer
import csv
users = set()
def parse_cr(cr):
binary = cr.value
string = binary.decode('utf-8')
return string.split(',')
def get_title(cr):
get = parse_cr(cr)[2]
head = get[5:9]
if head == 'data':
trunc = get[12:]
return trunc.split('/')[0]
else:
trunc = get[10:]
return trunc.split('=')[0]
dates = set()
def gather_popularity():
first = None
popularity = dict()
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-stream', enable_auto_commit=True, auto_commit_interval_ms=1000
)
duration = 0
max_duration = 500000000
for message in consumer:
if duration > max_duration:
break
else:
duration += 1
if duration % (max_duration / 100) == 0:
print(duration / (max_duration / 100), '% complete')
if first is None:
first = message
elif message == first:
print('repeat')
break
parsed = parse_cr(message)
r_block = parsed[2]
head = r_block[5:9]
if head == 'data':
trunc = r_block[12:]
title = trunc.split('/')[0]
minutes = r_block.split('/')[4][:-4]
else:
continue
if int(minutes) == 0:
date = parsed[0][5:10]
if title in popularity:
count = popularity[title]
popularity[title] = count + 1
else:
popularity[title] = 1
dates.add(date)
return popularity
def gather_titles():
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-new', enable_auto_commit=True, auto_commit_interval_ms=1000)
f = open('movie_titles.txt', 'r')
fl = f.readlines()
f.close()
s = set(fl)
i = len(s)
f = open('movie_titles.txt', 'a')
for message in consumer:
if i > 27000:
break
title = get_title(message) + '\n'
if title in s:
continue
else:
s.add(title)
f.write(title)
i = i + 1
f.close()
results = gather_popularity()
num_days = len(dates)
with open('views3.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
for key, value in results.items():
writer.writerow([key, value / num_days])
<|reserved_special_token_1|>
from kafka import KafkaConsumer
import csv
users = set()
# Helpers for decoding ConsumerRecord payloads from the movielog topic.

def parse_cr(cr):
    """Decode a ConsumerRecord payload into its comma-separated fields.

    Returns a list of strings: [timestamp, user id, GET request].
    """
    return cr.value.decode('utf-8').split(',')


def get_title(cr):
    """Extract the movie title from a record, for watch and rate requests alike.

    Watch requests look like ``GET /data/m/<title>/<minute>.mpg`` and rate
    requests like ``GET /rate/<title>=<rating>``; either way the title comes
    back in its raw ``name+name+year`` form.
    """
    request = parse_cr(cr)[2]
    if request[5:9] == 'data':
        # 'GET /data/m/' is 12 characters; the title runs up to the next '/'.
        return request[12:].split('/')[0]
    # 'GET /rate/' is 10 characters; the title runs up to the '='.
    return request[10:].split('=')[0]
dates = set()
def gather_popularity():
    """Consume the 'movielog' topic and count watch-start events per title.

    A title is credited once each time a watch request for minute 0 is seen
    (i.e. a viewing session begins).  Every date on which at least one such
    event occurs is added to the module-level ``dates`` set so callers can
    average the counts over the number of observed days.

    Returns a dict mapping raw title strings to watch-start counts.
    """
    first = None  # first message seen; used to detect the stream looping
    popularity = dict()  # title -> number of watch-start events
    consumer = KafkaConsumer(
        'movielog',
        bootstrap_servers=['localhost:9092'],
        auto_offset_reset='earliest',
        group_id='jcerwin-stream',
        enable_auto_commit=True,
        auto_commit_interval_ms=1000
    )
    duration = 0  # number of messages processed so far
    max_duration = 500000000  # hard cap: the topic itself never ends
    for message in consumer:
        if duration > max_duration: break
        else: duration += 1
        # Progress report every 1% of the message budget.
        if duration % (max_duration / 100) == 0:
            print(duration / (max_duration / 100), "% complete")
        if first is None:
            first = message
        else:
            if message == first:
                # Saw the very first message again: stop instead of recounting.
                print("repeat")
                break
        parsed = parse_cr(message)  # [timestamp, user id, GET request]
        r_block = parsed[2]
        head = r_block[5:9]
        # look for watches only not reviews
        if head == 'data':
            # Request shape: 'GET /data/m/<title>/<minute>.mpg'
            trunc = r_block[12:]
            title = trunc.split('/')[0]
            minutes = r_block.split('/')[4][:-4]  # strip the '.mpg' suffix
        else:
            continue
        if int(minutes) == 0:
            # Minute 0 marks the start of a viewing; count once per session.
            # 'MM-DD' slice — assumes ISO 'YYYY-MM-DD...' timestamps; confirm.
            date = (parsed[0])[5:10]
            if title in popularity:
                count = popularity[title]
                popularity[title] = count + 1
            else:
                popularity[title] = 1
            dates.add(date)
    return popularity
def gather_titles():
    """Stream the 'movielog' topic and append previously unseen movie titles
    to ``movie_titles.txt``, one per line, until roughly 27000 are collected.

    Titles already present in the file are loaded first so repeated runs do
    not write duplicates.
    """
    consumer = KafkaConsumer(
        'movielog',
        bootstrap_servers=['localhost:9092'],
        auto_offset_reset='earliest',
        group_id='jcerwin-new',
        enable_auto_commit=True,
        auto_commit_interval_ms=1000
    )
    # Seed the seen-set with titles already on disk.  Lines keep their '\n',
    # and the membership test below deliberately uses title + '\n' to match.
    with open("movie_titles.txt", "r") as f:
        seen = set(f.readlines())
    count = len(seen)
    # 'with' guarantees the append handle is flushed and closed even if the
    # consumer raises mid-stream (the original leaked the handle on error).
    with open("movie_titles.txt", "a") as f:
        for message in consumer:
            if count > 27000:
                break
            title = get_title(message) + '\n'
            if title not in seen:
                seen.add(title)
                f.write(title)
                count += 1
# Collect per-title watch-start counts, then persist the per-day average for
# each title to views3.csv as (title, views_per_day) rows.
results = gather_popularity()
num_days = len(dates)
with open('views3.csv', 'w') as csv_file:
    csv.writer(csv_file).writerows(
        (title, views / num_days) for title, views in results.items()
    )
|
flexible
|
{
"blob_id": "374fbb986524f28cc86f6e579f504eeb8ddc9701",
"index": 1122,
"step-1": "<mask token>\n\n\ndef parse_cr(cr):\n binary = cr.value\n string = binary.decode('utf-8')\n return string.split(',')\n\n\ndef get_title(cr):\n get = parse_cr(cr)[2]\n head = get[5:9]\n if head == 'data':\n trunc = get[12:]\n return trunc.split('/')[0]\n else:\n trunc = get[10:]\n return trunc.split('=')[0]\n\n\n<mask token>\n\n\ndef gather_popularity():\n first = None\n popularity = dict()\n consumer = KafkaConsumer('movielog', bootstrap_servers=[\n 'localhost:9092'], auto_offset_reset='earliest', group_id=\n 'jcerwin-stream', enable_auto_commit=True, auto_commit_interval_ms=1000\n )\n duration = 0\n max_duration = 500000000\n for message in consumer:\n if duration > max_duration:\n break\n else:\n duration += 1\n if duration % (max_duration / 100) == 0:\n print(duration / (max_duration / 100), '% complete')\n if first is None:\n first = message\n elif message == first:\n print('repeat')\n break\n parsed = parse_cr(message)\n r_block = parsed[2]\n head = r_block[5:9]\n if head == 'data':\n trunc = r_block[12:]\n title = trunc.split('/')[0]\n minutes = r_block.split('/')[4][:-4]\n else:\n continue\n if int(minutes) == 0:\n date = parsed[0][5:10]\n if title in popularity:\n count = popularity[title]\n popularity[title] = count + 1\n else:\n popularity[title] = 1\n dates.add(date)\n return popularity\n\n\ndef gather_titles():\n consumer = KafkaConsumer('movielog', bootstrap_servers=[\n 'localhost:9092'], auto_offset_reset='earliest', group_id=\n 'jcerwin-new', enable_auto_commit=True, auto_commit_interval_ms=1000)\n f = open('movie_titles.txt', 'r')\n fl = f.readlines()\n f.close()\n s = set(fl)\n i = len(s)\n f = open('movie_titles.txt', 'a')\n for message in consumer:\n if i > 27000:\n break\n title = get_title(message) + '\\n'\n if title in s:\n continue\n else:\n s.add(title)\n f.write(title)\n i = i + 1\n f.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_cr(cr):\n binary = cr.value\n string = binary.decode('utf-8')\n return string.split(',')\n\n\ndef get_title(cr):\n get = parse_cr(cr)[2]\n head = get[5:9]\n if head == 'data':\n trunc = get[12:]\n return trunc.split('/')[0]\n else:\n trunc = get[10:]\n return trunc.split('=')[0]\n\n\n<mask token>\n\n\ndef gather_popularity():\n first = None\n popularity = dict()\n consumer = KafkaConsumer('movielog', bootstrap_servers=[\n 'localhost:9092'], auto_offset_reset='earliest', group_id=\n 'jcerwin-stream', enable_auto_commit=True, auto_commit_interval_ms=1000\n )\n duration = 0\n max_duration = 500000000\n for message in consumer:\n if duration > max_duration:\n break\n else:\n duration += 1\n if duration % (max_duration / 100) == 0:\n print(duration / (max_duration / 100), '% complete')\n if first is None:\n first = message\n elif message == first:\n print('repeat')\n break\n parsed = parse_cr(message)\n r_block = parsed[2]\n head = r_block[5:9]\n if head == 'data':\n trunc = r_block[12:]\n title = trunc.split('/')[0]\n minutes = r_block.split('/')[4][:-4]\n else:\n continue\n if int(minutes) == 0:\n date = parsed[0][5:10]\n if title in popularity:\n count = popularity[title]\n popularity[title] = count + 1\n else:\n popularity[title] = 1\n dates.add(date)\n return popularity\n\n\ndef gather_titles():\n consumer = KafkaConsumer('movielog', bootstrap_servers=[\n 'localhost:9092'], auto_offset_reset='earliest', group_id=\n 'jcerwin-new', enable_auto_commit=True, auto_commit_interval_ms=1000)\n f = open('movie_titles.txt', 'r')\n fl = f.readlines()\n f.close()\n s = set(fl)\n i = len(s)\n f = open('movie_titles.txt', 'a')\n for message in consumer:\n if i > 27000:\n break\n title = get_title(message) + '\\n'\n if title in s:\n continue\n else:\n s.add(title)\n f.write(title)\n i = i + 1\n f.close()\n\n\n<mask token>\nwith open('views3.csv', 'w') as csv_file:\n writer = csv.writer(csv_file)\n for key, value in results.items():\n 
writer.writerow([key, value / num_days])\n",
"step-3": "<mask token>\nusers = set()\n\n\ndef parse_cr(cr):\n binary = cr.value\n string = binary.decode('utf-8')\n return string.split(',')\n\n\ndef get_title(cr):\n get = parse_cr(cr)[2]\n head = get[5:9]\n if head == 'data':\n trunc = get[12:]\n return trunc.split('/')[0]\n else:\n trunc = get[10:]\n return trunc.split('=')[0]\n\n\ndates = set()\n\n\ndef gather_popularity():\n first = None\n popularity = dict()\n consumer = KafkaConsumer('movielog', bootstrap_servers=[\n 'localhost:9092'], auto_offset_reset='earliest', group_id=\n 'jcerwin-stream', enable_auto_commit=True, auto_commit_interval_ms=1000\n )\n duration = 0\n max_duration = 500000000\n for message in consumer:\n if duration > max_duration:\n break\n else:\n duration += 1\n if duration % (max_duration / 100) == 0:\n print(duration / (max_duration / 100), '% complete')\n if first is None:\n first = message\n elif message == first:\n print('repeat')\n break\n parsed = parse_cr(message)\n r_block = parsed[2]\n head = r_block[5:9]\n if head == 'data':\n trunc = r_block[12:]\n title = trunc.split('/')[0]\n minutes = r_block.split('/')[4][:-4]\n else:\n continue\n if int(minutes) == 0:\n date = parsed[0][5:10]\n if title in popularity:\n count = popularity[title]\n popularity[title] = count + 1\n else:\n popularity[title] = 1\n dates.add(date)\n return popularity\n\n\ndef gather_titles():\n consumer = KafkaConsumer('movielog', bootstrap_servers=[\n 'localhost:9092'], auto_offset_reset='earliest', group_id=\n 'jcerwin-new', enable_auto_commit=True, auto_commit_interval_ms=1000)\n f = open('movie_titles.txt', 'r')\n fl = f.readlines()\n f.close()\n s = set(fl)\n i = len(s)\n f = open('movie_titles.txt', 'a')\n for message in consumer:\n if i > 27000:\n break\n title = get_title(message) + '\\n'\n if title in s:\n continue\n else:\n s.add(title)\n f.write(title)\n i = i + 1\n f.close()\n\n\nresults = gather_popularity()\nnum_days = len(dates)\nwith open('views3.csv', 'w') as csv_file:\n writer = 
csv.writer(csv_file)\n for key, value in results.items():\n writer.writerow([key, value / num_days])\n",
"step-4": "from kafka import KafkaConsumer\nimport csv\nusers = set()\n\n\ndef parse_cr(cr):\n binary = cr.value\n string = binary.decode('utf-8')\n return string.split(',')\n\n\ndef get_title(cr):\n get = parse_cr(cr)[2]\n head = get[5:9]\n if head == 'data':\n trunc = get[12:]\n return trunc.split('/')[0]\n else:\n trunc = get[10:]\n return trunc.split('=')[0]\n\n\ndates = set()\n\n\ndef gather_popularity():\n first = None\n popularity = dict()\n consumer = KafkaConsumer('movielog', bootstrap_servers=[\n 'localhost:9092'], auto_offset_reset='earliest', group_id=\n 'jcerwin-stream', enable_auto_commit=True, auto_commit_interval_ms=1000\n )\n duration = 0\n max_duration = 500000000\n for message in consumer:\n if duration > max_duration:\n break\n else:\n duration += 1\n if duration % (max_duration / 100) == 0:\n print(duration / (max_duration / 100), '% complete')\n if first is None:\n first = message\n elif message == first:\n print('repeat')\n break\n parsed = parse_cr(message)\n r_block = parsed[2]\n head = r_block[5:9]\n if head == 'data':\n trunc = r_block[12:]\n title = trunc.split('/')[0]\n minutes = r_block.split('/')[4][:-4]\n else:\n continue\n if int(minutes) == 0:\n date = parsed[0][5:10]\n if title in popularity:\n count = popularity[title]\n popularity[title] = count + 1\n else:\n popularity[title] = 1\n dates.add(date)\n return popularity\n\n\ndef gather_titles():\n consumer = KafkaConsumer('movielog', bootstrap_servers=[\n 'localhost:9092'], auto_offset_reset='earliest', group_id=\n 'jcerwin-new', enable_auto_commit=True, auto_commit_interval_ms=1000)\n f = open('movie_titles.txt', 'r')\n fl = f.readlines()\n f.close()\n s = set(fl)\n i = len(s)\n f = open('movie_titles.txt', 'a')\n for message in consumer:\n if i > 27000:\n break\n title = get_title(message) + '\\n'\n if title in s:\n continue\n else:\n s.add(title)\n f.write(title)\n i = i + 1\n f.close()\n\n\nresults = gather_popularity()\nnum_days = len(dates)\nwith open('views3.csv', 'w') as 
csv_file:\n writer = csv.writer(csv_file)\n for key, value in results.items():\n writer.writerow([key, value / num_days])\n",
"step-5": "from kafka import KafkaConsumer\nimport csv\n\nusers = set()\n\n# returns string of title given a ConsumerRecord\ndef parse_cr(cr):\n binary = cr.value\n string = binary.decode('utf-8')\n # [time, user id, GET request]\n return string.split(',')\n\n\n# returns string of title given a ConsumerRecord in name+name+year format regardless of rate or data\ndef get_title(cr):\n get = parse_cr(cr)[2]\n head = get[5:9]\n if head == 'data':\n trunc = get[12:]\n return trunc.split('/')[0]\n else:\n trunc = get[10:]\n return trunc.split('=')[0]\n\ndates = set()\ndef gather_popularity():\n first = None\n popularity = dict()\n\n\n consumer = KafkaConsumer(\n 'movielog',\n bootstrap_servers=['localhost:9092'],\n auto_offset_reset='earliest',\n group_id='jcerwin-stream',\n enable_auto_commit=True,\n auto_commit_interval_ms=1000\n )\n duration = 0\n max_duration = 500000000\n for message in consumer:\n if duration > max_duration: break\n else: duration += 1\n\n if duration % (max_duration / 100) == 0:\n print(duration / (max_duration / 100), \"% complete\")\n\n if first is None:\n first = message\n else:\n if message == first:\n print(\"repeat\")\n break\n\n parsed = parse_cr(message)\n r_block = parsed[2]\n head = r_block[5:9]\n # look for watches only not reviews\n if head == 'data':\n trunc = r_block[12:]\n title = trunc.split('/')[0]\n\n minutes = r_block.split('/')[4][:-4]\n else:\n continue\n\n if int(minutes) == 0:\n date = (parsed[0])[5:10]\n if title in popularity:\n count = popularity[title]\n popularity[title] = count + 1\n\n else:\n popularity[title] = 1\n\n dates.add(date)\n\n\n return popularity\n\ndef gather_titles():\n consumer = KafkaConsumer(\n 'movielog',\n bootstrap_servers=['localhost:9092'],\n auto_offset_reset='earliest',\n group_id='jcerwin-new',\n enable_auto_commit=True,\n auto_commit_interval_ms=1000\n )\n\n f = open(\"movie_titles.txt\", \"r\")\n fl = f.readlines()\n f.close()\n s = set(fl)\n i = len(s)\n\n f = open(\"movie_titles.txt\", 
\"a\")\n for message in consumer:\n if i > 27000:\n break\n title = get_title(message) + '\\n'\n if title in s:\n continue\n else:\n s.add(title)\n f.write(title)\n i = i + 1\n\n f.close()\n\n#with open('views.csv', 'w') as csv_file:\n# writer = csv.writer(csv_file)\n# for key, value in gather_popularity().items():\n# writer.writerow([key, value])\n\n\nresults = gather_popularity()\nnum_days = len(dates)\n\nwith open('views3.csv', 'w') as csv_file:\n writer = csv.writer(csv_file)\n for key, value in results.items():\n writer.writerow([key, value / num_days])\n\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class myPickle:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class myPickle:
def make(self, obj, fileName):
print('myPickle make file', fileName)
pickle.dump(obj, open(fileName, 'wb'))
print(' DONE')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class myPickle:
def make(self, obj, fileName):
print('myPickle make file', fileName)
pickle.dump(obj, open(fileName, 'wb'))
print(' DONE')
def load(self, fileName):
print('myPickle load file', fileName)
tr = pickle.load(open(fileName, 'rb'))
print(' DONE')
return tr
<|reserved_special_token_1|>
import pickle
class myPickle:
def make(self, obj, fileName):
print('myPickle make file', fileName)
pickle.dump(obj, open(fileName, 'wb'))
print(' DONE')
def load(self, fileName):
print('myPickle load file', fileName)
tr = pickle.load(open(fileName, 'rb'))
print(' DONE')
return tr
<|reserved_special_token_1|>
import pickle
class myPickle:
    """Thin wrapper around :mod:`pickle` that logs each save/load to stdout."""

    def make(self, obj, fileName):
        """Serialize *obj* to *fileName* with pickle."""
        print("myPickle make file", fileName)
        # 'with' flushes and closes the handle even if pickling raises; the
        # original passed an un-managed open() and relied on GC to close it,
        # so a crash (or another interpreter) could leave the write unflushed.
        with open(fileName, 'wb') as f:
            pickle.dump(obj, f)
        print(" DONE")

    def load(self, fileName):
        """Deserialize and return the object stored in *fileName*."""
        print("myPickle load file", fileName)
        with open(fileName, 'rb') as f:
            tr = pickle.load(f)
        print(" DONE")
        return tr
|
flexible
|
{
"blob_id": "e50feccd583d7e33877d5fcc377a1d79dc247d3a",
"index": 3117,
"step-1": "<mask token>\n\n\nclass myPickle:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass myPickle:\n\n def make(self, obj, fileName):\n print('myPickle make file', fileName)\n pickle.dump(obj, open(fileName, 'wb'))\n print(' DONE')\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass myPickle:\n\n def make(self, obj, fileName):\n print('myPickle make file', fileName)\n pickle.dump(obj, open(fileName, 'wb'))\n print(' DONE')\n\n def load(self, fileName):\n print('myPickle load file', fileName)\n tr = pickle.load(open(fileName, 'rb'))\n print(' DONE')\n return tr\n",
"step-4": "import pickle\n\n\nclass myPickle:\n\n def make(self, obj, fileName):\n print('myPickle make file', fileName)\n pickle.dump(obj, open(fileName, 'wb'))\n print(' DONE')\n\n def load(self, fileName):\n print('myPickle load file', fileName)\n tr = pickle.load(open(fileName, 'rb'))\n print(' DONE')\n return tr\n",
"step-5": "\nimport pickle\n\nclass myPickle:\n \n def make(self, obj,fileName):\n print(\"myPickle make file\",fileName)\n pickle.dump( obj, open(fileName,'wb') )\n print(\" DONE\")\n \n def load(self, fileName):\n print(\"myPickle load file\",fileName)\n tr = pickle.load( open(fileName,'rb') )\n print(\" DONE\")\n return tr\n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Trainer:
    """Training/evaluation driver for the graph VAE.

    Bundles the model, its optimizer, a TensorBoard ``SummaryWriter`` and
    running loss meters; all hyper-parameters are read from the ``opt``
    Config (lr, lr_decay, batch_size, max_epoch, print_feq, use_gpu, ...).
    """

    def __init__(self, model=None, opt=Config()):
        # NOTE(review): ``opt=Config()`` is evaluated once at definition time
        # and shared by every Trainer built without an explicit opt; ``train``
        # mutates ``opt.lr``, so confirm sharing is intended.
        self.model = model
        self.opt = opt
        self.criterion = opt.criterion
        # Identifiers of the per-property losses tracked in 'visloss'.
        self.pred_id = self.opt.predictor_id
        self.optimizer = opt.optimizer(self.model.parameters(), lr=opt.lr)
        self.log_path = opt.LOGS_PATH
        # One TensorBoard run directory per launch, stamped 'MMDD_HH_MM'.
        self.writer = SummaryWriter(log_dir=self.opt.LOGS_PATH + '/' + time
            .strftime('%m%d_%H_%M'))
        if opt.use_gpu:
            # Make newly created tensors default to CUDA floats.
            torch.set_default_tensor_type(torch.cuda.FloatTensor)

    def train(self, train_data, val_data=None):
        """Run the optimization loop for ``opt.max_epoch`` epochs.

        Each batch is an ``(N, A, label)`` triple — presumably node features,
        adjacency, and a dict of target tensors (confirm against the dataset).
        The model returns a dict with the scalar 'loss' to optimize and a
        'visloss' breakdown for logging.  Scalars go to TensorBoard every
        ``opt.print_feq`` batches; the learning rate is decayed whenever the
        mean epoch loss stops improving.
        """
        print('Now Begin Training!')
        train_loader = DataLoader(train_data, batch_size=self.opt.
            batch_size, shuffle=True)
        if self.opt.use_gpu:
            self.model.cuda()
        loss_meter = meter.AverageValueMeter()  # running mean of total loss
        abs_losses = MAvgMeter(self.pred_id)  # per-predictor loss means
        previous_loss = 10000000000.0  # sentinel: worse than any real loss
        for epoch in range(self.opt.max_epoch):
            loss_meter.reset()
            abs_losses.reset()
            for i, (N, A, label) in enumerate(train_loader):
                if self.opt.use_gpu:
                    N = N.type(torch.long).cuda()
                    A = A.cuda()
                    label = {key: value.cuda() for key, value in label.items()}
                self.optimizer.zero_grad()
                output = self.model(N, A, label)
                loss = output['loss']
                loss.backward()
                self.optimizer.step()
                abs_losses.add(output['visloss'])
                loss_meter.add(loss.data.cpu())
                # Periodic console + TensorBoard logging.
                if i % self.opt.print_feq == self.opt.print_feq - 1:
                    nither = epoch * len(train_loader) + i  # global step
                    print('EPOCH:{0},i:{1},loss:{2}'.format(epoch, i, loss.
                        data.cpu()), end=' ')
                    self.writer.add_scalar('train_loss', loss_meter.value()
                        [0], nither)
                    for key in self.pred_id:
                        self.writer.add_scalar(key, abs_losses.value(key)[0
                            ], nither)
                        print(key, float(abs_losses.value(key)[0]), end=' ')
                    print('\n')
            if val_data:
                val_loss = self.test(val_data, val=True)
                print('val loss:', val_loss)
                self.writer.add_scalar('val_loss', val_loss, epoch)
            print('!!!!!!now{0},previous{1}'.format(loss_meter.value()[0],
                previous_loss))
            # Decay the LR when the mean epoch loss fails to improve.
            if loss_meter.value()[0] >= previous_loss:
                self.opt.lr = self.opt.lr * self.opt.lr_decay
                print('!!!!!LR:', self.opt.lr)
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] = self.opt.lr
            previous_loss = loss_meter.value()[0]

    def test(self, test_data, val=False):
        """Evaluate the mean model loss over ``test_data``.

        Puts the model in eval mode.  Returns the mean loss when ``val`` is
        True, otherwise returns nothing (NOTE(review): ``result`` is built
        but never used — confirm whether a test-mode output was intended).
        """
        if self.opt.use_gpu:
            self.model.cuda()
        self.model.eval()
        test_loader = DataLoader(test_data, batch_size=self.opt.batch_size,
            shuffle=True)
        result = []
        loss_meter = meter.AverageValueMeter()
        for i, (H, A, label) in enumerate(test_loader):
            if self.opt.use_gpu:
                H = H.type(torch.long).cuda()
                A = A.cuda()
                label = {key: value.cuda() for key, value in label.items()}
            loss = self.model(H, A, label)['loss']
            loss_meter.add(loss.data.cpu().detach().numpy())
        if val:
            return loss_meter.value()[0]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Trainer:
def __init__(self, model=None, opt=Config()):
self.model = model
self.opt = opt
self.criterion = opt.criterion
self.pred_id = self.opt.predictor_id
self.optimizer = opt.optimizer(self.model.parameters(), lr=opt.lr)
self.log_path = opt.LOGS_PATH
self.writer = SummaryWriter(log_dir=self.opt.LOGS_PATH + '/' + time
.strftime('%m%d_%H_%M'))
if opt.use_gpu:
torch.set_default_tensor_type(torch.cuda.FloatTensor)
def train(self, train_data, val_data=None):
print('Now Begin Training!')
train_loader = DataLoader(train_data, batch_size=self.opt.
batch_size, shuffle=True)
if self.opt.use_gpu:
self.model.cuda()
loss_meter = meter.AverageValueMeter()
abs_losses = MAvgMeter(self.pred_id)
previous_loss = 10000000000.0
for epoch in range(self.opt.max_epoch):
loss_meter.reset()
abs_losses.reset()
for i, (N, A, label) in enumerate(train_loader):
if self.opt.use_gpu:
N = N.type(torch.long).cuda()
A = A.cuda()
label = {key: value.cuda() for key, value in label.items()}
self.optimizer.zero_grad()
output = self.model(N, A, label)
loss = output['loss']
loss.backward()
self.optimizer.step()
abs_losses.add(output['visloss'])
loss_meter.add(loss.data.cpu())
if i % self.opt.print_feq == self.opt.print_feq - 1:
nither = epoch * len(train_loader) + i
print('EPOCH:{0},i:{1},loss:{2}'.format(epoch, i, loss.
data.cpu()), end=' ')
self.writer.add_scalar('train_loss', loss_meter.value()
[0], nither)
for key in self.pred_id:
self.writer.add_scalar(key, abs_losses.value(key)[0
], nither)
print(key, float(abs_losses.value(key)[0]), end=' ')
print('\n')
if val_data:
val_loss = self.test(val_data, val=True)
print('val loss:', val_loss)
self.writer.add_scalar('val_loss', val_loss, epoch)
print('!!!!!!now{0},previous{1}'.format(loss_meter.value()[0],
previous_loss))
if loss_meter.value()[0] >= previous_loss:
self.opt.lr = self.opt.lr * self.opt.lr_decay
print('!!!!!LR:', self.opt.lr)
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.opt.lr
previous_loss = loss_meter.value()[0]
def test(self, test_data, val=False):
if self.opt.use_gpu:
self.model.cuda()
self.model.eval()
test_loader = DataLoader(test_data, batch_size=self.opt.batch_size,
shuffle=True)
result = []
loss_meter = meter.AverageValueMeter()
for i, (H, A, label) in enumerate(test_loader):
if self.opt.use_gpu:
H = H.type(torch.long).cuda()
A = A.cuda()
label = {key: value.cuda() for key, value in label.items()}
loss = self.model(H, A, label)['loss']
loss_meter.add(loss.data.cpu().detach().numpy())
if val:
return loss_meter.value()[0]
torch.set_default_tensor_type(torch.FloatTensor)
<|reserved_special_token_0|>
if load_path:
GAVAE.load_state_dict(torch.load(load_path))
<|reserved_special_token_0|>
print(GAVAE)
VAE_trainer.train(train_data, val_data=val_data)
GAVAE.save()
print('save success')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Trainer:
def __init__(self, model=None, opt=Config()):
self.model = model
self.opt = opt
self.criterion = opt.criterion
self.pred_id = self.opt.predictor_id
self.optimizer = opt.optimizer(self.model.parameters(), lr=opt.lr)
self.log_path = opt.LOGS_PATH
self.writer = SummaryWriter(log_dir=self.opt.LOGS_PATH + '/' + time
.strftime('%m%d_%H_%M'))
if opt.use_gpu:
torch.set_default_tensor_type(torch.cuda.FloatTensor)
def train(self, train_data, val_data=None):
print('Now Begin Training!')
train_loader = DataLoader(train_data, batch_size=self.opt.
batch_size, shuffle=True)
if self.opt.use_gpu:
self.model.cuda()
loss_meter = meter.AverageValueMeter()
abs_losses = MAvgMeter(self.pred_id)
previous_loss = 10000000000.0
for epoch in range(self.opt.max_epoch):
loss_meter.reset()
abs_losses.reset()
for i, (N, A, label) in enumerate(train_loader):
if self.opt.use_gpu:
N = N.type(torch.long).cuda()
A = A.cuda()
label = {key: value.cuda() for key, value in label.items()}
self.optimizer.zero_grad()
output = self.model(N, A, label)
loss = output['loss']
loss.backward()
self.optimizer.step()
abs_losses.add(output['visloss'])
loss_meter.add(loss.data.cpu())
if i % self.opt.print_feq == self.opt.print_feq - 1:
nither = epoch * len(train_loader) + i
print('EPOCH:{0},i:{1},loss:{2}'.format(epoch, i, loss.
data.cpu()), end=' ')
self.writer.add_scalar('train_loss', loss_meter.value()
[0], nither)
for key in self.pred_id:
self.writer.add_scalar(key, abs_losses.value(key)[0
], nither)
print(key, float(abs_losses.value(key)[0]), end=' ')
print('\n')
if val_data:
val_loss = self.test(val_data, val=True)
print('val loss:', val_loss)
self.writer.add_scalar('val_loss', val_loss, epoch)
print('!!!!!!now{0},previous{1}'.format(loss_meter.value()[0],
previous_loss))
if loss_meter.value()[0] >= previous_loss:
self.opt.lr = self.opt.lr * self.opt.lr_decay
print('!!!!!LR:', self.opt.lr)
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.opt.lr
previous_loss = loss_meter.value()[0]
def test(self, test_data, val=False):
if self.opt.use_gpu:
self.model.cuda()
self.model.eval()
test_loader = DataLoader(test_data, batch_size=self.opt.batch_size,
shuffle=True)
result = []
loss_meter = meter.AverageValueMeter()
for i, (H, A, label) in enumerate(test_loader):
if self.opt.use_gpu:
H = H.type(torch.long).cuda()
A = A.cuda()
label = {key: value.cuda() for key, value in label.items()}
loss = self.model(H, A, label)['loss']
loss_meter.add(loss.data.cpu().detach().numpy())
if val:
return loss_meter.value()[0]
# --- Script entry: configure, build, and train the VAE on the ZINC data. ---
torch.set_default_tensor_type(torch.FloatTensor)  # CPU floats while loading
dconfig = Config()
dconfig.optimizer = torch.optim.Adam
dconfig.lr = 0.005
dconfig.res_connection = True  # presumably residual connections — confirm in VAE
dconfig.encoder_layers = 40
dconfig.node_feature_dim = 100
dconfig.batch_size = 50
# Pickled, cleaned ZINC molecule dataset (absolute path on the training host).
zinc_path = (
    '/home/jeffzhu/nips_gail/MCTs/dataset/datasets/zinc_dataset_clean.pkl')
load_path = None  # set to a checkpoint path to resume from saved weights
GAVAE = VAE(dconfig)
if load_path:
    GAVAE.load_state_dict(torch.load(load_path))
# 150000 / 1000 are presumably the train/val split sizes — confirm against
# Zinc_dataset's signature.
train_data, val_data, test_data = Zinc_dataset(zinc_path, 150000, 1000,
    dconfig.predictor_id).Get_data()
VAE_trainer = Trainer(model=GAVAE, opt=dconfig)
print(GAVAE)
VAE_trainer.train(train_data, val_data=val_data)
GAVAE.save()
print('save success')
<|reserved_special_token_1|>
from torch.utils.data import DataLoader
from config import Config
from torchnet import meter
import numpy as np
import torch
from torch import nn
from tensorboardX import SummaryWriter
from Funcs import MAvgMeter
from vae.base_vae import VAE
from vae.data_util import Zinc_dataset
import time
import torch.optim
class Trainer:
    """Training / evaluation driver for the graph VAE.

    Wraps the optimisation loop, TensorBoard logging, optional validation
    and a plateau-based learning-rate decay around a ``model`` whose forward
    pass ``model(N, A, label)`` returns a dict with 'loss' and 'visloss'.
    """

    def __init__(self, model=None, opt=None):
        """Build the trainer.

        Args:
            model: module to optimise; called as ``model(N, A, label)``.
            opt: a Config instance. Fix: the original signature used the
                mutable default ``opt=Config()``, which is evaluated once at
                definition time and therefore shared (including mutations
                such as ``self.opt.lr``) by every Trainer created with the
                default; a fresh Config is now created per instance.
        """
        self.model = model
        self.opt = Config() if opt is None else opt
        opt = self.opt
        self.criterion = opt.criterion
        self.pred_id = self.opt.predictor_id
        self.optimizer = opt.optimizer(self.model.parameters(), lr=opt.lr)
        self.log_path = opt.LOGS_PATH
        # One TensorBoard run directory per launch, stamped MMDD_HH_MM.
        self.writer = SummaryWriter(log_dir=self.opt.LOGS_PATH + '/' +
            time.strftime('%m%d_%H_%M'))
        if opt.use_gpu:
            torch.set_default_tensor_type(torch.cuda.FloatTensor)

    def train(self, train_data, val_data=None):
        """Run the full optimisation loop over *train_data*.

        Per epoch: iterate mini-batches, back-propagate the model loss,
        log running means to TensorBoard every ``opt.print_feq`` batches,
        optionally evaluate on *val_data*, and decay the learning rate by
        ``opt.lr_decay`` whenever the epoch-mean training loss fails to
        improve on the previous epoch.
        """
        print('Now Begin Training!')
        train_loader = DataLoader(train_data, batch_size=self.opt.
            batch_size, shuffle=True)
        if self.opt.use_gpu:
            self.model.cuda()
        # Running means: total loss plus one meter per predicted property.
        loss_meter = meter.AverageValueMeter()
        abs_losses = MAvgMeter(self.pred_id)
        previous_loss = 1e10  # sentinel so the first epoch never decays lr
        for epoch in range(self.opt.max_epoch):
            loss_meter.reset()
            abs_losses.reset()
            for i, (N, A, label) in enumerate(train_loader):
                if self.opt.use_gpu:
                    # Move the whole batch to the GPU alongside the model.
                    N = N.type(torch.long).cuda()
                    A = A.cuda()
                    label = {key: value.cuda() for key, value in label.items()}
                self.optimizer.zero_grad()
                output = self.model(N, A, label)
                loss = output['loss']
                loss.backward()
                self.optimizer.step()
                abs_losses.add(output['visloss'])
                loss_meter.add(loss.data.cpu())
                # Periodic console + TensorBoard reporting.
                if i % self.opt.print_feq == self.opt.print_feq - 1:
                    nither = epoch * len(train_loader) + i
                    print('EPOCH:{0},i:{1},loss:{2}'.format(epoch, i,
                        loss.data.cpu()), end=' ')
                    self.writer.add_scalar('train_loss',
                        loss_meter.value()[0], nither)
                    for key in self.pred_id:
                        self.writer.add_scalar(key,
                            abs_losses.value(key)[0], nither)
                        print(key, float(abs_losses.value(key)[0]), end=' ')
                    print('\n')
            if val_data:
                val_loss = self.test(val_data, val=True)
                print('val loss:', val_loss)
                self.writer.add_scalar('val_loss', val_loss, epoch)
            print('!!!!!!now{0},previous{1}'.format(loss_meter.value()[0],
                previous_loss))
            if loss_meter.value()[0] >= previous_loss:
                # Plateau: shrink the lr in-place on every parameter group.
                self.opt.lr = self.opt.lr * self.opt.lr_decay
                print('!!!!!LR:', self.opt.lr)
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] = self.opt.lr
            previous_loss = loss_meter.value()[0]

    def test(self, test_data, val=False):
        """Average the model loss over *test_data*.

        Returns the mean loss when ``val`` is True; otherwise returns None
        (kept for interface compatibility). Fix: removed the dead local
        ``result = []`` that was never populated or returned.

        NOTE(review): puts the model in eval() mode and does not restore
        train() mode — callers that keep training must restore it.
        """
        if self.opt.use_gpu:
            self.model.cuda()
        self.model.eval()
        test_loader = DataLoader(test_data, batch_size=self.opt.batch_size,
            shuffle=True)
        loss_meter = meter.AverageValueMeter()
        for H, A, label in test_loader:
            if self.opt.use_gpu:
                H = H.type(torch.long).cuda()
                A = A.cuda()
                label = {key: value.cuda() for key, value in label.items()}
            loss = self.model(H, A, label)['loss']
            loss_meter.add(loss.data.cpu().detach().numpy())
        if val:
            return loss_meter.value()[0]
# Script entry: configure, build and train the graph VAE on the ZINC dataset.
torch.set_default_tensor_type(torch.FloatTensor)
dconfig = Config()
dconfig.optimizer = torch.optim.Adam
dconfig.lr = 0.005
dconfig.res_connection = True  # residual connections (per config name — confirm in model)
dconfig.encoder_layers = 40
dconfig.node_feature_dim = 100
dconfig.batch_size = 50
# Absolute, machine-specific path to the pickled ZINC dataset.
zinc_path = (
    '/home/jeffzhu/nips_gail/MCTs/dataset/datasets/zinc_dataset_clean.pkl')
load_path = None  # set to a checkpoint path to resume from saved weights
GAVAE = VAE(dconfig)
if load_path:
    GAVAE.load_state_dict(torch.load(load_path))
# 150000 / 1000 presumably are the train / validation sizes — TODO confirm
# the split semantics against Zinc_dataset.
train_data, val_data, test_data = Zinc_dataset(zinc_path, 150000, 1000,
    dconfig.predictor_id).Get_data()
VAE_trainer = Trainer(model=GAVAE, opt=dconfig)
print(GAVAE)
VAE_trainer.train(train_data, val_data=val_data)
GAVAE.save()
print('save success')
<|reserved_special_token_1|>
from torch.utils.data import DataLoader
from config import Config
from torchnet import meter
import numpy as np
import torch
from torch import nn
from tensorboardX import SummaryWriter
from Funcs import MAvgMeter
from vae.base_vae import VAE
from vae.data_util import Zinc_dataset
import time
import torch.optim
class Trainer():
    """Training / evaluation driver for the graph VAE.

    Wraps the optimisation loop, TensorBoard logging, optional validation
    and a plateau-based learning-rate decay around a ``model`` whose forward
    pass ``model(N, A, label)`` returns a dict with 'loss' and 'visloss'.
    """

    def __init__(self, model=None, opt=Config()):
        # NOTE(review): ``opt=Config()`` is a mutable default evaluated once
        # at definition time, so Trainers built with the default share (and
        # mutate, via self.opt.lr) one Config instance — verify intent.
        self.model = model
        self.opt = opt
        self.criterion = opt.criterion

        self.pred_id = self.opt.predictor_id

        self.optimizer = opt.optimizer(self.model.parameters(), lr=opt.lr)
        self.log_path = opt.LOGS_PATH
        # One TensorBoard run directory per launch, stamped MMDD_HH_MM.
        self.writer = SummaryWriter(log_dir=self.opt.LOGS_PATH + '/' + time.strftime('%m%d_%H_%M'))

        if opt.use_gpu:
            torch.set_default_tensor_type(torch.cuda.FloatTensor)

    def train(self, train_data, val_data=None):
        """Run the full optimisation loop over *train_data*.

        Per epoch: iterate mini-batches, back-propagate the model loss, log
        running means to TensorBoard every ``opt.print_feq`` batches,
        optionally evaluate on *val_data*, and decay the learning rate by
        ``opt.lr_decay`` whenever the epoch-mean loss fails to improve.
        """
        print('Now Begin Training!')
        train_loader = DataLoader(train_data, batch_size=self.opt.batch_size, shuffle=True)

        if self.opt.use_gpu:
            self.model.cuda()

        # meter initialize: running mean of the total loss plus one meter
        # per predicted property.
        loss_meter = meter.AverageValueMeter()
        abs_losses = MAvgMeter(self.pred_id)
        previous_loss = 1e10  # sentinel so the first epoch never decays lr

        for epoch in range(self.opt.max_epoch):
            loss_meter.reset()
            abs_losses.reset()

            # train over one epoch of mini-batches
            for i, (N, A, label) in enumerate(train_loader):
                if self.opt.use_gpu:
                    # Move the whole batch to the GPU alongside the model.
                    N = N.type(torch.long).cuda()
                    A = A.cuda()
                    label = {key: value.cuda() for key, value in label.items()}
                # label = torch.unsqueeze(label, 1)  # patch for a data-preprocessing issue

                self.optimizer.zero_grad()

                output = self.model(N, A, label)
                loss = output['loss']
                loss.backward()
                self.optimizer.step()

                abs_losses.add(output['visloss'])
                loss_meter.add(loss.data.cpu())

                # tensorboard visualisation: periodic console + scalar logging

                if i % self.opt.print_feq == self.opt.print_feq - 1:
                    nither = epoch * len(train_loader) + i
                    print('EPOCH:{0},i:{1},loss:{2}'.format(epoch, i, loss.data.cpu()), end=' ')
                    self.writer.add_scalar('train_loss', loss_meter.value()[0], nither)
                    for key in self.pred_id:
                        self.writer.add_scalar(key, abs_losses.value(key)[0], nither)
                        print(key, float(abs_losses.value(key)[0]), end=' ')
                    print('\n')

            if val_data:
                val_loss = self.test(val_data, val=True)
                print('val loss:', val_loss)
                self.writer.add_scalar('val_loss', val_loss, epoch)

            print('!!!!!!now{0},previous{1}'.format(loss_meter.value()[0], previous_loss))
            if loss_meter.value()[0] >= previous_loss:
                # Plateau: shrink the lr in-place on every parameter group.
                self.opt.lr = self.opt.lr * self.opt.lr_decay
                print('!!!!!LR:', self.opt.lr)
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] = self.opt.lr

            previous_loss = loss_meter.value()[0]

    def test(self, test_data, val=False):
        """Average the model loss over *test_data*.

        Returns the mean loss when ``val`` is True; otherwise returns None.
        NOTE(review): puts the model in eval() mode and does not restore
        train() mode — callers that keep training must restore it.
        """

        if self.opt.use_gpu:
            self.model.cuda()

        self.model.eval()
        test_loader = DataLoader(test_data, batch_size=self.opt.batch_size, shuffle=True)
        result = []  # NOTE(review): never populated or returned — dead local.
        loss_meter = meter.AverageValueMeter()
        for i, (H, A, label) in enumerate(test_loader):

            # convert the batch to GPU tensors
            if self.opt.use_gpu:
                H = H.type(torch.long).cuda()
                A = A.cuda()
                label = {key: value.cuda() for key, value in label.items()}
            # label = torch.unsqueeze(label, 1)  # patch for a data-preprocessing issue

            loss = self.model(H, A, label)['loss']
            loss_meter.add(loss.data.cpu().detach().numpy())
        # (commented-out code that stacked and returned per-batch results
        # when val=False was removed for clarity)
        if val:
            return loss_meter.value()[0]
# begin main training: configure, build and train the graph VAE on ZINC.
torch.set_default_tensor_type(torch.FloatTensor)
dconfig = Config()

dconfig.optimizer = torch.optim.Adam
dconfig.lr = 5e-3
dconfig.res_connection = True  # residual connections (per config name — confirm in model)
dconfig.encoder_layers = 40
dconfig.node_feature_dim = 100
dconfig.batch_size = 50

# Absolute, machine-specific path to the pickled ZINC dataset.
zinc_path = '/home/jeffzhu/nips_gail/MCTs/dataset/datasets/zinc_dataset_clean.pkl'
load_path = None  # set to a checkpoint path to resume from saved weights

GAVAE = VAE(dconfig)
if load_path:
    GAVAE.load_state_dict(torch.load(load_path))

# 150000 / 1000 presumably are the train / validation sizes — TODO confirm
# the split semantics against Zinc_dataset.
train_data, val_data, test_data = Zinc_dataset(zinc_path, 150000, 1000, dconfig.predictor_id).Get_data()
VAE_trainer = Trainer(model=GAVAE, opt=dconfig)

print(GAVAE)
VAE_trainer.train(train_data, val_data=val_data)

GAVAE.save()
print('save success')
|
flexible
|
{
"blob_id": "8b7894e274647e48e3a1fe12473937bd6c62e943",
"index": 8741,
"step-1": "<mask token>\n\n\nclass Trainer:\n\n def __init__(self, model=None, opt=Config()):\n self.model = model\n self.opt = opt\n self.criterion = opt.criterion\n self.pred_id = self.opt.predictor_id\n self.optimizer = opt.optimizer(self.model.parameters(), lr=opt.lr)\n self.log_path = opt.LOGS_PATH\n self.writer = SummaryWriter(log_dir=self.opt.LOGS_PATH + '/' + time\n .strftime('%m%d_%H_%M'))\n if opt.use_gpu:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n\n def train(self, train_data, val_data=None):\n print('Now Begin Training!')\n train_loader = DataLoader(train_data, batch_size=self.opt.\n batch_size, shuffle=True)\n if self.opt.use_gpu:\n self.model.cuda()\n loss_meter = meter.AverageValueMeter()\n abs_losses = MAvgMeter(self.pred_id)\n previous_loss = 10000000000.0\n for epoch in range(self.opt.max_epoch):\n loss_meter.reset()\n abs_losses.reset()\n for i, (N, A, label) in enumerate(train_loader):\n if self.opt.use_gpu:\n N = N.type(torch.long).cuda()\n A = A.cuda()\n label = {key: value.cuda() for key, value in label.items()}\n self.optimizer.zero_grad()\n output = self.model(N, A, label)\n loss = output['loss']\n loss.backward()\n self.optimizer.step()\n abs_losses.add(output['visloss'])\n loss_meter.add(loss.data.cpu())\n if i % self.opt.print_feq == self.opt.print_feq - 1:\n nither = epoch * len(train_loader) + i\n print('EPOCH:{0},i:{1},loss:{2}'.format(epoch, i, loss.\n data.cpu()), end=' ')\n self.writer.add_scalar('train_loss', loss_meter.value()\n [0], nither)\n for key in self.pred_id:\n self.writer.add_scalar(key, abs_losses.value(key)[0\n ], nither)\n print(key, float(abs_losses.value(key)[0]), end=' ')\n print('\\n')\n if val_data:\n val_loss = self.test(val_data, val=True)\n print('val loss:', val_loss)\n self.writer.add_scalar('val_loss', val_loss, epoch)\n print('!!!!!!now{0},previous{1}'.format(loss_meter.value()[0],\n previous_loss))\n if loss_meter.value()[0] >= previous_loss:\n self.opt.lr = self.opt.lr * 
self.opt.lr_decay\n print('!!!!!LR:', self.opt.lr)\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.opt.lr\n previous_loss = loss_meter.value()[0]\n\n def test(self, test_data, val=False):\n if self.opt.use_gpu:\n self.model.cuda()\n self.model.eval()\n test_loader = DataLoader(test_data, batch_size=self.opt.batch_size,\n shuffle=True)\n result = []\n loss_meter = meter.AverageValueMeter()\n for i, (H, A, label) in enumerate(test_loader):\n if self.opt.use_gpu:\n H = H.type(torch.long).cuda()\n A = A.cuda()\n label = {key: value.cuda() for key, value in label.items()}\n loss = self.model(H, A, label)['loss']\n loss_meter.add(loss.data.cpu().detach().numpy())\n if val:\n return loss_meter.value()[0]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Trainer:\n\n def __init__(self, model=None, opt=Config()):\n self.model = model\n self.opt = opt\n self.criterion = opt.criterion\n self.pred_id = self.opt.predictor_id\n self.optimizer = opt.optimizer(self.model.parameters(), lr=opt.lr)\n self.log_path = opt.LOGS_PATH\n self.writer = SummaryWriter(log_dir=self.opt.LOGS_PATH + '/' + time\n .strftime('%m%d_%H_%M'))\n if opt.use_gpu:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n\n def train(self, train_data, val_data=None):\n print('Now Begin Training!')\n train_loader = DataLoader(train_data, batch_size=self.opt.\n batch_size, shuffle=True)\n if self.opt.use_gpu:\n self.model.cuda()\n loss_meter = meter.AverageValueMeter()\n abs_losses = MAvgMeter(self.pred_id)\n previous_loss = 10000000000.0\n for epoch in range(self.opt.max_epoch):\n loss_meter.reset()\n abs_losses.reset()\n for i, (N, A, label) in enumerate(train_loader):\n if self.opt.use_gpu:\n N = N.type(torch.long).cuda()\n A = A.cuda()\n label = {key: value.cuda() for key, value in label.items()}\n self.optimizer.zero_grad()\n output = self.model(N, A, label)\n loss = output['loss']\n loss.backward()\n self.optimizer.step()\n abs_losses.add(output['visloss'])\n loss_meter.add(loss.data.cpu())\n if i % self.opt.print_feq == self.opt.print_feq - 1:\n nither = epoch * len(train_loader) + i\n print('EPOCH:{0},i:{1},loss:{2}'.format(epoch, i, loss.\n data.cpu()), end=' ')\n self.writer.add_scalar('train_loss', loss_meter.value()\n [0], nither)\n for key in self.pred_id:\n self.writer.add_scalar(key, abs_losses.value(key)[0\n ], nither)\n print(key, float(abs_losses.value(key)[0]), end=' ')\n print('\\n')\n if val_data:\n val_loss = self.test(val_data, val=True)\n print('val loss:', val_loss)\n self.writer.add_scalar('val_loss', val_loss, epoch)\n print('!!!!!!now{0},previous{1}'.format(loss_meter.value()[0],\n previous_loss))\n if loss_meter.value()[0] >= previous_loss:\n self.opt.lr = self.opt.lr * 
self.opt.lr_decay\n print('!!!!!LR:', self.opt.lr)\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.opt.lr\n previous_loss = loss_meter.value()[0]\n\n def test(self, test_data, val=False):\n if self.opt.use_gpu:\n self.model.cuda()\n self.model.eval()\n test_loader = DataLoader(test_data, batch_size=self.opt.batch_size,\n shuffle=True)\n result = []\n loss_meter = meter.AverageValueMeter()\n for i, (H, A, label) in enumerate(test_loader):\n if self.opt.use_gpu:\n H = H.type(torch.long).cuda()\n A = A.cuda()\n label = {key: value.cuda() for key, value in label.items()}\n loss = self.model(H, A, label)['loss']\n loss_meter.add(loss.data.cpu().detach().numpy())\n if val:\n return loss_meter.value()[0]\n\n\ntorch.set_default_tensor_type(torch.FloatTensor)\n<mask token>\nif load_path:\n GAVAE.load_state_dict(torch.load(load_path))\n<mask token>\nprint(GAVAE)\nVAE_trainer.train(train_data, val_data=val_data)\nGAVAE.save()\nprint('save success')\n",
"step-3": "<mask token>\n\n\nclass Trainer:\n\n def __init__(self, model=None, opt=Config()):\n self.model = model\n self.opt = opt\n self.criterion = opt.criterion\n self.pred_id = self.opt.predictor_id\n self.optimizer = opt.optimizer(self.model.parameters(), lr=opt.lr)\n self.log_path = opt.LOGS_PATH\n self.writer = SummaryWriter(log_dir=self.opt.LOGS_PATH + '/' + time\n .strftime('%m%d_%H_%M'))\n if opt.use_gpu:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n\n def train(self, train_data, val_data=None):\n print('Now Begin Training!')\n train_loader = DataLoader(train_data, batch_size=self.opt.\n batch_size, shuffle=True)\n if self.opt.use_gpu:\n self.model.cuda()\n loss_meter = meter.AverageValueMeter()\n abs_losses = MAvgMeter(self.pred_id)\n previous_loss = 10000000000.0\n for epoch in range(self.opt.max_epoch):\n loss_meter.reset()\n abs_losses.reset()\n for i, (N, A, label) in enumerate(train_loader):\n if self.opt.use_gpu:\n N = N.type(torch.long).cuda()\n A = A.cuda()\n label = {key: value.cuda() for key, value in label.items()}\n self.optimizer.zero_grad()\n output = self.model(N, A, label)\n loss = output['loss']\n loss.backward()\n self.optimizer.step()\n abs_losses.add(output['visloss'])\n loss_meter.add(loss.data.cpu())\n if i % self.opt.print_feq == self.opt.print_feq - 1:\n nither = epoch * len(train_loader) + i\n print('EPOCH:{0},i:{1},loss:{2}'.format(epoch, i, loss.\n data.cpu()), end=' ')\n self.writer.add_scalar('train_loss', loss_meter.value()\n [0], nither)\n for key in self.pred_id:\n self.writer.add_scalar(key, abs_losses.value(key)[0\n ], nither)\n print(key, float(abs_losses.value(key)[0]), end=' ')\n print('\\n')\n if val_data:\n val_loss = self.test(val_data, val=True)\n print('val loss:', val_loss)\n self.writer.add_scalar('val_loss', val_loss, epoch)\n print('!!!!!!now{0},previous{1}'.format(loss_meter.value()[0],\n previous_loss))\n if loss_meter.value()[0] >= previous_loss:\n self.opt.lr = self.opt.lr * 
self.opt.lr_decay\n print('!!!!!LR:', self.opt.lr)\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.opt.lr\n previous_loss = loss_meter.value()[0]\n\n def test(self, test_data, val=False):\n if self.opt.use_gpu:\n self.model.cuda()\n self.model.eval()\n test_loader = DataLoader(test_data, batch_size=self.opt.batch_size,\n shuffle=True)\n result = []\n loss_meter = meter.AverageValueMeter()\n for i, (H, A, label) in enumerate(test_loader):\n if self.opt.use_gpu:\n H = H.type(torch.long).cuda()\n A = A.cuda()\n label = {key: value.cuda() for key, value in label.items()}\n loss = self.model(H, A, label)['loss']\n loss_meter.add(loss.data.cpu().detach().numpy())\n if val:\n return loss_meter.value()[0]\n\n\ntorch.set_default_tensor_type(torch.FloatTensor)\ndconfig = Config()\ndconfig.optimizer = torch.optim.Adam\ndconfig.lr = 0.005\ndconfig.res_connection = True\ndconfig.encoder_layers = 40\ndconfig.node_feature_dim = 100\ndconfig.batch_size = 50\nzinc_path = (\n '/home/jeffzhu/nips_gail/MCTs/dataset/datasets/zinc_dataset_clean.pkl')\nload_path = None\nGAVAE = VAE(dconfig)\nif load_path:\n GAVAE.load_state_dict(torch.load(load_path))\ntrain_data, val_data, test_data = Zinc_dataset(zinc_path, 150000, 1000,\n dconfig.predictor_id).Get_data()\nVAE_trainer = Trainer(model=GAVAE, opt=dconfig)\nprint(GAVAE)\nVAE_trainer.train(train_data, val_data=val_data)\nGAVAE.save()\nprint('save success')\n",
"step-4": "from torch.utils.data import DataLoader\nfrom config import Config\nfrom torchnet import meter\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom tensorboardX import SummaryWriter\nfrom Funcs import MAvgMeter\nfrom vae.base_vae import VAE\nfrom vae.data_util import Zinc_dataset\nimport time\nimport torch.optim\n\n\nclass Trainer:\n\n def __init__(self, model=None, opt=Config()):\n self.model = model\n self.opt = opt\n self.criterion = opt.criterion\n self.pred_id = self.opt.predictor_id\n self.optimizer = opt.optimizer(self.model.parameters(), lr=opt.lr)\n self.log_path = opt.LOGS_PATH\n self.writer = SummaryWriter(log_dir=self.opt.LOGS_PATH + '/' + time\n .strftime('%m%d_%H_%M'))\n if opt.use_gpu:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n\n def train(self, train_data, val_data=None):\n print('Now Begin Training!')\n train_loader = DataLoader(train_data, batch_size=self.opt.\n batch_size, shuffle=True)\n if self.opt.use_gpu:\n self.model.cuda()\n loss_meter = meter.AverageValueMeter()\n abs_losses = MAvgMeter(self.pred_id)\n previous_loss = 10000000000.0\n for epoch in range(self.opt.max_epoch):\n loss_meter.reset()\n abs_losses.reset()\n for i, (N, A, label) in enumerate(train_loader):\n if self.opt.use_gpu:\n N = N.type(torch.long).cuda()\n A = A.cuda()\n label = {key: value.cuda() for key, value in label.items()}\n self.optimizer.zero_grad()\n output = self.model(N, A, label)\n loss = output['loss']\n loss.backward()\n self.optimizer.step()\n abs_losses.add(output['visloss'])\n loss_meter.add(loss.data.cpu())\n if i % self.opt.print_feq == self.opt.print_feq - 1:\n nither = epoch * len(train_loader) + i\n print('EPOCH:{0},i:{1},loss:{2}'.format(epoch, i, loss.\n data.cpu()), end=' ')\n self.writer.add_scalar('train_loss', loss_meter.value()\n [0], nither)\n for key in self.pred_id:\n self.writer.add_scalar(key, abs_losses.value(key)[0\n ], nither)\n print(key, float(abs_losses.value(key)[0]), end=' ')\n print('\\n')\n if 
val_data:\n val_loss = self.test(val_data, val=True)\n print('val loss:', val_loss)\n self.writer.add_scalar('val_loss', val_loss, epoch)\n print('!!!!!!now{0},previous{1}'.format(loss_meter.value()[0],\n previous_loss))\n if loss_meter.value()[0] >= previous_loss:\n self.opt.lr = self.opt.lr * self.opt.lr_decay\n print('!!!!!LR:', self.opt.lr)\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.opt.lr\n previous_loss = loss_meter.value()[0]\n\n def test(self, test_data, val=False):\n if self.opt.use_gpu:\n self.model.cuda()\n self.model.eval()\n test_loader = DataLoader(test_data, batch_size=self.opt.batch_size,\n shuffle=True)\n result = []\n loss_meter = meter.AverageValueMeter()\n for i, (H, A, label) in enumerate(test_loader):\n if self.opt.use_gpu:\n H = H.type(torch.long).cuda()\n A = A.cuda()\n label = {key: value.cuda() for key, value in label.items()}\n loss = self.model(H, A, label)['loss']\n loss_meter.add(loss.data.cpu().detach().numpy())\n if val:\n return loss_meter.value()[0]\n\n\ntorch.set_default_tensor_type(torch.FloatTensor)\ndconfig = Config()\ndconfig.optimizer = torch.optim.Adam\ndconfig.lr = 0.005\ndconfig.res_connection = True\ndconfig.encoder_layers = 40\ndconfig.node_feature_dim = 100\ndconfig.batch_size = 50\nzinc_path = (\n '/home/jeffzhu/nips_gail/MCTs/dataset/datasets/zinc_dataset_clean.pkl')\nload_path = None\nGAVAE = VAE(dconfig)\nif load_path:\n GAVAE.load_state_dict(torch.load(load_path))\ntrain_data, val_data, test_data = Zinc_dataset(zinc_path, 150000, 1000,\n dconfig.predictor_id).Get_data()\nVAE_trainer = Trainer(model=GAVAE, opt=dconfig)\nprint(GAVAE)\nVAE_trainer.train(train_data, val_data=val_data)\nGAVAE.save()\nprint('save success')\n",
"step-5": "from torch.utils.data import DataLoader\nfrom config import Config\nfrom torchnet import meter\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom tensorboardX import SummaryWriter\nfrom Funcs import MAvgMeter\nfrom vae.base_vae import VAE\nfrom vae.data_util import Zinc_dataset\nimport time\nimport torch.optim\n\n\nclass Trainer():\n def __init__(self, model=None, opt=Config()):\n self.model = model\n self.opt = opt\n self.criterion = opt.criterion\n\n self.pred_id = self.opt.predictor_id\n\n self.optimizer = opt.optimizer(self.model.parameters(), lr=opt.lr)\n self.log_path = opt.LOGS_PATH\n self.writer = SummaryWriter(log_dir=self.opt.LOGS_PATH + '/' + time.strftime('%m%d_%H_%M'))\n\n if opt.use_gpu:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n\n def train(self, train_data, val_data=None):\n print('Now Begin Training!')\n train_loader = DataLoader(train_data, batch_size=self.opt.batch_size, shuffle=True)\n\n if self.opt.use_gpu:\n self.model.cuda()\n\n # meter initialize\n loss_meter = meter.AverageValueMeter()\n abs_losses = MAvgMeter(self.pred_id)\n previous_loss = 1e10\n\n for epoch in range(self.opt.max_epoch):\n loss_meter.reset()\n abs_losses.reset()\n\n # train\n for i, (N, A, label) in enumerate(train_loader):\n if self.opt.use_gpu:\n N = N.type(torch.long).cuda()\n A = A.cuda()\n label = {key: value.cuda() for key, value in label.items()}\n # label = torch.unsqueeze(label, 1) # 数据预处理问题补丁\n\n self.optimizer.zero_grad()\n\n output = self.model(N, A, label)\n loss = output['loss']\n loss.backward()\n self.optimizer.step()\n\n abs_losses.add(output['visloss'])\n loss_meter.add(loss.data.cpu())\n\n # tensorboard visulize module\n\n if i % self.opt.print_feq == self.opt.print_feq - 1:\n nither = epoch * len(train_loader) + i\n print('EPOCH:{0},i:{1},loss:{2}'.format(epoch, i, loss.data.cpu()), end=' ')\n self.writer.add_scalar('train_loss', loss_meter.value()[0], nither)\n for key in self.pred_id:\n self.writer.add_scalar(key, 
abs_losses.value(key)[0], nither)\n print(key, float(abs_losses.value(key)[0]), end=' ')\n print('\\n')\n\n if val_data:\n val_loss = self.test(val_data, val=True)\n print('val loss:', val_loss)\n self.writer.add_scalar('val_loss', val_loss, epoch)\n\n print('!!!!!!now{0},previous{1}'.format(loss_meter.value()[0], previous_loss))\n if loss_meter.value()[0] >= previous_loss:\n self.opt.lr = self.opt.lr * self.opt.lr_decay\n print('!!!!!LR:', self.opt.lr)\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.opt.lr\n\n previous_loss = loss_meter.value()[0]\n\n def test(self, test_data, val=False):\n\n if self.opt.use_gpu:\n self.model.cuda()\n\n self.model.eval()\n test_loader = DataLoader(test_data, batch_size=self.opt.batch_size, shuffle=True)\n result = []\n loss_meter = meter.AverageValueMeter()\n for i, (H, A, label) in enumerate(test_loader):\n\n # 数据格式转换\n if self.opt.use_gpu:\n H = H.type(torch.long).cuda()\n A = A.cuda()\n label = {key: value.cuda() for key, value in label.items()}\n # label = torch.unsqueeze(label, 1) # 数据预处理问题补丁\n\n loss = self.model(H, A, label)['loss']\n loss_meter.add(loss.data.cpu().detach().numpy())\n #\n #\n # if not val:\n # result.append(score.cpu().detach().numpy())\n #\n #\n #\n # self.model.train()\n if val:\n return loss_meter.value()[0]\n # else:\n # result = np.stack(result)\n # return result,loss_meter.value()[0]\n\n\n# begin main training\ntorch.set_default_tensor_type(torch.FloatTensor)\ndconfig = Config()\n\ndconfig.optimizer = torch.optim.Adam\ndconfig.lr = 5e-3\ndconfig.res_connection = True\ndconfig.encoder_layers = 40\ndconfig.node_feature_dim = 100\ndconfig.batch_size = 50\n\nzinc_path = '/home/jeffzhu/nips_gail/MCTs/dataset/datasets/zinc_dataset_clean.pkl'\nload_path = None\n\nGAVAE = VAE(dconfig)\nif load_path:\n GAVAE.load_state_dict(torch.load(load_path))\n\ntrain_data, val_data, test_data = Zinc_dataset(zinc_path, 150000, 1000, dconfig.predictor_id).Get_data()\nVAE_trainer = 
Trainer(model=GAVAE, opt=dconfig)\n\nprint(GAVAE)\nVAE_trainer.train(train_data, val_data=val_data)\n\nGAVAE.save()\nprint('save success')\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
import tidylib
def tidy(html):
html, errors = tidylib.tidy_document(html, options={'force-output':
True, 'output-xhtml': True, 'tidy-mark': False})
return html
except ImportError:
def tidy(html):
return html
<|reserved_special_token_1|>
"""Attempts to use <http://countergram.com/software/pytidylib>."""

try:
    import tidylib

    def tidy(html):
        """Run *html* through HTML Tidy, forcing XHTML output."""
        cleaned, _errors = tidylib.tidy_document(
            html,
            options={'force-output': True, 'output-xhtml': True,
                     'tidy-mark': False},
        )
        return cleaned

except ImportError:

    def tidy(html):
        """Fallback when pytidylib is unavailable: return input unchanged."""
        return html
|
flexible
|
{
"blob_id": "33ec822f6149a57244edf6d8d99a5b3726600c2e",
"index": 3236,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n import tidylib\n\n def tidy(html):\n html, errors = tidylib.tidy_document(html, options={'force-output':\n True, 'output-xhtml': True, 'tidy-mark': False})\n return html\nexcept ImportError:\n\n def tidy(html):\n return html\n",
"step-3": "'Attempts to use <http://countergram.com/software/pytidylib>.'\n\ntry:\n import tidylib\n\n def tidy(html):\n html, errors = tidylib.tidy_document(html, options={'force-output': True,\n 'output-xhtml': True, 'tidy-mark': False})\n return html\n \nexcept ImportError:\n def tidy(html):\n return html\n ",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Stack:
def __init__(self):
self.q1 = Queue()
self.q2 = Queue()
def empty(self):
return self.q1.empty()
def push(self, element):
if self.empty():
self.q1.enqueue(element)
else:
self.q2.enqueue(element)
while not self.q1.empty():
self.q2.enqueue(self.q1.dequeue())
self.q1, self.q2 = self.q2, self.q1
<|reserved_special_token_0|>
def top(self):
return self.q1.head.next.element
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Stack:
def __init__(self):
self.q1 = Queue()
self.q2 = Queue()
def empty(self):
return self.q1.empty()
def push(self, element):
if self.empty():
self.q1.enqueue(element)
else:
self.q2.enqueue(element)
while not self.q1.empty():
self.q2.enqueue(self.q1.dequeue())
self.q1, self.q2 = self.q2, self.q1
def pop(self):
return self.q1.dequeue()
def top(self):
return self.q1.head.next.element
def __repr__(self):
return str(self.q1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Stack:
def __init__(self):
self.q1 = Queue()
self.q2 = Queue()
def empty(self):
return self.q1.empty()
def push(self, element):
if self.empty():
self.q1.enqueue(element)
else:
self.q2.enqueue(element)
while not self.q1.empty():
self.q2.enqueue(self.q1.dequeue())
self.q1, self.q2 = self.q2, self.q1
def pop(self):
return self.q1.dequeue()
def top(self):
return self.q1.head.next.element
def __repr__(self):
return str(self.q1)
def test_stack():
s = Stack()
s.push(1)
s.push(2)
s.push(3)
s.push(4)
assert str(s) == 'head > 4 > 3 > 2 > 1 > '
assert s.pop() == 4
assert s.pop() == 3
assert s.pop() == 2
assert s.pop() == 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Stack:
def __init__(self):
self.q1 = Queue()
self.q2 = Queue()
def empty(self):
return self.q1.empty()
def push(self, element):
if self.empty():
self.q1.enqueue(element)
else:
self.q2.enqueue(element)
while not self.q1.empty():
self.q2.enqueue(self.q1.dequeue())
self.q1, self.q2 = self.q2, self.q1
def pop(self):
return self.q1.dequeue()
def top(self):
return self.q1.head.next.element
def __repr__(self):
return str(self.q1)
def test_stack():
s = Stack()
s.push(1)
s.push(2)
s.push(3)
s.push(4)
assert str(s) == 'head > 4 > 3 > 2 > 1 > '
assert s.pop() == 4
assert s.pop() == 3
assert s.pop() == 2
assert s.pop() == 1
if __name__ == '__main__':
test_stack()
<|reserved_special_token_1|>
from queue import Queue
class Stack:
    """LIFO stack built on two FIFO queues.

    Invariant: ``q1`` always holds the elements in stack order (most
    recent first), so ``pop``/``top`` are single queue operations while
    ``push`` costs O(n) queue moves.
    """

    def __init__(self):
        self.q1 = Queue()
        self.q2 = Queue()

    def empty(self):
        """Return True when the stack holds no elements."""
        return self.q1.empty()

    def push(self, element):
        """Insert *element* on top of the stack (O(n) queue operations)."""
        if self.q1.empty():
            self.q1.enqueue(element)
            return
        # Enqueue the newcomer first, then drain q1 behind it so q2 ends
        # up in stack order; finally swap the two queues' roles.
        self.q2.enqueue(element)
        while not self.q1.empty():
            self.q2.enqueue(self.q1.dequeue())
        self.q1, self.q2 = self.q2, self.q1

    def pop(self):
        """Remove and return the most recently pushed element."""
        return self.q1.dequeue()

    def top(self):
        """Return, without removing, the most recently pushed element."""
        # Reaches into the queue's internal linked list — presumably the
        # custom Queue exposes head.next.element; confirm against queue.py.
        return self.q1.head.next.element

    def __repr__(self):
        return str(self.q1)
def test_stack():
    """Smoke-test the queue-backed Stack: repr format and LIFO pop order."""
    s = Stack()
    for value in (1, 2, 3, 4):
        s.push(value)
    assert str(s) == 'head > 4 > 3 > 2 > 1 > '
    for expected in (4, 3, 2, 1):
        assert s.pop() == expected
# Run the self-test when this file is executed directly.
if __name__ == '__main__':
    test_stack()
|
flexible
|
{
"blob_id": "4f5f4aadfeabb13790b417b334c5f73c6d0345a7",
"index": 9256,
"step-1": "<mask token>\n\n\nclass Stack:\n\n def __init__(self):\n self.q1 = Queue()\n self.q2 = Queue()\n\n def empty(self):\n return self.q1.empty()\n\n def push(self, element):\n if self.empty():\n self.q1.enqueue(element)\n else:\n self.q2.enqueue(element)\n while not self.q1.empty():\n self.q2.enqueue(self.q1.dequeue())\n self.q1, self.q2 = self.q2, self.q1\n <mask token>\n\n def top(self):\n return self.q1.head.next.element\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Stack:\n\n def __init__(self):\n self.q1 = Queue()\n self.q2 = Queue()\n\n def empty(self):\n return self.q1.empty()\n\n def push(self, element):\n if self.empty():\n self.q1.enqueue(element)\n else:\n self.q2.enqueue(element)\n while not self.q1.empty():\n self.q2.enqueue(self.q1.dequeue())\n self.q1, self.q2 = self.q2, self.q1\n\n def pop(self):\n return self.q1.dequeue()\n\n def top(self):\n return self.q1.head.next.element\n\n def __repr__(self):\n return str(self.q1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Stack:\n\n def __init__(self):\n self.q1 = Queue()\n self.q2 = Queue()\n\n def empty(self):\n return self.q1.empty()\n\n def push(self, element):\n if self.empty():\n self.q1.enqueue(element)\n else:\n self.q2.enqueue(element)\n while not self.q1.empty():\n self.q2.enqueue(self.q1.dequeue())\n self.q1, self.q2 = self.q2, self.q1\n\n def pop(self):\n return self.q1.dequeue()\n\n def top(self):\n return self.q1.head.next.element\n\n def __repr__(self):\n return str(self.q1)\n\n\ndef test_stack():\n s = Stack()\n s.push(1)\n s.push(2)\n s.push(3)\n s.push(4)\n assert str(s) == 'head > 4 > 3 > 2 > 1 > '\n assert s.pop() == 4\n assert s.pop() == 3\n assert s.pop() == 2\n assert s.pop() == 1\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Stack:\n\n def __init__(self):\n self.q1 = Queue()\n self.q2 = Queue()\n\n def empty(self):\n return self.q1.empty()\n\n def push(self, element):\n if self.empty():\n self.q1.enqueue(element)\n else:\n self.q2.enqueue(element)\n while not self.q1.empty():\n self.q2.enqueue(self.q1.dequeue())\n self.q1, self.q2 = self.q2, self.q1\n\n def pop(self):\n return self.q1.dequeue()\n\n def top(self):\n return self.q1.head.next.element\n\n def __repr__(self):\n return str(self.q1)\n\n\ndef test_stack():\n s = Stack()\n s.push(1)\n s.push(2)\n s.push(3)\n s.push(4)\n assert str(s) == 'head > 4 > 3 > 2 > 1 > '\n assert s.pop() == 4\n assert s.pop() == 3\n assert s.pop() == 2\n assert s.pop() == 1\n\n\nif __name__ == '__main__':\n test_stack()\n",
"step-5": "from queue import Queue\n\n\nclass Stack:\n def __init__(self):\n self.q1 = Queue()\n self.q2 = Queue()\n\n def empty(self):\n return self.q1.empty()\n\n def push(self, element):\n if self.empty():\n self.q1.enqueue(element)\n else:\n self.q2.enqueue(element)\n while not self.q1.empty():\n self.q2.enqueue(self.q1.dequeue())\n self.q1, self.q2 = self.q2, self.q1\n\n def pop(self):\n return self.q1.dequeue()\n\n def top(self):\n return self.q1.head.next.element\n\n def __repr__(self):\n return str(self.q1)\n\n\ndef test_stack():\n s = Stack()\n s.push(1)\n s.push(2)\n s.push(3)\n s.push(4)\n assert str(s) == 'head > 4 > 3 > 2 > 1 > '\n assert s.pop() == 4\n assert s.pop() == 3\n assert s.pop() == 2\n assert s.pop() == 1\n\n\nif __name__ == '__main__':\n test_stack()",
"step-ids": [
5,
7,
8,
9,
11
]
}
|
[
5,
7,
8,
9,
11
] |
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 or the MIT License.
# SPDX-License-Identifier: Apache-2.0 OR MIT
# Copyright Tock Contributors 2023.
# Prints out the source locations of panics in a Tock kernel ELF
#
# This tool attempts to trace all panic locations in a Tock kernel ELF by
# tracing calls to panic functions in the core library, using the debug information
# embedded in the ELF file. This tool requires an ELF which includes debug information.
# In its current state, cannot accurately provide the source locations
# corresponding to each panic, but tries to be honest about its confidence in
# each guess. In general, each guess is usually enough to locate the relevant panic.
# More creative analysis might be able to increase
# the accuracy with which this tool can identify source locations of panics. For now,
# this tool is useful for:
#
# - obtaining a rough count of the number of panics in a Tock kernel binary
#
# - finding and removing panics in a Tock kernel binary
#
# - roughly determining which components of a Tock kernel binary contain the most panic
# paths
#
# There are several assumptions built into this tool which may not always hold. For one,
# the list of panic_functions are assumed to not match any strings in the actual
# codebase, despite the fact they are incomplete function names and overlap is possible.
# I could solve this by using full names of these functions, but I am unsure how often
# the name mangling of these functions will change as the rust compiler changes so this
# approach felt potentially more stable.
#
# Several assumptions are made about DWARF locations that do not always hold, so source
# locations are not always accurate -- sometimes, the printed location just points to
# the function containing a panic, rather than the actual line on which the panic
# occurs. Some assumptions about which panics are in the core library and will be
# caught by grepping for other calls may also not always hold. The best way to inspect
# these is by manually inspecting the panics in the `within_core_panic_list`.
#
# This script stores panics which it cannot trace out of the core library in the
# `no_info_panic_list`. If this list contains some panics, that is a sign that some
# panics have not been identified. You can manually look at the addresses stored in
# this list, attempt to find the core library function which leads to these instrucitons
# being called, and then add those core library functions to the list of panic functions.
#
# The output of this script is *not* stable.
#
# Usage: find_panics.py ELF [--riscv]
#
# Requires Python 3.7+
#
# Author: Hudson Ayers <hayers@stanford.edu>
import argparse
import platform
import re
import subprocess
import sys
# Select the dwarfdump binary for the host OS; other platforms are unsupported.
if platform.system() == 'Darwin':
    DWARFDUMP = "dwarfdump"
elif platform.system() == 'Linux':
    DWARFDUMP = "llvm-dwarfdump"
else:
    raise NotImplementedError("Unknown platform")
# Note: In practice, GCC objdumps are better at symbol resolution than LLVM objdump
ARM_OBJDUMP = "arm-none-eabi-objdump"
RISCV_OBJDUMP = "riscv64-unknown-elf-objdump"
# TODO: For all functions below the initial batch, it would likely be preferable to
# automatically populate the list with additional functions in the core library using
# debug info. For now, however, I do this manually.
# Substrings of (mangled) core-library symbol names that indicate a panic path.
# find_all_panics() also appends OUTLINED_FUNCTION_* entries to this list at runtime.
panic_functions = [
    "expect_failed",
    "unwrap_failed",
    "panic_bounds_check",
    "slice_index_order_fail",
    "slice_end_index_len_fail",
    "slice_start_index_len_fail",
    "slice17len_mismatch_fail",
    "str16slice_error_fail",
    "copy_from_slice17len_mismatch_fail",
    "copy_from_slice17",
    "panicking5panic",
    # below are functions I have manually traced up from the above, more "core" panics, on a riscv binary with a low inline threshold
    "6unwrap17",
    "6expect17",
    "11copy_within17",
    "core..fmt..builders..PadAdapter", # calls slice_error_fail
    "11copy_within17", # calls panicking::panic
    "write_char", # calls PadAdapter one above
    "write_str", # calls write_char
    "printable5check", # calls slice_index_order_fail
    "char$u20$as$u20$core..fmt..Debug", # calls printable5check
    "GenericRadix7fmt_int", # calls slice_start_index_len_fail
    # below are functions I manually traced on an arm binary,
    # with a somewhat higher inline threshold.
    "10unwrap_err17h6",
    "13is_whitespace17",
    "$u20$core..slice..index..SliceIndex$LT",
    "core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter",
    "_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE",
    "_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE",
]
# Pre-compiled regex lookups, matched against single lines of dwarfdump output
dw_at_file_re = re.compile(r""".*(?:DW_AT_call_file|DW_AT_decl_file).*""")
dw_at_line_re = re.compile(r""".*(?:DW_AT_call_line|DW_AT_decl_line).*""")
line_info_re = re.compile(r""".*Line info.*""")
abstract_origin_re = re.compile(r""".*DW_AT_abstract_origin.*""")
dw_at_linkage_name_re = re.compile(r""".*DW_AT_linkage_name.*""")
dw_at_name_re = re.compile(r""".*DW_AT_name.*""")
def matches_panic_funcs(name):
    """Return the first entry of panic_functions contained in *name*.

    Returns the empty string when *name* matches none of them.
    """
    return next((func for func in panic_functions if func in name), "")
def linkage_or_origin_all_parents(elf, addr, linkage=False):
    """Return the DW_AT_abstract_origin (or, when *linkage* is true, the
    DW_AT_linkage_name) values for the DWARF entry at *addr* and all of
    its parent entries.
    """
    output = subprocess.run(
        (DWARFDUMP, "--lookup=0x" + addr, "-p", elf),
        capture_output=True,
        text=True,
    ).stdout
    pattern = dw_at_linkage_name_re if linkage else abstract_origin_re
    # Each matching line carries the attribute value as the second
    # '"'-delimited field, e.g.: DW_AT_abstract_origin ("<symbol>")
    return [line.strip().split('"')[1] for line in re.findall(pattern, output)]
def any_origin_matches_panic_func(elf, addr):
    """Return the panic-function name matched by any abstract origin in the
    parent chain of the DWARF entry at *addr*, or "" when none match.
    """
    candidates = map(matches_panic_funcs, linkage_or_origin_all_parents(elf, addr))
    return next((match for match in candidates if match), "")
def any_linkage_matches_panic_func(elf, addr):
    """Return the panic-function name matched by any linkage name in the
    parent chain of the DWARF entry at *addr*, or "" when none match.
    """
    candidates = map(
        matches_panic_funcs, linkage_or_origin_all_parents(elf, addr, True)
    )
    return next((match for match in candidates if match), "")
def check_for_source_in_parent(elf, addr):
    """Takes in a dwarfdump lookup including parents of the source DWARF
    location, returns the first parent with a call file not in
    the core library. If found, this often indicates the source of the panic
    in the Tock source code.

    Returns a (file, line) tuple of strings, or ("", "") when every parent
    entry is inside the core library.
    """
    result = subprocess.run(
        (DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True
    )
    dwarfdump = result.stdout
    matches = re.findall(dw_at_file_re, dwarfdump)
    def getFile(line):
        # The file path is the second '"'-delimited field of the attribute line.
        return line.strip().split('"')[1]
    source_files = list(map(getFile, matches))
    # Walk the call/decl files innermost-first; the first one outside the
    # core library is the best guess at the panic's true source.
    for (i, f) in enumerate(source_files[::-1]):
        if "/core/" not in f:
            line_matches = re.findall(dw_at_line_re, dwarfdump)
            def getLine(line):
                # Line numbers appear parenthesized, e.g. DW_AT_call_line (42)
                return line.strip().split("(")[1].split(")")[0]
            source_lines = list(map(getLine, line_matches))
            # Index the reversed line list with the same offset so the line
            # pairs with the file found above. NOTE(review): this pairing is
            # purely positional -- assumes file and line attributes appear in
            # matching order in the dwarfdump output; confirm this holds.
            source_line = source_lines[::-1][i]
            return (f, source_line)
    return ("", "")
def parse_args():
    """Build the command-line interface and parse sys.argv with it."""
    ap = argparse.ArgumentParser()
    ap.add_argument("ELF", help="ELF file for analysis")
    ap.add_argument(
        "--verbose", "-v", action="store_true",
        help="Output additional DWARF info for each panic location in the binary",
    )
    ap.add_argument("--riscv", action="store_true", help="Use risc-v based objdump")
    return ap.parse_args()
# Find all addresses that panic, and get basic dwarf info on those addresses
def find_all_panics(objdump, elf, is_riscv):
    """Locate every instruction in *elf* that references a known panic
    function and attribute each one to a source location via dwarfdump.

    Fixes over the previous revision: a dead fallthrough referenced the
    undefined name `panic_info` (NameError if ever reached), and a bare
    `except:` could swallow KeyboardInterrupt/SystemExit.

    Returns a 3-tuple of lists of panic-info dicts:
    (panics attributed to a source location,
     panics that stay within the core library and are ignored,
     panics for which no source information could be recovered).
    """
    panic_list = []
    within_core_panic_list = []
    no_info_panic_list = []
    result = subprocess.run((objdump, "-d", elf), capture_output=True, text=True)
    objdump_out = result.stdout
    for function in panic_functions:
        function_re = re.compile(".*:.*#.*" + function + ".*")
        if not is_riscv:
            # Arm-none-eabi-objdump uses ';' for comments instead of '#'
            function_re = re.compile(".*:.*<.*" + function + ".*")
        # TODO: arm elfs include loads of offsets from symbols in such a way that these lines
        # are matched by this regex. In general, these loads occur within the instruction stream
        # associated with the symbol at hand, and will usually be excluded by logic later in
        # this function. This leads to `within_core_panic_list` and `no_info_panic_list`
        # containing more "panics" than when analyzing a risc-v binary. We could fix this
        # by matching *only* on functions with instructions that actually jump to a new symbol,
        # but this would require a list of such instructions for each architecture. However
        # as written it actually lets us identify panics which are jumped to via addresses
        # stored in registers, which may actually catch additional valid panics.
        matches = re.findall(function_re, objdump_out)

        def getAddr(line):
            return line.strip().split(":")[0]

        addrs = list(map(getAddr, matches))
        for addr in addrs:
            result = subprocess.run(
                (DWARFDUMP, "--lookup=0x" + addr, elf), capture_output=True, text=True
            )
            dwarfdump = result.stdout
            dw_at_file = re.search(dw_at_file_re, dwarfdump)
            dw_at_line = re.search(dw_at_line_re, dwarfdump)
            line_info = re.search(line_info_re, dwarfdump)
            abstract_origin = re.search(abstract_origin_re, dwarfdump)
            linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)
            file_string = ""
            line_string = ""
            line_info_string = ""
            abstract_origin_string = ""
            linkage_name_string = ""
            if dw_at_file:
                file_string = dw_at_file.group(0).strip()
                line_string = dw_at_line.group(0).strip()
            panicinfo = {}
            panicinfo["addr"] = addr
            panicinfo["function"] = function
            if line_info:
                line_info_string = line_info.group(0).strip()
                panicinfo["line_info"] = line_info_string
            if abstract_origin:
                abstract_origin_string = abstract_origin.group(0).strip()
            if linkage_name:
                linkage_name_string = linkage_name.group(0).strip()
            if "DW_AT_call_file" in file_string and "DW_AT_decl_file" in file_string:
                raise RuntimeError("I misunderstand DWARF")
            if "DW_AT_call_file" in file_string or "DW_AT_decl_file" in file_string:
                filename = file_string.split('"')[1]
                line_num = line_string.split("(")[1].split(")")[0]
                if "DW_AT_call_file" in file_string:
                    panicinfo["call_file"] = filename
                    panicinfo["call_line"] = line_num
                if "DW_AT_decl_file" in file_string:
                    panicinfo["decl_file"] = filename
                    panicinfo["decl_line"] = line_num
                if not "/core/" in filename:
                    if not "closure" in abstract_origin_string:
                        panicinfo["best_guess_source"] = "call/decl"
                    else:
                        panicinfo["best_guess_source"] = "call-closure-line-info"
                    panic_list.append(panicinfo)
                    continue
                else:  # 'core' in filename
                    (parent_file, parent_line) = check_for_source_in_parent(elf, addr)
                    if parent_file:
                        panicinfo["parent_call_file"] = parent_file
                        panicinfo["parent_call_line"] = parent_line
                        panicinfo["best_guess_source"] = "parent"
                        panic_list.append(panicinfo)
                        continue
                    elif not abstract_origin and not linkage_name:
                        no_info_panic_list.append(panicinfo)
                        continue
                    elif abstract_origin:
                        if "core" in abstract_origin_string:
                            name = matches_panic_funcs(abstract_origin_string)
                            if name:
                                within_core_panic_list.append(panicinfo)
                                continue
                            else:
                                name2 = any_origin_matches_panic_func(elf, addr)
                                name3 = any_linkage_matches_panic_func(elf, addr)
                                if name2:
                                    within_core_panic_list.append(panicinfo)
                                    continue
                                elif name3:
                                    within_core_panic_list.append(panicinfo)
                                    continue
                                else:
                                    no_info_panic_list.append(panicinfo)
                                    continue
                        elif "closure" in abstract_origin_string:
                            # not in core, in closure, line info is probably sufficient
                            panicinfo["best_guess_source"] = "lineinfo"
                            panic_list.append(panicinfo)
                            continue
                        else:
                            # i have not seen this happen -- core in file, not closure, origin not core
                            raise RuntimeError("Unhandled")
                    if linkage_name:
                        name = matches_panic_funcs(linkage_name_string)
                        if name:
                            within_core_panic_list.append(panicinfo)
                            continue
                        else:
                            no_info_panic_list.append(panicinfo)
                            print(
                                "Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}".format(
                                    linkage_name_string, addr
                                )
                            )
                            continue
                    # Defensive fallthrough: every branch above ends in continue or
                    # raise, so this should be unreachable. (Bug fix: this formerly
                    # referenced the undefined name `panic_info`.)
                    no_info_panic_list.append(panicinfo)
                    print("did not find source for panic: {}".format(addr))
                    continue
            elif abstract_origin:
                origin = abstract_origin_string.split('"')[1]
                panicinfo["abstract_origin"] = origin
                if "core" in origin:
                    if matches_panic_funcs(origin):
                        within_core_panic_list.append(panicinfo)
                        continue
                    no_info_panic_list.append(panicinfo)
                    print(
                        "Probably could add this origin or one of its parents to the panic function list: {}".format(
                            abstract_origin_string
                        )
                    )
                    continue
                else:
                    panicinfo["best_guess_source"] = "abstract_origin + line"
                    panic_list.append(panicinfo)
                    continue
            else:
                # This gets hit for OUTLINED_FUNCTION_XX a bunch on ARM
                try:
                    dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[
                        -1
                    ].strip()  # see multiple matches for this string sometimes
                    function_name = dw_at_name_string.split('"')[1]
                    if "OUTLINED_FUNCTION_" in function_name:
                        # This is a common pattern where panicing paths are repeated in many
                        # places throughout the binary, and LLVMs optimizer outlines the repeated code.
                        # Let's add these to the list of panicing functions, dynamically so this is resilient to
                        # changes in the binary.
                        if function_name not in panic_functions:
                            # don't double insert
                            panic_functions.append(
                                function_name + ">"
                            )  # so FUNCTION_22 does not catch FUNCTION_222
                        within_core_panic_list.append(panicinfo)
                        continue
                    no_info_panic_list.append(panicinfo)
                    continue
                except Exception:  # was a bare except: -- don't swallow SystemExit/KeyboardInterrupt
                    # There seem to be a places where lookup fails completely
                    # Not easy to recover, log these and continue on.
                    no_info_panic_list.append(panicinfo)
                    continue
            raise RuntimeError("BUG: Should not reach here")
    return (panic_list, within_core_panic_list, no_info_panic_list)
def pretty_print(panicinfo):
    """Print a one-line, human-readable summary of one panic-info dict.

    The output format depends on panicinfo["best_guess_source"], which
    records how find_all_panics() derived the source location.

    Raises:
        RuntimeError: if "best_guess_source" holds an unrecognized value.
        KeyError: if "best_guess_source" (or "addr") is missing entirely.
    """
    source = panicinfo["best_guess_source"]
    if source == "call/decl":
        try:
            print(
                "\t{} -- {}:{}".format(
                    panicinfo["addr"], panicinfo["call_file"], panicinfo["call_line"]
                )
            )
        except KeyError:  # was a bare except: -- only the missing-key case is expected
            # Some entries carry only the declaring function's location.
            print(
                "\t{} -- in function starting at {}:{}".format(
                    panicinfo["addr"], panicinfo["decl_file"], panicinfo["decl_line"]
                )
            )
    elif source == "parent":
        print(
            "\t{} -- at or in function starting at {}:{}".format(
                panicinfo["addr"],
                panicinfo["parent_call_file"],
                panicinfo["parent_call_line"],
            )
        )
    elif source == "lineinfo":
        print(
            "\t{} -- in closure, try: {}".format(
                panicinfo["addr"], panicinfo["line_info"]
            )
        )
    elif source == "abstract_origin + line":
        print(
            "\t{} -- line_info: {} from origin :{}".format(
                panicinfo["addr"], panicinfo["line_info"], panicinfo["abstract_origin"]
            )
        )
    elif source == "call-closure-line-info":
        print(
            "\t{} -- in closure starting on line_info: {}".format(
                panicinfo["addr"], panicinfo["line_info"]
            )
        )
    else:
        raise RuntimeError("Missing best guess source: {}".format(panicinfo))
def main():
    """Entry point: print a panic report for the ELF named on the command line.

    Parses arguments, scans the binary for calls into known panic helpers,
    then prints each panic site grouped by the helper that produced it.

    Returns:
        -1 when run on an unsupported Python version, otherwise None.
    """
    args = parse_args()
    # Compare the full version tuple; checking only `minor` would misbehave
    # for any major version other than 3 (e.g. a hypothetical 4.0).
    if sys.version_info < (3, 7):
        print("This tool requires Python 3.7+")
        return -1
    print("Tock panic report for " + args.ELF)
    objdump = RISCV_OBJDUMP if args.riscv else ARM_OBJDUMP
    (panic_list, within_core_panic_list, no_info_panic_list) = find_all_panics(
        objdump, args.ELF, args.riscv
    )
    print("num_panics: {}".format(len(panic_list)))
    # Group panic sites by the panic helper function that produced them.
    # Note: find_all_panics may append OUTLINED_FUNCTION_* entries to
    # panic_functions, so the bucket dict is built after it returns.
    buckets_list = {f: [] for f in panic_functions}
    for panic in panic_list:
        buckets_list[panic["function"]].append(panic)
    for f, l in buckets_list.items():
        if l:
            print("{}: {}".format(f, len(l)))
            for p in l:
                pretty_print(p)
                if args.verbose:
                    print(p)
                print()
    print("num panics in core ignored: {}".format(len(within_core_panic_list)))
    print("num panics for which no info available: {}".format(len(no_info_panic_list)))
    if args.verbose:
        print(
            "If more debug info is needed, run dwarfdump directly on the address in question."
        )
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "8c0a4d5a86d9ebd38ea05efb5b5b570368ce1449",
"index": 1336,
"step-1": "<mask token>\n\n\ndef matches_panic_funcs(name):\n \"\"\"If the passed name contains one of the known panic_functions,\n return the match\n \"\"\"\n for func in panic_functions:\n if func in name:\n return func\n return ''\n\n\n<mask token>\n\n\ndef any_origin_matches_panic_func(elf, addr):\n \"\"\"returns name if any origin for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n origins = linkage_or_origin_all_parents(elf, addr)\n for origin in origins:\n name = matches_panic_funcs(origin)\n if name:\n return name\n return ''\n\n\ndef any_linkage_matches_panic_func(elf, addr):\n \"\"\"returns True + name if any linkage for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n linkages = linkage_or_origin_all_parents(elf, addr, True)\n for linkage in linkages:\n name = matches_panic_funcs(linkage)\n if name:\n return name\n return ''\n\n\ndef check_for_source_in_parent(elf, addr):\n \"\"\"Takes in a dwarfdump lookup including parents of the source DWARF\n location, returns the first parent with a call file not in\n the core library. 
If found, this often indicates the source of the panic\n in the Tock source code.\n \"\"\"\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n matches = re.findall(dw_at_file_re, dwarfdump)\n\n def getFile(line):\n return line.strip().split('\"')[1]\n source_files = list(map(getFile, matches))\n for i, f in enumerate(source_files[::-1]):\n if '/core/' not in f:\n line_matches = re.findall(dw_at_line_re, dwarfdump)\n\n def getLine(line):\n return line.strip().split('(')[1].split(')')[0]\n source_lines = list(map(getLine, line_matches))\n source_line = source_lines[::-1][i]\n return f, source_line\n return '', ''\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('ELF', help='ELF file for analysis')\n parser.add_argument('--verbose', '-v', action='store_true', help=\n 'Output additional DWARF info for each panic location in the binary')\n parser.add_argument('--riscv', action='store_true', help=\n 'Use risc-v based objdump')\n return parser.parse_args()\n\n\ndef find_all_panics(objdump, elf, is_riscv):\n panic_list = []\n within_core_panic_list = []\n no_info_panic_list = []\n result = subprocess.run((objdump, '-d', elf), capture_output=True, text\n =True)\n objdump_out = result.stdout\n for function in panic_functions:\n function_re = re.compile('.*:.*#.*' + function + '.*')\n if not is_riscv:\n function_re = re.compile('.*:.*<.*' + function + '.*')\n matches = re.findall(function_re, objdump_out)\n\n def getAddr(line):\n return line.strip().split(':')[0]\n addrs = list(map(getAddr, matches))\n for addr in addrs:\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n dw_at_file = re.search(dw_at_file_re, dwarfdump)\n dw_at_line = re.search(dw_at_line_re, dwarfdump)\n line_info = re.search(line_info_re, dwarfdump)\n abstract_origin = re.search(abstract_origin_re, dwarfdump)\n 
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)\n file_string = ''\n line_string = ''\n line_info_string = ''\n abstract_origin_string = ''\n linkage_name_string = ''\n if dw_at_file:\n file_string = dw_at_file.group(0).strip()\n line_string = dw_at_line.group(0).strip()\n panicinfo = {}\n panicinfo['addr'] = addr\n panicinfo['function'] = function\n if line_info:\n line_info_string = line_info.group(0).strip()\n panicinfo['line_info'] = line_info_string\n if abstract_origin:\n abstract_origin_string = abstract_origin.group(0).strip()\n if linkage_name:\n linkage_name_string = linkage_name.group(0).strip()\n if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in\n file_string):\n raise RuntimeError('I misunderstand DWARF')\n if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in\n file_string):\n filename = file_string.split('\"')[1]\n line_num = line_string.split('(')[1].split(')')[0]\n if 'DW_AT_call_file' in file_string:\n panicinfo['call_file'] = filename\n panicinfo['call_line'] = line_num\n if 'DW_AT_decl_file' in file_string:\n panicinfo['decl_file'] = filename\n panicinfo['decl_line'] = line_num\n if not '/core/' in filename:\n if not 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'call/decl'\n else:\n panicinfo['best_guess_source'\n ] = 'call-closure-line-info'\n panic_list.append(panicinfo)\n continue\n else:\n parent_file, parent_line = check_for_source_in_parent(elf,\n addr)\n if parent_file:\n panicinfo['parent_call_file'] = parent_file\n panicinfo['parent_call_line'] = parent_line\n panicinfo['best_guess_source'] = 'parent'\n panic_list.append(panicinfo)\n continue\n elif not abstract_origin and not linkage_name:\n no_info_panic_list.append(panicinfo)\n continue\n elif abstract_origin:\n if 'core' in abstract_origin_string:\n name = matches_panic_funcs(abstract_origin_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n name2 = any_origin_matches_panic_func(elf, addr\n )\n 
name3 = any_linkage_matches_panic_func(elf,\n addr)\n if name2:\n within_core_panic_list.append(panicinfo)\n continue\n elif name3:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n continue\n elif 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'lineinfo'\n panic_list.append(panicinfo)\n continue\n else:\n raise RuntimeError('Unhandled')\n if linkage_name:\n name = matches_panic_funcs(linkage_name_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n print(\n 'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'\n .format(linkage_name_string, addr))\n continue\n no_info_panic_list.append(panic_info)\n print('did not find source for panic: {}'.format(addr))\n continue\n elif abstract_origin:\n origin = abstract_origin_string.split('\"')[1]\n panicinfo['abstract_origin'] = origin\n if 'core' in origin:\n if matches_panic_funcs(origin):\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n print(\n 'Probably could add this origin or one of its parents to the panic function list: {}'\n .format(abstract_origin_string))\n continue\n else:\n panicinfo['best_guess_source'] = 'abstract_origin + line'\n panic_list.append(panicinfo)\n continue\n else:\n try:\n dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1\n ].strip()\n function_name = dw_at_name_string.split('\"')[1]\n if 'OUTLINED_FUNCTION_' in function_name:\n if function_name not in panic_functions:\n panic_functions.append(function_name + '>')\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n continue\n except:\n no_info_panic_list.append(panicinfo)\n continue\n raise RuntimeError('BUG: Should not reach here')\n return panic_list, within_core_panic_list, no_info_panic_list\n\n\n<mask token>\n\n\ndef main():\n args = parse_args()\n if 
sys.version_info.minor < 7:\n print('This tool requires Python 3.7+')\n return -1\n print('Tock panic report for ' + args.ELF)\n objdump = ARM_OBJDUMP\n if args.riscv:\n objdump = RISCV_OBJDUMP\n panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(\n objdump, args.ELF, args.riscv)\n print('num_panics: {}'.format(len(panic_list)))\n buckets_list = {}\n for f in panic_functions:\n buckets_list[f] = []\n for panic in panic_list:\n buckets_list[panic['function']].append(panic)\n for f, l in buckets_list.items():\n if len(l) > 0:\n print('{}: {}'.format(f, len(l)))\n for p in l:\n pretty_print(p)\n if args.verbose:\n print(p)\n print()\n print('num panics in core ignored: {}'.format(len(within_core_panic_list)))\n print('num panics for which no info available: {}'.format(len(\n no_info_panic_list)))\n if args.verbose:\n print(\n 'If more debug info is needed, run dwarfdump directly on the address in question.'\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\nif platform.system() == 'Darwin':\n DWARFDUMP = 'dwarfdump'\nelif platform.system() == 'Linux':\n DWARFDUMP = 'llvm-dwarfdump'\nelse:\n raise NotImplementedError('Unknown platform')\n<mask token>\n\n\ndef matches_panic_funcs(name):\n \"\"\"If the passed name contains one of the known panic_functions,\n return the match\n \"\"\"\n for func in panic_functions:\n if func in name:\n return func\n return ''\n\n\ndef linkage_or_origin_all_parents(elf, addr, linkage=False):\n \"\"\"Returns a list of the abstract origin or linkage of all parents of the dwarf\n location for the passed address\n \"\"\"\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n regex = abstract_origin_re\n if linkage:\n regex = dw_at_linkage_name_re\n matches = re.findall(regex, dwarfdump)\n\n def getFunction(line):\n return line.strip().split('\"')[1]\n origins = list(map(getFunction, matches))\n return origins\n\n\ndef any_origin_matches_panic_func(elf, addr):\n \"\"\"returns name if any origin for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n origins = linkage_or_origin_all_parents(elf, addr)\n for origin in origins:\n name = matches_panic_funcs(origin)\n if name:\n return name\n return ''\n\n\ndef any_linkage_matches_panic_func(elf, addr):\n \"\"\"returns True + name if any linkage for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n linkages = linkage_or_origin_all_parents(elf, addr, True)\n for linkage in linkages:\n name = matches_panic_funcs(linkage)\n if name:\n return name\n return ''\n\n\ndef check_for_source_in_parent(elf, addr):\n \"\"\"Takes in a dwarfdump lookup including parents of the source DWARF\n location, returns the first parent with a call file not in\n the core library. 
If found, this often indicates the source of the panic\n in the Tock source code.\n \"\"\"\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n matches = re.findall(dw_at_file_re, dwarfdump)\n\n def getFile(line):\n return line.strip().split('\"')[1]\n source_files = list(map(getFile, matches))\n for i, f in enumerate(source_files[::-1]):\n if '/core/' not in f:\n line_matches = re.findall(dw_at_line_re, dwarfdump)\n\n def getLine(line):\n return line.strip().split('(')[1].split(')')[0]\n source_lines = list(map(getLine, line_matches))\n source_line = source_lines[::-1][i]\n return f, source_line\n return '', ''\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('ELF', help='ELF file for analysis')\n parser.add_argument('--verbose', '-v', action='store_true', help=\n 'Output additional DWARF info for each panic location in the binary')\n parser.add_argument('--riscv', action='store_true', help=\n 'Use risc-v based objdump')\n return parser.parse_args()\n\n\ndef find_all_panics(objdump, elf, is_riscv):\n panic_list = []\n within_core_panic_list = []\n no_info_panic_list = []\n result = subprocess.run((objdump, '-d', elf), capture_output=True, text\n =True)\n objdump_out = result.stdout\n for function in panic_functions:\n function_re = re.compile('.*:.*#.*' + function + '.*')\n if not is_riscv:\n function_re = re.compile('.*:.*<.*' + function + '.*')\n matches = re.findall(function_re, objdump_out)\n\n def getAddr(line):\n return line.strip().split(':')[0]\n addrs = list(map(getAddr, matches))\n for addr in addrs:\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n dw_at_file = re.search(dw_at_file_re, dwarfdump)\n dw_at_line = re.search(dw_at_line_re, dwarfdump)\n line_info = re.search(line_info_re, dwarfdump)\n abstract_origin = re.search(abstract_origin_re, dwarfdump)\n 
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)\n file_string = ''\n line_string = ''\n line_info_string = ''\n abstract_origin_string = ''\n linkage_name_string = ''\n if dw_at_file:\n file_string = dw_at_file.group(0).strip()\n line_string = dw_at_line.group(0).strip()\n panicinfo = {}\n panicinfo['addr'] = addr\n panicinfo['function'] = function\n if line_info:\n line_info_string = line_info.group(0).strip()\n panicinfo['line_info'] = line_info_string\n if abstract_origin:\n abstract_origin_string = abstract_origin.group(0).strip()\n if linkage_name:\n linkage_name_string = linkage_name.group(0).strip()\n if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in\n file_string):\n raise RuntimeError('I misunderstand DWARF')\n if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in\n file_string):\n filename = file_string.split('\"')[1]\n line_num = line_string.split('(')[1].split(')')[0]\n if 'DW_AT_call_file' in file_string:\n panicinfo['call_file'] = filename\n panicinfo['call_line'] = line_num\n if 'DW_AT_decl_file' in file_string:\n panicinfo['decl_file'] = filename\n panicinfo['decl_line'] = line_num\n if not '/core/' in filename:\n if not 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'call/decl'\n else:\n panicinfo['best_guess_source'\n ] = 'call-closure-line-info'\n panic_list.append(panicinfo)\n continue\n else:\n parent_file, parent_line = check_for_source_in_parent(elf,\n addr)\n if parent_file:\n panicinfo['parent_call_file'] = parent_file\n panicinfo['parent_call_line'] = parent_line\n panicinfo['best_guess_source'] = 'parent'\n panic_list.append(panicinfo)\n continue\n elif not abstract_origin and not linkage_name:\n no_info_panic_list.append(panicinfo)\n continue\n elif abstract_origin:\n if 'core' in abstract_origin_string:\n name = matches_panic_funcs(abstract_origin_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n name2 = any_origin_matches_panic_func(elf, addr\n )\n 
name3 = any_linkage_matches_panic_func(elf,\n addr)\n if name2:\n within_core_panic_list.append(panicinfo)\n continue\n elif name3:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n continue\n elif 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'lineinfo'\n panic_list.append(panicinfo)\n continue\n else:\n raise RuntimeError('Unhandled')\n if linkage_name:\n name = matches_panic_funcs(linkage_name_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n print(\n 'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'\n .format(linkage_name_string, addr))\n continue\n no_info_panic_list.append(panic_info)\n print('did not find source for panic: {}'.format(addr))\n continue\n elif abstract_origin:\n origin = abstract_origin_string.split('\"')[1]\n panicinfo['abstract_origin'] = origin\n if 'core' in origin:\n if matches_panic_funcs(origin):\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n print(\n 'Probably could add this origin or one of its parents to the panic function list: {}'\n .format(abstract_origin_string))\n continue\n else:\n panicinfo['best_guess_source'] = 'abstract_origin + line'\n panic_list.append(panicinfo)\n continue\n else:\n try:\n dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1\n ].strip()\n function_name = dw_at_name_string.split('\"')[1]\n if 'OUTLINED_FUNCTION_' in function_name:\n if function_name not in panic_functions:\n panic_functions.append(function_name + '>')\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n continue\n except:\n no_info_panic_list.append(panicinfo)\n continue\n raise RuntimeError('BUG: Should not reach here')\n return panic_list, within_core_panic_list, no_info_panic_list\n\n\ndef pretty_print(panicinfo):\n if 
panicinfo['best_guess_source'] == 'call/decl':\n try:\n print('\\t{} -- {}:{}'.format(panicinfo['addr'], panicinfo[\n 'call_file'], panicinfo['call_line']))\n except:\n print('\\t{} -- in function starting at {}:{}'.format(panicinfo[\n 'addr'], panicinfo['decl_file'], panicinfo['decl_line']))\n elif panicinfo['best_guess_source'] == 'parent':\n print('\\t{} -- at or in function starting at {}:{}'.format(\n panicinfo['addr'], panicinfo['parent_call_file'], panicinfo[\n 'parent_call_line']))\n elif panicinfo['best_guess_source'] == 'lineinfo':\n print('\\t{} -- in closure, try: {}'.format(panicinfo['addr'],\n panicinfo['line_info']))\n elif panicinfo['best_guess_source'] == 'abstract_origin + line':\n print('\\t{} -- line_info: {} from origin :{}'.format(panicinfo[\n 'addr'], panicinfo['line_info'], panicinfo['abstract_origin']))\n elif panicinfo['best_guess_source'] == 'call-closure-line-info':\n print('\\t{} -- in closure starting on line_info: {}'.format(\n panicinfo['addr'], panicinfo['line_info']))\n else:\n raise RuntimeError('Missing best guess source: {}'.format(panicinfo))\n\n\ndef main():\n args = parse_args()\n if sys.version_info.minor < 7:\n print('This tool requires Python 3.7+')\n return -1\n print('Tock panic report for ' + args.ELF)\n objdump = ARM_OBJDUMP\n if args.riscv:\n objdump = RISCV_OBJDUMP\n panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(\n objdump, args.ELF, args.riscv)\n print('num_panics: {}'.format(len(panic_list)))\n buckets_list = {}\n for f in panic_functions:\n buckets_list[f] = []\n for panic in panic_list:\n buckets_list[panic['function']].append(panic)\n for f, l in buckets_list.items():\n if len(l) > 0:\n print('{}: {}'.format(f, len(l)))\n for p in l:\n pretty_print(p)\n if args.verbose:\n print(p)\n print()\n print('num panics in core ignored: {}'.format(len(within_core_panic_list)))\n print('num panics for which no info available: {}'.format(len(\n no_info_panic_list)))\n if args.verbose:\n print(\n 
'If more debug info is needed, run dwarfdump directly on the address in question.'\n )\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nif platform.system() == 'Darwin':\n DWARFDUMP = 'dwarfdump'\nelif platform.system() == 'Linux':\n DWARFDUMP = 'llvm-dwarfdump'\nelse:\n raise NotImplementedError('Unknown platform')\nARM_OBJDUMP = 'arm-none-eabi-objdump'\nRISCV_OBJDUMP = 'riscv64-unknown-elf-objdump'\npanic_functions = ['expect_failed', 'unwrap_failed', 'panic_bounds_check',\n 'slice_index_order_fail', 'slice_end_index_len_fail',\n 'slice_start_index_len_fail', 'slice17len_mismatch_fail',\n 'str16slice_error_fail', 'copy_from_slice17len_mismatch_fail',\n 'copy_from_slice17', 'panicking5panic', '6unwrap17', '6expect17',\n '11copy_within17', 'core..fmt..builders..PadAdapter', '11copy_within17',\n 'write_char', 'write_str', 'printable5check',\n 'char$u20$as$u20$core..fmt..Debug', 'GenericRadix7fmt_int',\n '10unwrap_err17h6', '13is_whitespace17',\n '$u20$core..slice..index..SliceIndex$LT',\n 'core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter',\n '_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE'\n ,\n '_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE'\n ]\ndw_at_file_re = re.compile('.*(?:DW_AT_call_file|DW_AT_decl_file).*')\ndw_at_line_re = re.compile('.*(?:DW_AT_call_line|DW_AT_decl_line).*')\nline_info_re = re.compile('.*Line info.*')\nabstract_origin_re = re.compile('.*DW_AT_abstract_origin.*')\ndw_at_linkage_name_re = re.compile('.*DW_AT_linkage_name.*')\ndw_at_name_re = re.compile('.*DW_AT_name.*')\n\n\ndef matches_panic_funcs(name):\n \"\"\"If the passed name contains one of the known panic_functions,\n return the match\n \"\"\"\n for func in panic_functions:\n if func in name:\n return func\n return ''\n\n\ndef linkage_or_origin_all_parents(elf, addr, linkage=False):\n \"\"\"Returns a list of the abstract origin or linkage of all parents of the dwarf\n location for the passed address\n \"\"\"\n result = 
subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n regex = abstract_origin_re\n if linkage:\n regex = dw_at_linkage_name_re\n matches = re.findall(regex, dwarfdump)\n\n def getFunction(line):\n return line.strip().split('\"')[1]\n origins = list(map(getFunction, matches))\n return origins\n\n\ndef any_origin_matches_panic_func(elf, addr):\n \"\"\"returns name if any origin for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n origins = linkage_or_origin_all_parents(elf, addr)\n for origin in origins:\n name = matches_panic_funcs(origin)\n if name:\n return name\n return ''\n\n\ndef any_linkage_matches_panic_func(elf, addr):\n \"\"\"returns True + name if any linkage for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n linkages = linkage_or_origin_all_parents(elf, addr, True)\n for linkage in linkages:\n name = matches_panic_funcs(linkage)\n if name:\n return name\n return ''\n\n\ndef check_for_source_in_parent(elf, addr):\n \"\"\"Takes in a dwarfdump lookup including parents of the source DWARF\n location, returns the first parent with a call file not in\n the core library. 
If found, this often indicates the source of the panic\n in the Tock source code.\n \"\"\"\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n matches = re.findall(dw_at_file_re, dwarfdump)\n\n def getFile(line):\n return line.strip().split('\"')[1]\n source_files = list(map(getFile, matches))\n for i, f in enumerate(source_files[::-1]):\n if '/core/' not in f:\n line_matches = re.findall(dw_at_line_re, dwarfdump)\n\n def getLine(line):\n return line.strip().split('(')[1].split(')')[0]\n source_lines = list(map(getLine, line_matches))\n source_line = source_lines[::-1][i]\n return f, source_line\n return '', ''\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('ELF', help='ELF file for analysis')\n parser.add_argument('--verbose', '-v', action='store_true', help=\n 'Output additional DWARF info for each panic location in the binary')\n parser.add_argument('--riscv', action='store_true', help=\n 'Use risc-v based objdump')\n return parser.parse_args()\n\n\ndef find_all_panics(objdump, elf, is_riscv):\n panic_list = []\n within_core_panic_list = []\n no_info_panic_list = []\n result = subprocess.run((objdump, '-d', elf), capture_output=True, text\n =True)\n objdump_out = result.stdout\n for function in panic_functions:\n function_re = re.compile('.*:.*#.*' + function + '.*')\n if not is_riscv:\n function_re = re.compile('.*:.*<.*' + function + '.*')\n matches = re.findall(function_re, objdump_out)\n\n def getAddr(line):\n return line.strip().split(':')[0]\n addrs = list(map(getAddr, matches))\n for addr in addrs:\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n dw_at_file = re.search(dw_at_file_re, dwarfdump)\n dw_at_line = re.search(dw_at_line_re, dwarfdump)\n line_info = re.search(line_info_re, dwarfdump)\n abstract_origin = re.search(abstract_origin_re, dwarfdump)\n 
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)\n file_string = ''\n line_string = ''\n line_info_string = ''\n abstract_origin_string = ''\n linkage_name_string = ''\n if dw_at_file:\n file_string = dw_at_file.group(0).strip()\n line_string = dw_at_line.group(0).strip()\n panicinfo = {}\n panicinfo['addr'] = addr\n panicinfo['function'] = function\n if line_info:\n line_info_string = line_info.group(0).strip()\n panicinfo['line_info'] = line_info_string\n if abstract_origin:\n abstract_origin_string = abstract_origin.group(0).strip()\n if linkage_name:\n linkage_name_string = linkage_name.group(0).strip()\n if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in\n file_string):\n raise RuntimeError('I misunderstand DWARF')\n if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in\n file_string):\n filename = file_string.split('\"')[1]\n line_num = line_string.split('(')[1].split(')')[0]\n if 'DW_AT_call_file' in file_string:\n panicinfo['call_file'] = filename\n panicinfo['call_line'] = line_num\n if 'DW_AT_decl_file' in file_string:\n panicinfo['decl_file'] = filename\n panicinfo['decl_line'] = line_num\n if not '/core/' in filename:\n if not 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'call/decl'\n else:\n panicinfo['best_guess_source'\n ] = 'call-closure-line-info'\n panic_list.append(panicinfo)\n continue\n else:\n parent_file, parent_line = check_for_source_in_parent(elf,\n addr)\n if parent_file:\n panicinfo['parent_call_file'] = parent_file\n panicinfo['parent_call_line'] = parent_line\n panicinfo['best_guess_source'] = 'parent'\n panic_list.append(panicinfo)\n continue\n elif not abstract_origin and not linkage_name:\n no_info_panic_list.append(panicinfo)\n continue\n elif abstract_origin:\n if 'core' in abstract_origin_string:\n name = matches_panic_funcs(abstract_origin_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n name2 = any_origin_matches_panic_func(elf, addr\n )\n 
name3 = any_linkage_matches_panic_func(elf,\n addr)\n if name2:\n within_core_panic_list.append(panicinfo)\n continue\n elif name3:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n continue\n elif 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'lineinfo'\n panic_list.append(panicinfo)\n continue\n else:\n raise RuntimeError('Unhandled')\n if linkage_name:\n name = matches_panic_funcs(linkage_name_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n print(\n 'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'\n .format(linkage_name_string, addr))\n continue\n no_info_panic_list.append(panic_info)\n print('did not find source for panic: {}'.format(addr))\n continue\n elif abstract_origin:\n origin = abstract_origin_string.split('\"')[1]\n panicinfo['abstract_origin'] = origin\n if 'core' in origin:\n if matches_panic_funcs(origin):\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n print(\n 'Probably could add this origin or one of its parents to the panic function list: {}'\n .format(abstract_origin_string))\n continue\n else:\n panicinfo['best_guess_source'] = 'abstract_origin + line'\n panic_list.append(panicinfo)\n continue\n else:\n try:\n dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1\n ].strip()\n function_name = dw_at_name_string.split('\"')[1]\n if 'OUTLINED_FUNCTION_' in function_name:\n if function_name not in panic_functions:\n panic_functions.append(function_name + '>')\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n continue\n except:\n no_info_panic_list.append(panicinfo)\n continue\n raise RuntimeError('BUG: Should not reach here')\n return panic_list, within_core_panic_list, no_info_panic_list\n\n\ndef pretty_print(panicinfo):\n if 
panicinfo['best_guess_source'] == 'call/decl':\n try:\n print('\\t{} -- {}:{}'.format(panicinfo['addr'], panicinfo[\n 'call_file'], panicinfo['call_line']))\n except:\n print('\\t{} -- in function starting at {}:{}'.format(panicinfo[\n 'addr'], panicinfo['decl_file'], panicinfo['decl_line']))\n elif panicinfo['best_guess_source'] == 'parent':\n print('\\t{} -- at or in function starting at {}:{}'.format(\n panicinfo['addr'], panicinfo['parent_call_file'], panicinfo[\n 'parent_call_line']))\n elif panicinfo['best_guess_source'] == 'lineinfo':\n print('\\t{} -- in closure, try: {}'.format(panicinfo['addr'],\n panicinfo['line_info']))\n elif panicinfo['best_guess_source'] == 'abstract_origin + line':\n print('\\t{} -- line_info: {} from origin :{}'.format(panicinfo[\n 'addr'], panicinfo['line_info'], panicinfo['abstract_origin']))\n elif panicinfo['best_guess_source'] == 'call-closure-line-info':\n print('\\t{} -- in closure starting on line_info: {}'.format(\n panicinfo['addr'], panicinfo['line_info']))\n else:\n raise RuntimeError('Missing best guess source: {}'.format(panicinfo))\n\n\ndef main():\n args = parse_args()\n if sys.version_info.minor < 7:\n print('This tool requires Python 3.7+')\n return -1\n print('Tock panic report for ' + args.ELF)\n objdump = ARM_OBJDUMP\n if args.riscv:\n objdump = RISCV_OBJDUMP\n panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(\n objdump, args.ELF, args.riscv)\n print('num_panics: {}'.format(len(panic_list)))\n buckets_list = {}\n for f in panic_functions:\n buckets_list[f] = []\n for panic in panic_list:\n buckets_list[panic['function']].append(panic)\n for f, l in buckets_list.items():\n if len(l) > 0:\n print('{}: {}'.format(f, len(l)))\n for p in l:\n pretty_print(p)\n if args.verbose:\n print(p)\n print()\n print('num panics in core ignored: {}'.format(len(within_core_panic_list)))\n print('num panics for which no info available: {}'.format(len(\n no_info_panic_list)))\n if args.verbose:\n print(\n 
'If more debug info is needed, run dwarfdump directly on the address in question.'\n )\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import argparse\nimport platform\nimport re\nimport subprocess\nimport sys\nif platform.system() == 'Darwin':\n DWARFDUMP = 'dwarfdump'\nelif platform.system() == 'Linux':\n DWARFDUMP = 'llvm-dwarfdump'\nelse:\n raise NotImplementedError('Unknown platform')\nARM_OBJDUMP = 'arm-none-eabi-objdump'\nRISCV_OBJDUMP = 'riscv64-unknown-elf-objdump'\npanic_functions = ['expect_failed', 'unwrap_failed', 'panic_bounds_check',\n 'slice_index_order_fail', 'slice_end_index_len_fail',\n 'slice_start_index_len_fail', 'slice17len_mismatch_fail',\n 'str16slice_error_fail', 'copy_from_slice17len_mismatch_fail',\n 'copy_from_slice17', 'panicking5panic', '6unwrap17', '6expect17',\n '11copy_within17', 'core..fmt..builders..PadAdapter', '11copy_within17',\n 'write_char', 'write_str', 'printable5check',\n 'char$u20$as$u20$core..fmt..Debug', 'GenericRadix7fmt_int',\n '10unwrap_err17h6', '13is_whitespace17',\n '$u20$core..slice..index..SliceIndex$LT',\n 'core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter',\n '_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE'\n ,\n '_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE'\n ]\ndw_at_file_re = re.compile('.*(?:DW_AT_call_file|DW_AT_decl_file).*')\ndw_at_line_re = re.compile('.*(?:DW_AT_call_line|DW_AT_decl_line).*')\nline_info_re = re.compile('.*Line info.*')\nabstract_origin_re = re.compile('.*DW_AT_abstract_origin.*')\ndw_at_linkage_name_re = re.compile('.*DW_AT_linkage_name.*')\ndw_at_name_re = re.compile('.*DW_AT_name.*')\n\n\ndef matches_panic_funcs(name):\n \"\"\"If the passed name contains one of the known panic_functions,\n return the match\n \"\"\"\n for func in panic_functions:\n if func in name:\n return func\n return ''\n\n\ndef linkage_or_origin_all_parents(elf, addr, linkage=False):\n \"\"\"Returns a list of the abstract origin or linkage of all parents of 
the dwarf\n location for the passed address\n \"\"\"\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n regex = abstract_origin_re\n if linkage:\n regex = dw_at_linkage_name_re\n matches = re.findall(regex, dwarfdump)\n\n def getFunction(line):\n return line.strip().split('\"')[1]\n origins = list(map(getFunction, matches))\n return origins\n\n\ndef any_origin_matches_panic_func(elf, addr):\n \"\"\"returns name if any origin for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n origins = linkage_or_origin_all_parents(elf, addr)\n for origin in origins:\n name = matches_panic_funcs(origin)\n if name:\n return name\n return ''\n\n\ndef any_linkage_matches_panic_func(elf, addr):\n \"\"\"returns True + name if any linkage for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n linkages = linkage_or_origin_all_parents(elf, addr, True)\n for linkage in linkages:\n name = matches_panic_funcs(linkage)\n if name:\n return name\n return ''\n\n\ndef check_for_source_in_parent(elf, addr):\n \"\"\"Takes in a dwarfdump lookup including parents of the source DWARF\n location, returns the first parent with a call file not in\n the core library. 
If found, this often indicates the source of the panic\n in the Tock source code.\n \"\"\"\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n matches = re.findall(dw_at_file_re, dwarfdump)\n\n def getFile(line):\n return line.strip().split('\"')[1]\n source_files = list(map(getFile, matches))\n for i, f in enumerate(source_files[::-1]):\n if '/core/' not in f:\n line_matches = re.findall(dw_at_line_re, dwarfdump)\n\n def getLine(line):\n return line.strip().split('(')[1].split(')')[0]\n source_lines = list(map(getLine, line_matches))\n source_line = source_lines[::-1][i]\n return f, source_line\n return '', ''\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('ELF', help='ELF file for analysis')\n parser.add_argument('--verbose', '-v', action='store_true', help=\n 'Output additional DWARF info for each panic location in the binary')\n parser.add_argument('--riscv', action='store_true', help=\n 'Use risc-v based objdump')\n return parser.parse_args()\n\n\ndef find_all_panics(objdump, elf, is_riscv):\n panic_list = []\n within_core_panic_list = []\n no_info_panic_list = []\n result = subprocess.run((objdump, '-d', elf), capture_output=True, text\n =True)\n objdump_out = result.stdout\n for function in panic_functions:\n function_re = re.compile('.*:.*#.*' + function + '.*')\n if not is_riscv:\n function_re = re.compile('.*:.*<.*' + function + '.*')\n matches = re.findall(function_re, objdump_out)\n\n def getAddr(line):\n return line.strip().split(':')[0]\n addrs = list(map(getAddr, matches))\n for addr in addrs:\n result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),\n capture_output=True, text=True)\n dwarfdump = result.stdout\n dw_at_file = re.search(dw_at_file_re, dwarfdump)\n dw_at_line = re.search(dw_at_line_re, dwarfdump)\n line_info = re.search(line_info_re, dwarfdump)\n abstract_origin = re.search(abstract_origin_re, dwarfdump)\n 
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)\n file_string = ''\n line_string = ''\n line_info_string = ''\n abstract_origin_string = ''\n linkage_name_string = ''\n if dw_at_file:\n file_string = dw_at_file.group(0).strip()\n line_string = dw_at_line.group(0).strip()\n panicinfo = {}\n panicinfo['addr'] = addr\n panicinfo['function'] = function\n if line_info:\n line_info_string = line_info.group(0).strip()\n panicinfo['line_info'] = line_info_string\n if abstract_origin:\n abstract_origin_string = abstract_origin.group(0).strip()\n if linkage_name:\n linkage_name_string = linkage_name.group(0).strip()\n if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in\n file_string):\n raise RuntimeError('I misunderstand DWARF')\n if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in\n file_string):\n filename = file_string.split('\"')[1]\n line_num = line_string.split('(')[1].split(')')[0]\n if 'DW_AT_call_file' in file_string:\n panicinfo['call_file'] = filename\n panicinfo['call_line'] = line_num\n if 'DW_AT_decl_file' in file_string:\n panicinfo['decl_file'] = filename\n panicinfo['decl_line'] = line_num\n if not '/core/' in filename:\n if not 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'call/decl'\n else:\n panicinfo['best_guess_source'\n ] = 'call-closure-line-info'\n panic_list.append(panicinfo)\n continue\n else:\n parent_file, parent_line = check_for_source_in_parent(elf,\n addr)\n if parent_file:\n panicinfo['parent_call_file'] = parent_file\n panicinfo['parent_call_line'] = parent_line\n panicinfo['best_guess_source'] = 'parent'\n panic_list.append(panicinfo)\n continue\n elif not abstract_origin and not linkage_name:\n no_info_panic_list.append(panicinfo)\n continue\n elif abstract_origin:\n if 'core' in abstract_origin_string:\n name = matches_panic_funcs(abstract_origin_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n name2 = any_origin_matches_panic_func(elf, addr\n )\n 
name3 = any_linkage_matches_panic_func(elf,\n addr)\n if name2:\n within_core_panic_list.append(panicinfo)\n continue\n elif name3:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n continue\n elif 'closure' in abstract_origin_string:\n panicinfo['best_guess_source'] = 'lineinfo'\n panic_list.append(panicinfo)\n continue\n else:\n raise RuntimeError('Unhandled')\n if linkage_name:\n name = matches_panic_funcs(linkage_name_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n print(\n 'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'\n .format(linkage_name_string, addr))\n continue\n no_info_panic_list.append(panic_info)\n print('did not find source for panic: {}'.format(addr))\n continue\n elif abstract_origin:\n origin = abstract_origin_string.split('\"')[1]\n panicinfo['abstract_origin'] = origin\n if 'core' in origin:\n if matches_panic_funcs(origin):\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n print(\n 'Probably could add this origin or one of its parents to the panic function list: {}'\n .format(abstract_origin_string))\n continue\n else:\n panicinfo['best_guess_source'] = 'abstract_origin + line'\n panic_list.append(panicinfo)\n continue\n else:\n try:\n dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1\n ].strip()\n function_name = dw_at_name_string.split('\"')[1]\n if 'OUTLINED_FUNCTION_' in function_name:\n if function_name not in panic_functions:\n panic_functions.append(function_name + '>')\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n continue\n except:\n no_info_panic_list.append(panicinfo)\n continue\n raise RuntimeError('BUG: Should not reach here')\n return panic_list, within_core_panic_list, no_info_panic_list\n\n\ndef pretty_print(panicinfo):\n if 
panicinfo['best_guess_source'] == 'call/decl':\n try:\n print('\\t{} -- {}:{}'.format(panicinfo['addr'], panicinfo[\n 'call_file'], panicinfo['call_line']))\n except:\n print('\\t{} -- in function starting at {}:{}'.format(panicinfo[\n 'addr'], panicinfo['decl_file'], panicinfo['decl_line']))\n elif panicinfo['best_guess_source'] == 'parent':\n print('\\t{} -- at or in function starting at {}:{}'.format(\n panicinfo['addr'], panicinfo['parent_call_file'], panicinfo[\n 'parent_call_line']))\n elif panicinfo['best_guess_source'] == 'lineinfo':\n print('\\t{} -- in closure, try: {}'.format(panicinfo['addr'],\n panicinfo['line_info']))\n elif panicinfo['best_guess_source'] == 'abstract_origin + line':\n print('\\t{} -- line_info: {} from origin :{}'.format(panicinfo[\n 'addr'], panicinfo['line_info'], panicinfo['abstract_origin']))\n elif panicinfo['best_guess_source'] == 'call-closure-line-info':\n print('\\t{} -- in closure starting on line_info: {}'.format(\n panicinfo['addr'], panicinfo['line_info']))\n else:\n raise RuntimeError('Missing best guess source: {}'.format(panicinfo))\n\n\ndef main():\n args = parse_args()\n if sys.version_info.minor < 7:\n print('This tool requires Python 3.7+')\n return -1\n print('Tock panic report for ' + args.ELF)\n objdump = ARM_OBJDUMP\n if args.riscv:\n objdump = RISCV_OBJDUMP\n panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(\n objdump, args.ELF, args.riscv)\n print('num_panics: {}'.format(len(panic_list)))\n buckets_list = {}\n for f in panic_functions:\n buckets_list[f] = []\n for panic in panic_list:\n buckets_list[panic['function']].append(panic)\n for f, l in buckets_list.items():\n if len(l) > 0:\n print('{}: {}'.format(f, len(l)))\n for p in l:\n pretty_print(p)\n if args.verbose:\n print(p)\n print()\n print('num panics in core ignored: {}'.format(len(within_core_panic_list)))\n print('num panics for which no info available: {}'.format(len(\n no_info_panic_list)))\n if args.verbose:\n print(\n 
'If more debug info is needed, run dwarfdump directly on the address in question.'\n )\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n\n# Licensed under the Apache License, Version 2.0 or the MIT License.\n# SPDX-License-Identifier: Apache-2.0 OR MIT\n# Copyright Tock Contributors 2023.\n\n# Prints out the source locations of panics in a Tock kernel ELF\n#\n# This tool attempts to trace all panic locations in a Tock kernel ELF by\n# tracing calls to panic functions in the core library, using the debug information\n# embedded in the ELF file. This tool requires an ELF which includes debug information.\n# In its current state, cannot accurately provide the source locations\n# corresponding to each panic, but tries to be honest about its confidence in\n# each guess. In general, each guess is usually enough to locate the relevant panic.\n# More creative analysis might be able to increase\n# the accuracy with which this tool can identify source locations of panics. For now,\n# this tool is useful for:\n#\n# - obtaining a rough count of the number of panics in a Tock kernel binary\n#\n# - finding and removing panics in a Tock kernel binary\n#\n# - roughly determining which components of a Tock kernel binary contain the most panic\n# paths\n#\n# There are several assumptions built into this tool which may not always hold. For one,\n# the list of panic_functions are assumed to not match any strings in the actual\n# codebase, despite the fact they are incomplete function names and overlap is possible.\n# I could solve this by using full names of these functions, but I am unsure how often\n# the name mangling of these functions will change as the rust compiler changes so this\n# approach felt potentially more stable.\n#\n# Several assumptions are made about DWARF locations that do not always hold, so source\n# locations are not always accurate -- sometimes, the printed location just points to\n# the function containing a panic, rather than the actual line on which the panic\n# occurs. 
Some assumptions about which panics are in the core library and will be\n# caught by grepping for other calls may also not always hold. The best way to inspect\n# these is by manually inspecting the panics in the `within_core_panic_list`.\n#\n# This script stores panics which it cannot trace out of the core library in the\n# `no_info_panic_list`. If this list contains some panics, that is a sign that some\n# panics have not been identified. You can manually look at the addresses stored in\n# this list, attempt to find the core library function which leads to these instrucitons\n# being called, and then add those core library functions to the list of panic functions.\n#\n# The output of this script is *not* stable.\n#\n# Usage: find_panics.py ELF [--riscv]\n#\n# Requires Python 3.7+\n#\n# Author: Hudson Ayers <hayers@.stanford.edu>\n\nimport argparse\nimport platform\nimport re\nimport subprocess\nimport sys\n\n\nif platform.system() == 'Darwin':\n DWARFDUMP = \"dwarfdump\"\nelif platform.system() == 'Linux':\n DWARFDUMP = \"llvm-dwarfdump\"\nelse:\n raise NotImplementedError(\"Unknown platform\")\n# Note: In practice, GCC objdumps are better at symbol resolution than LLVM objdump\nARM_OBJDUMP = \"arm-none-eabi-objdump\"\nRISCV_OBJDUMP = \"riscv64-unknown-elf-objdump\"\n\n# TODO: For all functions below the initial batch, it would like be preferable to\n# automatically populate the list with additional functions in the core library using\n# debug info. 
For now, however, I do this manually.\npanic_functions = [\n \"expect_failed\",\n \"unwrap_failed\",\n \"panic_bounds_check\",\n \"slice_index_order_fail\",\n \"slice_end_index_len_fail\",\n \"slice_start_index_len_fail\",\n \"slice17len_mismatch_fail\",\n \"str16slice_error_fail\",\n \"copy_from_slice17len_mismatch_fail\",\n \"copy_from_slice17\",\n \"panicking5panic\",\n # below are functions I have manually traced up from the above, more \"core\" panics, on a riscv binary with a low inline threshold\n \"6unwrap17\",\n \"6expect17\",\n \"11copy_within17\",\n \"core..fmt..builders..PadAdapter\", # calls slice_error_fail\n \"11copy_within17\", # calls panicking::panic\n \"write_char\", # calls PadAdapter one above\n \"write_str\", # calls write_char\n \"printable5check\", # calls slice_index_order_fail\n \"char$u20$as$u20$core..fmt..Debug\", # calls printable5check\n \"GenericRadix7fmt_int\", # calls slice_start_index_len_fail\n # below are functions I manually traced on an arm binary,\n # with a somewhat higher inline threshold.\n \"10unwrap_err17h6\",\n \"13is_whitespace17\",\n \"$u20$core..slice..index..SliceIndex$LT\",\n \"core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter\",\n \"_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE\",\n \"_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE\",\n]\n\n# Pre-compiled regex lookups\ndw_at_file_re = re.compile(r\"\"\".*(?:DW_AT_call_file|DW_AT_decl_file).*\"\"\")\ndw_at_line_re = re.compile(r\"\"\".*(?:DW_AT_call_line|DW_AT_decl_line).*\"\"\")\nline_info_re = re.compile(r\"\"\".*Line info.*\"\"\")\nabstract_origin_re = re.compile(r\"\"\".*DW_AT_abstract_origin.*\"\"\")\ndw_at_linkage_name_re = re.compile(r\"\"\".*DW_AT_linkage_name.*\"\"\")\ndw_at_name_re = re.compile(r\"\"\".*DW_AT_name.*\"\"\")\n\n\ndef matches_panic_funcs(name):\n \"\"\"If the passed name 
contains one of the known panic_functions,\n return the match\n \"\"\"\n for func in panic_functions:\n if func in name:\n return func\n return \"\"\n\n\ndef linkage_or_origin_all_parents(elf, addr, linkage=False):\n \"\"\"Returns a list of the abstract origin or linkage of all parents of the dwarf\n location for the passed address\n \"\"\"\n result = subprocess.run(\n (DWARFDUMP, \"--lookup=0x\" + addr, \"-p\", elf), capture_output=True, text=True\n )\n dwarfdump = result.stdout\n regex = abstract_origin_re\n if linkage:\n regex = dw_at_linkage_name_re\n matches = re.findall(regex, dwarfdump)\n\n def getFunction(line):\n return line.strip().split('\"')[1]\n\n origins = list(map(getFunction, matches))\n return origins\n\n\ndef any_origin_matches_panic_func(elf, addr):\n \"\"\"returns name if any origin for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n origins = linkage_or_origin_all_parents(elf, addr)\n for origin in origins:\n name = matches_panic_funcs(origin)\n if name:\n return name\n return \"\"\n\n\ndef any_linkage_matches_panic_func(elf, addr):\n \"\"\"returns True + name if any linkage for the passed addr matches one\n of the functions in the panic_functions array\n \"\"\"\n linkages = linkage_or_origin_all_parents(elf, addr, True)\n for linkage in linkages:\n name = matches_panic_funcs(linkage)\n if name:\n return name\n return \"\"\n\n\ndef check_for_source_in_parent(elf, addr):\n \"\"\"Takes in a dwarfdump lookup including parents of the source DWARF\n location, returns the first parent with a call file not in\n the core library. 
If found, this often indicates the source of the panic\n in the Tock source code.\n \"\"\"\n result = subprocess.run(\n (DWARFDUMP, \"--lookup=0x\" + addr, \"-p\", elf), capture_output=True, text=True\n )\n dwarfdump = result.stdout\n matches = re.findall(dw_at_file_re, dwarfdump)\n\n def getFile(line):\n return line.strip().split('\"')[1]\n\n source_files = list(map(getFile, matches))\n for (i, f) in enumerate(source_files[::-1]):\n if \"/core/\" not in f:\n line_matches = re.findall(dw_at_line_re, dwarfdump)\n\n def getLine(line):\n return line.strip().split(\"(\")[1].split(\")\")[0]\n\n source_lines = list(map(getLine, line_matches))\n source_line = source_lines[::-1][i]\n return (f, source_line)\n return (\"\", \"\")\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"ELF\", help=\"ELF file for analysis\")\n parser.add_argument(\n \"--verbose\",\n \"-v\",\n action=\"store_true\",\n help=\"Output additional DWARF info for each panic location in the binary\",\n )\n parser.add_argument(\"--riscv\", action=\"store_true\", help=\"Use risc-v based objdump\")\n return parser.parse_args()\n\n\n# Find all addresses that panic, and get basic dwarf info on those addresses\ndef find_all_panics(objdump, elf, is_riscv):\n panic_list = []\n within_core_panic_list = []\n no_info_panic_list = []\n result = subprocess.run((objdump, \"-d\", elf), capture_output=True, text=True)\n objdump_out = result.stdout\n for function in panic_functions:\n function_re = re.compile(\".*:.*#.*\" + function + \".*\")\n if not is_riscv:\n # Arm-none-eabi-objdump uses ';' for comments instead of '#'\n function_re = re.compile(\".*:.*<.*\" + function + \".*\")\n # TODO: arm elfs include loads of offsets from symbols in such a way that these lines\n # are matched by this regex. In general, these loads occur within the instruction stream\n # associated with the symbol at hand, and will usually be excluded by logic later in\n # this function. 
This leads to `within_core_panic_list` and `no_info_panic_list`\n # containing more \"panics\" than when analyzing a risc-v binary. We could fix this\n # by matching *only* on functions with instructions that actually jump to a new symbol,\n # but this would require a list of such instructions for each architecture. However\n # as written it actually lets us identify panics which are jumped to via addresses\n # stored in registers, which may actually catch additional valid panics.\n matches = re.findall(function_re, objdump_out)\n\n def getAddr(line):\n return line.strip().split(\":\")[0]\n\n addrs = list(map(getAddr, matches))\n for addr in addrs:\n result = subprocess.run(\n (DWARFDUMP, \"--lookup=0x\" + addr, elf), capture_output=True, text=True\n )\n dwarfdump = result.stdout\n dw_at_file = re.search(dw_at_file_re, dwarfdump)\n dw_at_line = re.search(dw_at_line_re, dwarfdump)\n line_info = re.search(line_info_re, dwarfdump)\n abstract_origin = re.search(abstract_origin_re, dwarfdump)\n linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)\n file_string = \"\"\n line_string = \"\"\n line_info_string = \"\"\n abstract_origin_string = \"\"\n linkage_name_string = \"\"\n if dw_at_file:\n file_string = dw_at_file.group(0).strip()\n line_string = dw_at_line.group(0).strip()\n panicinfo = {}\n panicinfo[\"addr\"] = addr\n panicinfo[\"function\"] = function\n if line_info:\n line_info_string = line_info.group(0).strip()\n panicinfo[\"line_info\"] = line_info_string\n if abstract_origin:\n abstract_origin_string = abstract_origin.group(0).strip()\n if linkage_name:\n linkage_name_string = linkage_name.group(0).strip()\n if \"DW_AT_call_file\" in file_string and \"DW_AT_decl_file\" in file_string:\n raise RuntimeError(\"I misunderstand DWARF\")\n if \"DW_AT_call_file\" in file_string or \"DW_AT_decl_file\" in file_string:\n filename = file_string.split('\"')[1]\n line_num = line_string.split(\"(\")[1].split(\")\")[0]\n if \"DW_AT_call_file\" in file_string:\n 
panicinfo[\"call_file\"] = filename\n panicinfo[\"call_line\"] = line_num\n if \"DW_AT_decl_file\" in file_string:\n panicinfo[\"decl_file\"] = filename\n panicinfo[\"decl_line\"] = line_num\n if not \"/core/\" in filename:\n if not \"closure\" in abstract_origin_string:\n panicinfo[\"best_guess_source\"] = \"call/decl\"\n else:\n panicinfo[\"best_guess_source\"] = \"call-closure-line-info\"\n panic_list.append(panicinfo)\n continue\n else: # 'core' in filename\n (parent_file, parent_line) = check_for_source_in_parent(elf, addr)\n if parent_file:\n panicinfo[\"parent_call_file\"] = parent_file\n panicinfo[\"parent_call_line\"] = parent_line\n panicinfo[\"best_guess_source\"] = \"parent\"\n panic_list.append(panicinfo)\n continue\n elif not abstract_origin and not linkage_name:\n no_info_panic_list.append(panicinfo)\n continue\n elif abstract_origin:\n if \"core\" in abstract_origin_string:\n name = matches_panic_funcs(abstract_origin_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n name2 = any_origin_matches_panic_func(elf, addr)\n name3 = any_linkage_matches_panic_func(elf, addr)\n if name2:\n within_core_panic_list.append(panicinfo)\n continue\n elif name3:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n continue\n elif \"closure\" in abstract_origin_string:\n # not in core, in closure, line info is probably sufficient\n panicinfo[\"best_guess_source\"] = \"lineinfo\"\n panic_list.append(panicinfo)\n continue\n else:\n # i have not seen this happen -- core in file, not closure, origin not core\n raise RuntimeError(\"Unhandled\")\n if linkage_name:\n name = matches_panic_funcs(linkage_name_string)\n if name:\n within_core_panic_list.append(panicinfo)\n continue\n else:\n no_info_panic_list.append(panicinfo)\n print(\n \"Failed to match panic but we probably have enough info to trace it up. 
Linkage name: {}, addr: {}\".format(\n linkage_name_string, addr\n )\n )\n continue\n no_info_panic_list.append(panic_info)\n print(\"did not find source for panic: {}\".format(addr))\n continue\n elif abstract_origin:\n origin = abstract_origin_string.split('\"')[1]\n panicinfo[\"abstract_origin\"] = origin\n if \"core\" in origin:\n if matches_panic_funcs(origin):\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n print(\n \"Probably could add this origin or one of its parents to the panic function list: {}\".format(\n abstract_origin_string\n )\n )\n continue\n else:\n panicinfo[\"best_guess_source\"] = \"abstract_origin + line\"\n panic_list.append(panicinfo)\n continue\n else:\n # This gets hit for OUTLINED_FUNCTION_XX a bunch on ARM\n try:\n dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[\n -1\n ].strip() # see multiple matches for this string sometimes\n function_name = dw_at_name_string.split('\"')[1]\n if \"OUTLINED_FUNCTION_\" in function_name:\n # This is a common pattern where panicing paths are repeated in many\n # places throughout the binary, and LLVMs optimizer outlines the repeated code.\n # Let's add these to the list of panicing functions, dynamically so this is resilient to\n # changes in the binary.\n if function_name not in panic_functions:\n # don't double insert\n panic_functions.append(\n function_name + \">\"\n ) # so FUNCTION_22 does not catch FUNCTION_222\n within_core_panic_list.append(panicinfo)\n continue\n no_info_panic_list.append(panicinfo)\n continue\n except:\n # There seem to be a places where lookup fails completely\n # Not easy to recover, log these and continue on.\n no_info_panic_list.append(panicinfo)\n continue\n raise RuntimeError(\"BUG: Should not reach here\")\n return (panic_list, within_core_panic_list, no_info_panic_list)\n\n\ndef pretty_print(panicinfo):\n if panicinfo[\"best_guess_source\"] == \"call/decl\":\n try:\n print(\n \"\\t{} -- {}:{}\".format(\n 
panicinfo[\"addr\"], panicinfo[\"call_file\"], panicinfo[\"call_line\"]\n )\n )\n except:\n print(\n \"\\t{} -- in function starting at {}:{}\".format(\n panicinfo[\"addr\"], panicinfo[\"decl_file\"], panicinfo[\"decl_line\"]\n )\n )\n elif panicinfo[\"best_guess_source\"] == \"parent\":\n print(\n \"\\t{} -- at or in function starting at {}:{}\".format(\n panicinfo[\"addr\"],\n panicinfo[\"parent_call_file\"],\n panicinfo[\"parent_call_line\"],\n )\n )\n elif panicinfo[\"best_guess_source\"] == \"lineinfo\":\n print(\n \"\\t{} -- in closure, try: {}\".format(\n panicinfo[\"addr\"], panicinfo[\"line_info\"]\n )\n )\n elif panicinfo[\"best_guess_source\"] == \"abstract_origin + line\":\n print(\n \"\\t{} -- line_info: {} from origin :{}\".format(\n panicinfo[\"addr\"], panicinfo[\"line_info\"], panicinfo[\"abstract_origin\"]\n )\n )\n elif panicinfo[\"best_guess_source\"] == \"call-closure-line-info\":\n print(\n \"\\t{} -- in closure starting on line_info: {}\".format(\n panicinfo[\"addr\"], panicinfo[\"line_info\"]\n )\n )\n else:\n raise RuntimeError(\"Missing best guess source: {}\".format(panicinfo))\n\n\ndef main():\n args = parse_args()\n if sys.version_info.minor < 7:\n print(\"This tool requires Python 3.7+\")\n return -1\n print(\"Tock panic report for \" + args.ELF)\n\n objdump = ARM_OBJDUMP\n if args.riscv:\n objdump = RISCV_OBJDUMP\n\n (panic_list, within_core_panic_list, no_info_panic_list) = find_all_panics(\n objdump, args.ELF, args.riscv\n )\n print(\"num_panics: {}\".format(len(panic_list)))\n buckets_list = {}\n for f in panic_functions:\n buckets_list[f] = []\n for panic in panic_list:\n buckets_list[panic[\"function\"]].append(panic)\n for f, l in buckets_list.items():\n if len(l) > 0:\n print(\"{}: {}\".format(f, len(l)))\n for p in l:\n pretty_print(p)\n if args.verbose:\n print(p)\n print()\n\n print(\"num panics in core ignored: {}\".format(len(within_core_panic_list)))\n print(\"num panics for which no info available: 
{}\".format(len(no_info_panic_list)))\n if args.verbose:\n print(\n \"If more debug info is needed, run dwarfdump directly on the address in question.\"\n )\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
7,
10,
11,
12,
13
]
}
|
[
7,
10,
11,
12,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cv2.imshow('image1', img[0:int(img_height / 2), 0:int(img_width / 2)])
cv2.imshow('image2', img[int(img_height / 2):img_height, 0:int(img_width / 2)])
cv2.imshow('image3', img[0:int(img_height / 2), int(img_width / 2):img_width])
cv2.imshow('image4', img[int(img_height / 2):img_height, int(img_width / 2)
:img_width])
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
img = cv2.imread('Scan1.jpg')
img_height, img_width, dim = img.shape
cv2.imshow('image1', img[0:int(img_height / 2), 0:int(img_width / 2)])
cv2.imshow('image2', img[int(img_height / 2):img_height, 0:int(img_width / 2)])
cv2.imshow('image3', img[0:int(img_height / 2), int(img_width / 2):img_width])
cv2.imshow('image4', img[int(img_height / 2):img_height, int(img_width / 2)
:img_width])
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
img = cv2.imread('Scan1.jpg')
img_height, img_width, dim = img.shape
cv2.imshow('image1', img[0:int(img_height / 2), 0:int(img_width / 2)])
cv2.imshow('image2', img[int(img_height / 2):img_height, 0:int(img_width / 2)])
cv2.imshow('image3', img[0:int(img_height / 2), int(img_width / 2):img_width])
cv2.imshow('image4', img[int(img_height / 2):img_height, int(img_width / 2)
:img_width])
cv2.waitKey(0)
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "8c6f890631e9696a7907975b5d0bb71d03b380da",
"index": 839,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.imshow('image1', img[0:int(img_height / 2), 0:int(img_width / 2)])\ncv2.imshow('image2', img[int(img_height / 2):img_height, 0:int(img_width / 2)])\ncv2.imshow('image3', img[0:int(img_height / 2), int(img_width / 2):img_width])\ncv2.imshow('image4', img[int(img_height / 2):img_height, int(img_width / 2)\n :img_width])\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nimg = cv2.imread('Scan1.jpg')\nimg_height, img_width, dim = img.shape\ncv2.imshow('image1', img[0:int(img_height / 2), 0:int(img_width / 2)])\ncv2.imshow('image2', img[int(img_height / 2):img_height, 0:int(img_width / 2)])\ncv2.imshow('image3', img[0:int(img_height / 2), int(img_width / 2):img_width])\ncv2.imshow('image4', img[int(img_height / 2):img_height, int(img_width / 2)\n :img_width])\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nimg = cv2.imread('Scan1.jpg')\nimg_height, img_width, dim = img.shape\ncv2.imshow('image1', img[0:int(img_height / 2), 0:int(img_width / 2)])\ncv2.imshow('image2', img[int(img_height / 2):img_height, 0:int(img_width / 2)])\ncv2.imshow('image3', img[0:int(img_height / 2), int(img_width / 2):img_width])\ncv2.imshow('image4', img[int(img_height / 2):img_height, int(img_width / 2)\n :img_width])\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pygame
class SpriteObject(pygame.sprite.Sprite):
def __init__(self, x, y, w, h, color):
pygame.sprite.Sprite.__init__(self)
self.angle = 0
self.original_image = pygame.Surface([w, h], pygame.SRCALPHA)
self.original_image.fill(color)
self.image = self.original_image
self.rect = self.image.get_rect(center = (x, y))
self.mask = pygame.mask.from_surface(self.image )
def update(self):
self.rotate()
def rotate(self):
self.angle += 0.3
self.image = pygame.transform.rotate(self.original_image, self.angle)
self.rect = self.image.get_rect(center = self.rect.center)
self.mask = pygame.mask.from_surface(self.image )
pygame.init()
clock = pygame.time.Clock()
window = pygame.display.set_mode((400, 400))
size = window.get_size()
moving_object = SpriteObject(0, 0, 50, 50, (128, 0, 255))
static_objects = [
SpriteObject(size[0] // 2, size[1] // 3, 100, 50, (128, 128, 128)),
SpriteObject(size[0] // 4, size[1] * 2 // 3, 100, 50, (128, 128, 128)),
SpriteObject(size[0] * 3 // 4, size[1] * 2 // 3, 100, 50, (128, 128, 128))
]
all_sprites = pygame.sprite.Group([moving_object] + static_objects)
static_sprites = pygame.sprite.Group(static_objects)
run = True
while run:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
moving_object.rect.center = pygame.mouse.get_pos()
all_sprites.update()
collide = pygame.sprite.spritecollide(moving_object, static_sprites, False, pygame.sprite.collide_mask)
window.fill((255, 0, 0) if collide else (255, 255, 255))
all_sprites.draw(window)
pygame.display.update()
pygame.quit()
exit()
|
normal
|
{
"blob_id": "b90c6a3f8fe084bc2acc0b733750124a1387527c",
"index": 1712,
"step-1": "<mask token>\n\n\nclass SpriteObject(pygame.sprite.Sprite):\n <mask token>\n\n def update(self):\n self.rotate()\n\n def rotate(self):\n self.angle += 0.3\n self.image = pygame.transform.rotate(self.original_image, self.angle)\n self.rect = self.image.get_rect(center=self.rect.center)\n self.mask = pygame.mask.from_surface(self.image)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SpriteObject(pygame.sprite.Sprite):\n\n def __init__(self, x, y, w, h, color):\n pygame.sprite.Sprite.__init__(self)\n self.angle = 0\n self.original_image = pygame.Surface([w, h], pygame.SRCALPHA)\n self.original_image.fill(color)\n self.image = self.original_image\n self.rect = self.image.get_rect(center=(x, y))\n self.mask = pygame.mask.from_surface(self.image)\n\n def update(self):\n self.rotate()\n\n def rotate(self):\n self.angle += 0.3\n self.image = pygame.transform.rotate(self.original_image, self.angle)\n self.rect = self.image.get_rect(center=self.rect.center)\n self.mask = pygame.mask.from_surface(self.image)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SpriteObject(pygame.sprite.Sprite):\n\n def __init__(self, x, y, w, h, color):\n pygame.sprite.Sprite.__init__(self)\n self.angle = 0\n self.original_image = pygame.Surface([w, h], pygame.SRCALPHA)\n self.original_image.fill(color)\n self.image = self.original_image\n self.rect = self.image.get_rect(center=(x, y))\n self.mask = pygame.mask.from_surface(self.image)\n\n def update(self):\n self.rotate()\n\n def rotate(self):\n self.angle += 0.3\n self.image = pygame.transform.rotate(self.original_image, self.angle)\n self.rect = self.image.get_rect(center=self.rect.center)\n self.mask = pygame.mask.from_surface(self.image)\n\n\npygame.init()\n<mask token>\nwhile run:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n moving_object.rect.center = pygame.mouse.get_pos()\n all_sprites.update()\n collide = pygame.sprite.spritecollide(moving_object, static_sprites, \n False, pygame.sprite.collide_mask)\n window.fill((255, 0, 0) if collide else (255, 255, 255))\n all_sprites.draw(window)\n pygame.display.update()\npygame.quit()\nexit()\n",
"step-4": "<mask token>\n\n\nclass SpriteObject(pygame.sprite.Sprite):\n\n def __init__(self, x, y, w, h, color):\n pygame.sprite.Sprite.__init__(self)\n self.angle = 0\n self.original_image = pygame.Surface([w, h], pygame.SRCALPHA)\n self.original_image.fill(color)\n self.image = self.original_image\n self.rect = self.image.get_rect(center=(x, y))\n self.mask = pygame.mask.from_surface(self.image)\n\n def update(self):\n self.rotate()\n\n def rotate(self):\n self.angle += 0.3\n self.image = pygame.transform.rotate(self.original_image, self.angle)\n self.rect = self.image.get_rect(center=self.rect.center)\n self.mask = pygame.mask.from_surface(self.image)\n\n\npygame.init()\nclock = pygame.time.Clock()\nwindow = pygame.display.set_mode((400, 400))\nsize = window.get_size()\nmoving_object = SpriteObject(0, 0, 50, 50, (128, 0, 255))\nstatic_objects = [SpriteObject(size[0] // 2, size[1] // 3, 100, 50, (128, \n 128, 128)), SpriteObject(size[0] // 4, size[1] * 2 // 3, 100, 50, (128,\n 128, 128)), SpriteObject(size[0] * 3 // 4, size[1] * 2 // 3, 100, 50, (\n 128, 128, 128))]\nall_sprites = pygame.sprite.Group([moving_object] + static_objects)\nstatic_sprites = pygame.sprite.Group(static_objects)\nrun = True\nwhile run:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n moving_object.rect.center = pygame.mouse.get_pos()\n all_sprites.update()\n collide = pygame.sprite.spritecollide(moving_object, static_sprites, \n False, pygame.sprite.collide_mask)\n window.fill((255, 0, 0) if collide else (255, 255, 255))\n all_sprites.draw(window)\n pygame.display.update()\npygame.quit()\nexit()\n",
"step-5": "import pygame\r\n\r\nclass SpriteObject(pygame.sprite.Sprite):\r\n def __init__(self, x, y, w, h, color):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.angle = 0\r\n self.original_image = pygame.Surface([w, h], pygame.SRCALPHA)\r\n self.original_image.fill(color)\r\n self.image = self.original_image\r\n self.rect = self.image.get_rect(center = (x, y))\r\n self.mask = pygame.mask.from_surface(self.image )\r\n def update(self):\r\n self.rotate()\r\n def rotate(self):\r\n self.angle += 0.3\r\n self.image = pygame.transform.rotate(self.original_image, self.angle)\r\n self.rect = self.image.get_rect(center = self.rect.center)\r\n self.mask = pygame.mask.from_surface(self.image )\r\n\r\npygame.init()\r\nclock = pygame.time.Clock()\r\nwindow = pygame.display.set_mode((400, 400))\r\nsize = window.get_size()\r\n\r\nmoving_object = SpriteObject(0, 0, 50, 50, (128, 0, 255))\r\nstatic_objects = [\r\n SpriteObject(size[0] // 2, size[1] // 3, 100, 50, (128, 128, 128)),\r\n SpriteObject(size[0] // 4, size[1] * 2 // 3, 100, 50, (128, 128, 128)),\r\n SpriteObject(size[0] * 3 // 4, size[1] * 2 // 3, 100, 50, (128, 128, 128))\r\n]\r\nall_sprites = pygame.sprite.Group([moving_object] + static_objects)\r\nstatic_sprites = pygame.sprite.Group(static_objects)\r\n\r\nrun = True\r\nwhile run:\r\n clock.tick(60)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run = False\r\n\r\n moving_object.rect.center = pygame.mouse.get_pos()\r\n all_sprites.update() \r\n collide = pygame.sprite.spritecollide(moving_object, static_sprites, False, pygame.sprite.collide_mask)\r\n \r\n window.fill((255, 0, 0) if collide else (255, 255, 255))\r\n all_sprites.draw(window)\r\n pygame.display.update()\r\n\r\npygame.quit()\r\nexit()",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
<|reserved_special_token_0|>
class GroupParticipation(models.Model):
account = models.ForeignKey(Account, related_name='groups')
parts = models.FloatField(default=1.0)
group = models.ForeignKey(Group, related_name='participants')
def __str__(self):
return out.substitute(account=self.account, parts=self.parts)
class Meta:
unique_together = 'account', 'parts', 'group'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Group(models.Model):
<|reserved_special_token_0|>
class GroupParticipation(models.Model):
account = models.ForeignKey(Account, related_name='groups')
parts = models.FloatField(default=1.0)
group = models.ForeignKey(Group, related_name='participants')
def __str__(self):
return out.substitute(account=self.account, parts=self.parts)
class Meta:
unique_together = 'account', 'parts', 'group'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
out = Template('$account: $parts')
class Group(models.Model):
name = models.CharField(max_length=100)
class GroupParticipation(models.Model):
account = models.ForeignKey(Account, related_name='groups')
parts = models.FloatField(default=1.0)
group = models.ForeignKey(Group, related_name='participants')
def __str__(self):
return out.substitute(account=self.account, parts=self.parts)
class Meta:
unique_together = 'account', 'parts', 'group'
<|reserved_special_token_1|>
from django.db import models
from backend.models.account import Account
from string import Template
out = Template('$account: $parts')
class Group(models.Model):
name = models.CharField(max_length=100)
class GroupParticipation(models.Model):
account = models.ForeignKey(Account, related_name='groups')
parts = models.FloatField(default=1.0)
group = models.ForeignKey(Group, related_name='participants')
def __str__(self):
return out.substitute(account=self.account, parts=self.parts)
class Meta:
unique_together = 'account', 'parts', 'group'
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from django.db import models
from backend.models.account import Account
from string import Template
out = Template("$account: $parts")
class Group(models.Model):
name = models.CharField(max_length=100)
class GroupParticipation(models.Model):
account = models.ForeignKey(Account, related_name='groups')
parts = models.FloatField(default=1.0)
group = models.ForeignKey(Group, related_name='participants')
def __str__(self):
return out.substitute(account=self.account, parts=self.parts)
class Meta:
unique_together = ('account', 'parts', 'group')
|
flexible
|
{
"blob_id": "11337f6f9cf22ba6fbed68dfcb7a07fb6368e94e",
"index": 6350,
"step-1": "<mask token>\n\n\nclass GroupParticipation(models.Model):\n account = models.ForeignKey(Account, related_name='groups')\n parts = models.FloatField(default=1.0)\n group = models.ForeignKey(Group, related_name='participants')\n\n def __str__(self):\n return out.substitute(account=self.account, parts=self.parts)\n\n\n class Meta:\n unique_together = 'account', 'parts', 'group'\n",
"step-2": "<mask token>\n\n\nclass Group(models.Model):\n <mask token>\n\n\nclass GroupParticipation(models.Model):\n account = models.ForeignKey(Account, related_name='groups')\n parts = models.FloatField(default=1.0)\n group = models.ForeignKey(Group, related_name='participants')\n\n def __str__(self):\n return out.substitute(account=self.account, parts=self.parts)\n\n\n class Meta:\n unique_together = 'account', 'parts', 'group'\n",
"step-3": "<mask token>\nout = Template('$account: $parts')\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=100)\n\n\nclass GroupParticipation(models.Model):\n account = models.ForeignKey(Account, related_name='groups')\n parts = models.FloatField(default=1.0)\n group = models.ForeignKey(Group, related_name='participants')\n\n def __str__(self):\n return out.substitute(account=self.account, parts=self.parts)\n\n\n class Meta:\n unique_together = 'account', 'parts', 'group'\n",
"step-4": "from django.db import models\nfrom backend.models.account import Account\nfrom string import Template\nout = Template('$account: $parts')\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=100)\n\n\nclass GroupParticipation(models.Model):\n account = models.ForeignKey(Account, related_name='groups')\n parts = models.FloatField(default=1.0)\n group = models.ForeignKey(Group, related_name='participants')\n\n def __str__(self):\n return out.substitute(account=self.account, parts=self.parts)\n\n\n class Meta:\n unique_together = 'account', 'parts', 'group'\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom django.db import models\n\nfrom backend.models.account import Account\nfrom string import Template\n\n\nout = Template(\"$account: $parts\")\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=100)\n\n\nclass GroupParticipation(models.Model):\n account = models.ForeignKey(Account, related_name='groups')\n parts = models.FloatField(default=1.0)\n group = models.ForeignKey(Group, related_name='participants')\n\n def __str__(self):\n return out.substitute(account=self.account, parts=self.parts)\n\n class Meta:\n unique_together = ('account', 'parts', 'group')\n\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
# Copyright (c) 2020, Galois, Inc.
#
# All Rights Reserved
#
# This material is based upon work supported by the Defense Advanced Research
# Projects Agency (DARPA) under Contract No. FA8750-20-C-0203.
#
# Any opinions, findings and conclusions or recommendations expressed in this
# material are those of the author(s) and do not necessarily reflect the views
# of the Defense Advanced Research Projects Agency (DARPA).
from dataclasses import dataclass
import semtk
from migration_helpers.name_space import NameSpace, get_uri
from ontology_changes.ontology_change import stylize_property, OntologyChange
@dataclass
class RemoveIsATypeOf(OntologyChange):
"""
Represents an ontology change where:
property_id is a type of from_property_id.
has been removed.
"""
name_space: NameSpace
class_id: str
property_id: str
range_id: str
def text_description(self) -> str:
prop = stylize_property(self.property_id)
range_str = stylize_property(self.range_id)
return f"Property {prop} used to be a type of {range_str}, no longer."
def migrate_json(self, json: semtk.SemTKJSON) -> None:
json.accept(MigrationVisitor(self))
class MigrationVisitor(semtk.DefaultSemTKVisitor):
def __init__(self, data: RemoveIsATypeOf):
self.data = data
# TODO?
|
normal
|
{
"blob_id": "41294c803cf42611fa003f21b74a49dd5576a8e8",
"index": 5973,
"step-1": "<mask token>\n\n\nclass MigrationVisitor(semtk.DefaultSemTKVisitor):\n\n def __init__(self, data: RemoveIsATypeOf):\n self.data = data\n",
"step-2": "<mask token>\n\n\n@dataclass\nclass RemoveIsATypeOf(OntologyChange):\n <mask token>\n name_space: NameSpace\n class_id: str\n property_id: str\n range_id: str\n\n def text_description(self) ->str:\n prop = stylize_property(self.property_id)\n range_str = stylize_property(self.range_id)\n return f'Property {prop} used to be a type of {range_str}, no longer.'\n\n def migrate_json(self, json: semtk.SemTKJSON) ->None:\n json.accept(MigrationVisitor(self))\n\n\nclass MigrationVisitor(semtk.DefaultSemTKVisitor):\n\n def __init__(self, data: RemoveIsATypeOf):\n self.data = data\n",
"step-3": "<mask token>\n\n\n@dataclass\nclass RemoveIsATypeOf(OntologyChange):\n \"\"\"\n Represents an ontology change where:\n\n property_id is a type of from_property_id.\n\n has been removed.\n \"\"\"\n name_space: NameSpace\n class_id: str\n property_id: str\n range_id: str\n\n def text_description(self) ->str:\n prop = stylize_property(self.property_id)\n range_str = stylize_property(self.range_id)\n return f'Property {prop} used to be a type of {range_str}, no longer.'\n\n def migrate_json(self, json: semtk.SemTKJSON) ->None:\n json.accept(MigrationVisitor(self))\n\n\nclass MigrationVisitor(semtk.DefaultSemTKVisitor):\n\n def __init__(self, data: RemoveIsATypeOf):\n self.data = data\n",
"step-4": "from dataclasses import dataclass\nimport semtk\nfrom migration_helpers.name_space import NameSpace, get_uri\nfrom ontology_changes.ontology_change import stylize_property, OntologyChange\n\n\n@dataclass\nclass RemoveIsATypeOf(OntologyChange):\n \"\"\"\n Represents an ontology change where:\n\n property_id is a type of from_property_id.\n\n has been removed.\n \"\"\"\n name_space: NameSpace\n class_id: str\n property_id: str\n range_id: str\n\n def text_description(self) ->str:\n prop = stylize_property(self.property_id)\n range_str = stylize_property(self.range_id)\n return f'Property {prop} used to be a type of {range_str}, no longer.'\n\n def migrate_json(self, json: semtk.SemTKJSON) ->None:\n json.accept(MigrationVisitor(self))\n\n\nclass MigrationVisitor(semtk.DefaultSemTKVisitor):\n\n def __init__(self, data: RemoveIsATypeOf):\n self.data = data\n",
"step-5": "# Copyright (c) 2020, Galois, Inc.\n#\n# All Rights Reserved\n#\n# This material is based upon work supported by the Defense Advanced Research\n# Projects Agency (DARPA) under Contract No. FA8750-20-C-0203.\n#\n# Any opinions, findings and conclusions or recommendations expressed in this\n# material are those of the author(s) and do not necessarily reflect the views\n# of the Defense Advanced Research Projects Agency (DARPA).\n\nfrom dataclasses import dataclass\n\nimport semtk\n\nfrom migration_helpers.name_space import NameSpace, get_uri\nfrom ontology_changes.ontology_change import stylize_property, OntologyChange\n\n\n@dataclass\nclass RemoveIsATypeOf(OntologyChange):\n \"\"\"\n Represents an ontology change where:\n\n property_id is a type of from_property_id.\n\n has been removed.\n \"\"\"\n\n name_space: NameSpace\n class_id: str\n property_id: str\n range_id: str\n\n def text_description(self) -> str:\n prop = stylize_property(self.property_id)\n range_str = stylize_property(self.range_id)\n return f\"Property {prop} used to be a type of {range_str}, no longer.\"\n\n def migrate_json(self, json: semtk.SemTKJSON) -> None:\n json.accept(MigrationVisitor(self))\n\n\nclass MigrationVisitor(semtk.DefaultSemTKVisitor):\n def __init__(self, data: RemoveIsATypeOf):\n self.data = data\n\n # TODO?\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
import pytesseract
from PIL import Image
img = Image.open("flag.png")
text = pytesseract.image_to_string(img)
def rot(*symbols):
def _rot(n):
encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)
lookup = str.maketrans(''.join(symbols), encoded)
return lambda s: s.translate(lookup)
return _rot
def rot_alpha(n):
from string import ascii_lowercase as lc, ascii_uppercase as uc
lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])
return lambda s: s.translate(lookup)
def rot_encode(n):
from string import ascii_lowercase as lc, ascii_uppercase as uc
lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])
return lambda s: s.translate(lookup)
print(rot_encode(7)(text))
if __name__ == '__main__':
pass
|
normal
|
{
"blob_id": "b7a60322b4a0fcb6de16cd12be33db265a2b8746",
"index": 2735,
"step-1": "<mask token>\n\n\ndef rot(*symbols):\n\n def _rot(n):\n encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)\n lookup = str.maketrans(''.join(symbols), encoded)\n return lambda s: s.translate(lookup)\n return _rot\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef rot(*symbols):\n\n def _rot(n):\n encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)\n lookup = str.maketrans(''.join(symbols), encoded)\n return lambda s: s.translate(lookup)\n return _rot\n\n\ndef rot_alpha(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\ndef rot_encode(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\nprint(rot_encode(7)(text))\nif __name__ == '__main__':\n pass\n",
"step-3": "<mask token>\nimg = Image.open('flag.png')\ntext = pytesseract.image_to_string(img)\n\n\ndef rot(*symbols):\n\n def _rot(n):\n encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)\n lookup = str.maketrans(''.join(symbols), encoded)\n return lambda s: s.translate(lookup)\n return _rot\n\n\ndef rot_alpha(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\ndef rot_encode(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\nprint(rot_encode(7)(text))\nif __name__ == '__main__':\n pass\n",
"step-4": "import pytesseract\nfrom PIL import Image\nimg = Image.open('flag.png')\ntext = pytesseract.image_to_string(img)\n\n\ndef rot(*symbols):\n\n def _rot(n):\n encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)\n lookup = str.maketrans(''.join(symbols), encoded)\n return lambda s: s.translate(lookup)\n return _rot\n\n\ndef rot_alpha(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\ndef rot_encode(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\nprint(rot_encode(7)(text))\nif __name__ == '__main__':\n pass\n",
"step-5": "import pytesseract\nfrom PIL import Image\n\nimg = Image.open(\"flag.png\")\ntext = pytesseract.image_to_string(img)\n\n\ndef rot(*symbols):\n def _rot(n):\n encoded = ''.join(sy[n:] + sy[:n] for sy in symbols)\n lookup = str.maketrans(''.join(symbols), encoded)\n return lambda s: s.translate(lookup)\n\n return _rot\n\n\ndef rot_alpha(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\ndef rot_encode(n):\n from string import ascii_lowercase as lc, ascii_uppercase as uc\n lookup = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])\n return lambda s: s.translate(lookup)\n\n\nprint(rot_encode(7)(text))\n\nif __name__ == '__main__':\n pass\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class Process:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def send_neighbours(self, data, exceptions=[]):
for i in [x for x in self.neighbours if x not in exceptions]:
self.send(i, data)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def threaded(fn):
def wrapper(*args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs, daemon=True)
thread.start()
return thread
return wrapper
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--id', type=int, required=True)
parser.add_argument('--neighbours', nargs='*', type=int, default=[])
parser.add_argument('--election', action='store_true')
parser.add_argument('--capacity', type=int, required=True)
parser.add_argument('--n', type=int, required=True)
args = parser.parse_args()
Process(args.n, args.id, args.neighbours, args.election, ADDRESS, PORT,
args.capacity)
pass
class Process:
def __init__(self, n: int, id: int, neighbours: int, election: bool,
address: str, port: int, capacity: int):
self.n = n
self.id = id
self.address = address
self.port = port
self.neighbours = neighbours
self.parent = None
self.parent_lock = Lock()
self.election_id = None
self.election_id_lock = Lock()
self.ack_counter = 0
self.capacity = capacity
self.max_capacity = self.id, self.capacity
self.listen = self.listen()
sleep(1)
if election:
print('Iniciando eleição')
with self.election_id_lock:
with self.parent_lock:
self.parent = None
self.election_id = self.id
self.ack_counter = 0
print(f'Enviando pedido de eleição para {self.neighbours}')
self.send_neighbours({'message': 'election', 'election_id':
self.election_id})
while True:
sleep(1)
@threaded
def listen(self):
with Listener((self.address, self.port + self.id), backlog=self.n *
self.n) as listener:
while True:
with listener.accept() as conn:
data = conn.recv()
if data['message'] == 'election':
if (self.parent is None and self.election_id is
None or self.election_id < data['election_id']):
if self.election_id is not None:
print('Eleição de maior prioridade recebida')
with self.parent_lock:
self.parent = data['sender']
with self.election_id_lock:
self.election_id = data['election_id']
self.ack_counter = 0
self.send_neighbours({'message': 'election',
'election_id': data['election_id']},
exceptions=[self.parent])
print(
f'Repassando pedido de eleição de {self.election_id} enviada por {self.parent}'
)
else:
print(
f"Confirmando pedido de eleição de {data['election_id']} enviada por {data['sender']}"
)
self.send(data['sender'], {'message': 'ack',
'capacity': self.max_capacity})
elif data['message'] == 'ack':
print(f"Guardando confirmação de {data['sender']}")
self.ack_counter += 1
if self.max_capacity[1] < data['capacity'][1]:
self.max_capacity = data['capacity']
if self.parent is None:
if self.ack_counter == len(self.neighbours):
self.ack_counter = 0
print(
f'Fim da eleição, vencedor: {self.max_capacity}'
)
self.send_all({'message': 'winner',
'leader': self.max_capacity})
elif self.ack_counter == len(self.neighbours) - 1:
self.ack_counter = 0
print(
f'Confirmando pedido de eleição para o nó pai ({self.parent})'
)
self.send(self.parent, {'message': 'ack',
'capacity': self.max_capacity})
elif data['message'] == 'winner':
if self.max_capacity[1] < data['leader'][1]:
self.max_capacity = data['leader']
print(f'Vencedor: {self.max_capacity}')
conn.close()
@threaded
def send(self, target, data):
try:
with Client((self.address, self.port + target)) as client:
data['sender'] = self.id
client.send(data)
client.close()
except ConnectionRefusedError as e:
print(str(e))
print('Connection refused')
def send_neighbours(self, data, exceptions=[]):
for i in [x for x in self.neighbours if x not in exceptions]:
self.send(i, data)
def send_all(self, data, exceptions=[]):
for i in [x for x in range(self.n) if x not in exceptions and x !=
self.id]:
self.send(i, data)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def threaded(fn):
def wrapper(*args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs, daemon=True)
thread.start()
return thread
return wrapper
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--id', type=int, required=True)
parser.add_argument('--neighbours', nargs='*', type=int, default=[])
parser.add_argument('--election', action='store_true')
parser.add_argument('--capacity', type=int, required=True)
parser.add_argument('--n', type=int, required=True)
args = parser.parse_args()
Process(args.n, args.id, args.neighbours, args.election, ADDRESS, PORT,
args.capacity)
pass
class Process:
def __init__(self, n: int, id: int, neighbours: int, election: bool,
address: str, port: int, capacity: int):
self.n = n
self.id = id
self.address = address
self.port = port
self.neighbours = neighbours
self.parent = None
self.parent_lock = Lock()
self.election_id = None
self.election_id_lock = Lock()
self.ack_counter = 0
self.capacity = capacity
self.max_capacity = self.id, self.capacity
self.listen = self.listen()
sleep(1)
if election:
print('Iniciando eleição')
with self.election_id_lock:
with self.parent_lock:
self.parent = None
self.election_id = self.id
self.ack_counter = 0
print(f'Enviando pedido de eleição para {self.neighbours}')
self.send_neighbours({'message': 'election', 'election_id':
self.election_id})
while True:
sleep(1)
@threaded
def listen(self):
with Listener((self.address, self.port + self.id), backlog=self.n *
self.n) as listener:
while True:
with listener.accept() as conn:
data = conn.recv()
if data['message'] == 'election':
if (self.parent is None and self.election_id is
None or self.election_id < data['election_id']):
if self.election_id is not None:
print('Eleição de maior prioridade recebida')
with self.parent_lock:
self.parent = data['sender']
with self.election_id_lock:
self.election_id = data['election_id']
self.ack_counter = 0
self.send_neighbours({'message': 'election',
'election_id': data['election_id']},
exceptions=[self.parent])
print(
f'Repassando pedido de eleição de {self.election_id} enviada por {self.parent}'
)
else:
print(
f"Confirmando pedido de eleição de {data['election_id']} enviada por {data['sender']}"
)
self.send(data['sender'], {'message': 'ack',
'capacity': self.max_capacity})
elif data['message'] == 'ack':
print(f"Guardando confirmação de {data['sender']}")
self.ack_counter += 1
if self.max_capacity[1] < data['capacity'][1]:
self.max_capacity = data['capacity']
if self.parent is None:
if self.ack_counter == len(self.neighbours):
self.ack_counter = 0
print(
f'Fim da eleição, vencedor: {self.max_capacity}'
)
self.send_all({'message': 'winner',
'leader': self.max_capacity})
elif self.ack_counter == len(self.neighbours) - 1:
self.ack_counter = 0
print(
f'Confirmando pedido de eleição para o nó pai ({self.parent})'
)
self.send(self.parent, {'message': 'ack',
'capacity': self.max_capacity})
elif data['message'] == 'winner':
if self.max_capacity[1] < data['leader'][1]:
self.max_capacity = data['leader']
print(f'Vencedor: {self.max_capacity}')
conn.close()
@threaded
def send(self, target, data):
try:
with Client((self.address, self.port + target)) as client:
data['sender'] = self.id
client.send(data)
client.close()
except ConnectionRefusedError as e:
print(str(e))
print('Connection refused')
def send_neighbours(self, data, exceptions=[]):
for i in [x for x in self.neighbours if x not in exceptions]:
self.send(i, data)
def send_all(self, data, exceptions=[]):
for i in [x for x in range(self.n) if x not in exceptions and x !=
self.id]:
self.send(i, data)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
exit(0)
pass
<|reserved_special_token_1|>
import argparse
from time import sleep
from threading import Thread
from threading import Lock
from multiprocessing.connection import Listener
from multiprocessing.connection import Client
ADDRESS = '127.0.0.1'
PORT = 5000
def threaded(fn):
def wrapper(*args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs, daemon=True)
thread.start()
return thread
return wrapper
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--id', type=int, required=True)
parser.add_argument('--neighbours', nargs='*', type=int, default=[])
parser.add_argument('--election', action='store_true')
parser.add_argument('--capacity', type=int, required=True)
parser.add_argument('--n', type=int, required=True)
args = parser.parse_args()
Process(args.n, args.id, args.neighbours, args.election, ADDRESS, PORT,
args.capacity)
pass
class Process:
def __init__(self, n: int, id: int, neighbours: int, election: bool,
address: str, port: int, capacity: int):
self.n = n
self.id = id
self.address = address
self.port = port
self.neighbours = neighbours
self.parent = None
self.parent_lock = Lock()
self.election_id = None
self.election_id_lock = Lock()
self.ack_counter = 0
self.capacity = capacity
self.max_capacity = self.id, self.capacity
self.listen = self.listen()
sleep(1)
if election:
print('Iniciando eleição')
with self.election_id_lock:
with self.parent_lock:
self.parent = None
self.election_id = self.id
self.ack_counter = 0
print(f'Enviando pedido de eleição para {self.neighbours}')
self.send_neighbours({'message': 'election', 'election_id':
self.election_id})
while True:
sleep(1)
@threaded
def listen(self):
with Listener((self.address, self.port + self.id), backlog=self.n *
self.n) as listener:
while True:
with listener.accept() as conn:
data = conn.recv()
if data['message'] == 'election':
if (self.parent is None and self.election_id is
None or self.election_id < data['election_id']):
if self.election_id is not None:
print('Eleição de maior prioridade recebida')
with self.parent_lock:
self.parent = data['sender']
with self.election_id_lock:
self.election_id = data['election_id']
self.ack_counter = 0
self.send_neighbours({'message': 'election',
'election_id': data['election_id']},
exceptions=[self.parent])
print(
f'Repassando pedido de eleição de {self.election_id} enviada por {self.parent}'
)
else:
print(
f"Confirmando pedido de eleição de {data['election_id']} enviada por {data['sender']}"
)
self.send(data['sender'], {'message': 'ack',
'capacity': self.max_capacity})
elif data['message'] == 'ack':
print(f"Guardando confirmação de {data['sender']}")
self.ack_counter += 1
if self.max_capacity[1] < data['capacity'][1]:
self.max_capacity = data['capacity']
if self.parent is None:
if self.ack_counter == len(self.neighbours):
self.ack_counter = 0
print(
f'Fim da eleição, vencedor: {self.max_capacity}'
)
self.send_all({'message': 'winner',
'leader': self.max_capacity})
elif self.ack_counter == len(self.neighbours) - 1:
self.ack_counter = 0
print(
f'Confirmando pedido de eleição para o nó pai ({self.parent})'
)
self.send(self.parent, {'message': 'ack',
'capacity': self.max_capacity})
elif data['message'] == 'winner':
if self.max_capacity[1] < data['leader'][1]:
self.max_capacity = data['leader']
print(f'Vencedor: {self.max_capacity}')
conn.close()
@threaded
def send(self, target, data):
try:
with Client((self.address, self.port + target)) as client:
data['sender'] = self.id
client.send(data)
client.close()
except ConnectionRefusedError as e:
print(str(e))
print('Connection refused')
def send_neighbours(self, data, exceptions=[]):
for i in [x for x in self.neighbours if x not in exceptions]:
self.send(i, data)
def send_all(self, data, exceptions=[]):
for i in [x for x in range(self.n) if x not in exceptions and x !=
self.id]:
self.send(i, data)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
exit(0)
pass
<|reserved_special_token_1|>
import argparse
from time import sleep
from threading import Thread
from threading import Lock
from multiprocessing.connection import Listener
from multiprocessing.connection import Client
ADDRESS = '127.0.0.1'
PORT = 5000
# Threaded function snippet
def threaded(fn):
def wrapper(*args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs, daemon=True)
thread.start()
return thread
return wrapper
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--id', type=int, required=True)
parser.add_argument("--neighbours", nargs="*", type=int, default=[])
parser.add_argument("--election", action='store_true')
parser.add_argument('--capacity', type=int, required=True)
parser.add_argument('--n', type=int, required=True)
args = parser.parse_args()
Process(args.n, args.id, args.neighbours, args.election, ADDRESS, PORT, args.capacity)
pass
class Process:
    """One node of a distributed leader election (highest capacity wins).

    The node listens for connections on ``port + id``.  An election floods
    ``election`` messages over neighbour edges; the first sender of a given
    election becomes this node's parent, ``ack`` replies carry the best
    ``(id, capacity)`` pair back toward the initiator, and the initiator
    broadcasts the final ``winner`` to every process.
    """

    def __init__(self, n: int, id: int, neighbours: list, election: bool, address: str, port: int, capacity: int):
        """Start the listener thread and, when *election* is set, initiate an election.

        NOTE: this constructor never returns — it ends in an idle loop that
        keeps the daemon listener thread alive.
        """
        self.n = n
        self.id = id
        self.address = address
        self.port = port
        self.neighbours = neighbours
        self.parent = None              # spanning-tree parent in the current election
        self.parent_lock = Lock()
        self.election_id = None         # highest-priority election id seen so far
        self.election_id_lock = Lock()
        self.ack_counter = 0            # acks received from children this round
        self.capacity = capacity
        self.max_capacity = (self.id, self.capacity)  # best (id, capacity) pair seen
        self.listen = self.listen()     # @threaded: starts the thread; rebinds the name to it

        sleep(1)  # give the sibling processes time to open their listeners

        if election:
            print('Iniciando eleição')
            with self.election_id_lock:
                with self.parent_lock:
                    self.parent = None  # the initiator is the root of the tree
                self.election_id = self.id
                self.ack_counter = 0
            print(f'Enviando pedido de eleição para {self.neighbours}')
            self.send_neighbours({
                'message': 'election',
                'election_id': self.election_id
            })

        while(True):
            sleep(1)

    @threaded
    def listen(self):
        """Accept connections forever and run the election state machine.

        NOTE(review): this thread reads self.parent / self.election_id /
        self.ack_counter mostly without holding the locks — confirm that is
        acceptable for the intended message interleavings.
        """
        with Listener((self.address, self.port+self.id), backlog=self.n*self.n) as listener:
            while True:
                with listener.accept() as conn:
                    data = conn.recv()

                    if data['message'] == 'election':
                        # First election seen, or a higher-priority election id:
                        # adopt the sender as parent and flood to the other neighbours.
                        if (self.parent is None and self.election_id is None) or self.election_id < data['election_id']:
                            if self.election_id is not None:
                                print('Eleição de maior prioridade recebida')
                            with self.parent_lock:
                                self.parent = data['sender']
                            with self.election_id_lock:
                                self.election_id = data['election_id']
                            self.ack_counter = 0
                            self.send_neighbours({
                                'message': 'election',
                                'election_id': data['election_id']
                            }, exceptions=[self.parent])
                            print(f"Repassando pedido de eleição de {self.election_id} enviada por {self.parent}")
                        else:
                            # Already part of an equal/higher election: just ack
                            # with the best pair we know about.
                            print(f"Confirmando pedido de eleição de {data['election_id']} enviada por {data['sender']}")
                            self.send(data['sender'], {
                                'message': 'ack',
                                'capacity': self.max_capacity
                            })
                    elif data['message'] == 'ack':
                        print(f"Guardando confirmação de {data['sender']}")
                        self.ack_counter+= 1
                        if (self.max_capacity[1] < data['capacity'][1]):
                            self.max_capacity = data['capacity']

                        if self.parent is None:
                            # Initiator: once every neighbour acked, the election is over.
                            if self.ack_counter == len(self.neighbours):
                                self.ack_counter = 0
                                print(f'Fim da eleição, vencedor: {self.max_capacity}')
                                self.send_all({
                                    'message': 'winner',
                                    'leader': self.max_capacity
                                })
                        else:
                            # Internal node: all children (neighbours minus the parent)
                            # acked — forward the aggregated best pair up the tree.
                            if self.ack_counter == len(self.neighbours) - 1:
                                self.ack_counter = 0
                                print(f"Confirmando pedido de eleição para o nó pai ({self.parent})")
                                self.send(self.parent, {
                                    'message': 'ack',
                                    'capacity': self.max_capacity,
                                })
                    elif data['message'] == 'winner':
                        if (self.max_capacity[1] < data['leader'][1]):
                            self.max_capacity = data['leader']
                        print(f"Vencedor: {self.max_capacity}")

                    conn.close()  # redundant with the `with` block, but harmless

    @threaded
    def send(self, target, data):
        """Fire-and-forget: connect to *target*'s listener and deliver *data*.

        The sender id is stamped into the message; a refused connection is
        logged and the message dropped.
        """
        try:
            with Client((self.address, self.port+target)) as client:
                data['sender'] = self.id
                client.send(data)
                client.close()
        except ConnectionRefusedError as e:
            print(str(e))
            print("Connection refused")

    def send_neighbours(self, data, exceptions=None):
        """Send *data* to every neighbour not listed in *exceptions*."""
        if exceptions is None:
            exceptions = []  # avoid the original's shared mutable default
        for i in [x for x in self.neighbours if x not in exceptions]:
            self.send(i, data)

    def send_all(self, data, exceptions=None):
        """Send *data* to every process id except self and *exceptions*."""
        if exceptions is None:
            exceptions = []  # avoid the original's shared mutable default
        for i in [x for x in range(self.n) if x not in exceptions and x != self.id]:
            self.send(i, data)
if __name__ == '__main__':
    # Run the node; Ctrl-C terminates cleanly with exit status 0.
    # (Removed the unreachable `pass` and the redundant parens in `except`.)
    try:
        main()
    except KeyboardInterrupt:
        exit(0)
|
flexible
|
{
"blob_id": "c5a2c00d53111d62df413907d4ff4ca5a02d4035",
"index": 7005,
"step-1": "<mask token>\n\n\nclass Process:\n <mask token>\n <mask token>\n <mask token>\n\n def send_neighbours(self, data, exceptions=[]):\n for i in [x for x in self.neighbours if x not in exceptions]:\n self.send(i, data)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef threaded(fn):\n\n def wrapper(*args, **kwargs):\n thread = Thread(target=fn, args=args, kwargs=kwargs, daemon=True)\n thread.start()\n return thread\n return wrapper\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--id', type=int, required=True)\n parser.add_argument('--neighbours', nargs='*', type=int, default=[])\n parser.add_argument('--election', action='store_true')\n parser.add_argument('--capacity', type=int, required=True)\n parser.add_argument('--n', type=int, required=True)\n args = parser.parse_args()\n Process(args.n, args.id, args.neighbours, args.election, ADDRESS, PORT,\n args.capacity)\n pass\n\n\nclass Process:\n\n def __init__(self, n: int, id: int, neighbours: int, election: bool,\n address: str, port: int, capacity: int):\n self.n = n\n self.id = id\n self.address = address\n self.port = port\n self.neighbours = neighbours\n self.parent = None\n self.parent_lock = Lock()\n self.election_id = None\n self.election_id_lock = Lock()\n self.ack_counter = 0\n self.capacity = capacity\n self.max_capacity = self.id, self.capacity\n self.listen = self.listen()\n sleep(1)\n if election:\n print('Iniciando eleição')\n with self.election_id_lock:\n with self.parent_lock:\n self.parent = None\n self.election_id = self.id\n self.ack_counter = 0\n print(f'Enviando pedido de eleição para {self.neighbours}')\n self.send_neighbours({'message': 'election', 'election_id':\n self.election_id})\n while True:\n sleep(1)\n\n @threaded\n def listen(self):\n with Listener((self.address, self.port + self.id), backlog=self.n *\n self.n) as listener:\n while True:\n with listener.accept() as conn:\n data = conn.recv()\n if data['message'] == 'election':\n if (self.parent is None and self.election_id is\n None or self.election_id < data['election_id']):\n if self.election_id is not None:\n print('Eleição de maior prioridade recebida')\n with self.parent_lock:\n self.parent = data['sender']\n with 
self.election_id_lock:\n self.election_id = data['election_id']\n self.ack_counter = 0\n self.send_neighbours({'message': 'election',\n 'election_id': data['election_id']},\n exceptions=[self.parent])\n print(\n f'Repassando pedido de eleição de {self.election_id} enviada por {self.parent}'\n )\n else:\n print(\n f\"Confirmando pedido de eleição de {data['election_id']} enviada por {data['sender']}\"\n )\n self.send(data['sender'], {'message': 'ack',\n 'capacity': self.max_capacity})\n elif data['message'] == 'ack':\n print(f\"Guardando confirmação de {data['sender']}\")\n self.ack_counter += 1\n if self.max_capacity[1] < data['capacity'][1]:\n self.max_capacity = data['capacity']\n if self.parent is None:\n if self.ack_counter == len(self.neighbours):\n self.ack_counter = 0\n print(\n f'Fim da eleição, vencedor: {self.max_capacity}'\n )\n self.send_all({'message': 'winner',\n 'leader': self.max_capacity})\n elif self.ack_counter == len(self.neighbours) - 1:\n self.ack_counter = 0\n print(\n f'Confirmando pedido de eleição para o nó pai ({self.parent})'\n )\n self.send(self.parent, {'message': 'ack',\n 'capacity': self.max_capacity})\n elif data['message'] == 'winner':\n if self.max_capacity[1] < data['leader'][1]:\n self.max_capacity = data['leader']\n print(f'Vencedor: {self.max_capacity}')\n conn.close()\n\n @threaded\n def send(self, target, data):\n try:\n with Client((self.address, self.port + target)) as client:\n data['sender'] = self.id\n client.send(data)\n client.close()\n except ConnectionRefusedError as e:\n print(str(e))\n print('Connection refused')\n\n def send_neighbours(self, data, exceptions=[]):\n for i in [x for x in self.neighbours if x not in exceptions]:\n self.send(i, data)\n\n def send_all(self, data, exceptions=[]):\n for i in [x for x in range(self.n) if x not in exceptions and x !=\n self.id]:\n self.send(i, data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef threaded(fn):\n\n def wrapper(*args, **kwargs):\n thread = Thread(target=fn, args=args, kwargs=kwargs, daemon=True)\n thread.start()\n return thread\n return wrapper\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--id', type=int, required=True)\n parser.add_argument('--neighbours', nargs='*', type=int, default=[])\n parser.add_argument('--election', action='store_true')\n parser.add_argument('--capacity', type=int, required=True)\n parser.add_argument('--n', type=int, required=True)\n args = parser.parse_args()\n Process(args.n, args.id, args.neighbours, args.election, ADDRESS, PORT,\n args.capacity)\n pass\n\n\nclass Process:\n\n def __init__(self, n: int, id: int, neighbours: int, election: bool,\n address: str, port: int, capacity: int):\n self.n = n\n self.id = id\n self.address = address\n self.port = port\n self.neighbours = neighbours\n self.parent = None\n self.parent_lock = Lock()\n self.election_id = None\n self.election_id_lock = Lock()\n self.ack_counter = 0\n self.capacity = capacity\n self.max_capacity = self.id, self.capacity\n self.listen = self.listen()\n sleep(1)\n if election:\n print('Iniciando eleição')\n with self.election_id_lock:\n with self.parent_lock:\n self.parent = None\n self.election_id = self.id\n self.ack_counter = 0\n print(f'Enviando pedido de eleição para {self.neighbours}')\n self.send_neighbours({'message': 'election', 'election_id':\n self.election_id})\n while True:\n sleep(1)\n\n @threaded\n def listen(self):\n with Listener((self.address, self.port + self.id), backlog=self.n *\n self.n) as listener:\n while True:\n with listener.accept() as conn:\n data = conn.recv()\n if data['message'] == 'election':\n if (self.parent is None and self.election_id is\n None or self.election_id < data['election_id']):\n if self.election_id is not None:\n print('Eleição de maior prioridade recebida')\n with self.parent_lock:\n self.parent = data['sender']\n with 
self.election_id_lock:\n self.election_id = data['election_id']\n self.ack_counter = 0\n self.send_neighbours({'message': 'election',\n 'election_id': data['election_id']},\n exceptions=[self.parent])\n print(\n f'Repassando pedido de eleição de {self.election_id} enviada por {self.parent}'\n )\n else:\n print(\n f\"Confirmando pedido de eleição de {data['election_id']} enviada por {data['sender']}\"\n )\n self.send(data['sender'], {'message': 'ack',\n 'capacity': self.max_capacity})\n elif data['message'] == 'ack':\n print(f\"Guardando confirmação de {data['sender']}\")\n self.ack_counter += 1\n if self.max_capacity[1] < data['capacity'][1]:\n self.max_capacity = data['capacity']\n if self.parent is None:\n if self.ack_counter == len(self.neighbours):\n self.ack_counter = 0\n print(\n f'Fim da eleição, vencedor: {self.max_capacity}'\n )\n self.send_all({'message': 'winner',\n 'leader': self.max_capacity})\n elif self.ack_counter == len(self.neighbours) - 1:\n self.ack_counter = 0\n print(\n f'Confirmando pedido de eleição para o nó pai ({self.parent})'\n )\n self.send(self.parent, {'message': 'ack',\n 'capacity': self.max_capacity})\n elif data['message'] == 'winner':\n if self.max_capacity[1] < data['leader'][1]:\n self.max_capacity = data['leader']\n print(f'Vencedor: {self.max_capacity}')\n conn.close()\n\n @threaded\n def send(self, target, data):\n try:\n with Client((self.address, self.port + target)) as client:\n data['sender'] = self.id\n client.send(data)\n client.close()\n except ConnectionRefusedError as e:\n print(str(e))\n print('Connection refused')\n\n def send_neighbours(self, data, exceptions=[]):\n for i in [x for x in self.neighbours if x not in exceptions]:\n self.send(i, data)\n\n def send_all(self, data, exceptions=[]):\n for i in [x for x in range(self.n) if x not in exceptions and x !=\n self.id]:\n self.send(i, data)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n exit(0)\n pass\n",
"step-4": "import argparse\nfrom time import sleep\nfrom threading import Thread\nfrom threading import Lock\nfrom multiprocessing.connection import Listener\nfrom multiprocessing.connection import Client\nADDRESS = '127.0.0.1'\nPORT = 5000\n\n\ndef threaded(fn):\n\n def wrapper(*args, **kwargs):\n thread = Thread(target=fn, args=args, kwargs=kwargs, daemon=True)\n thread.start()\n return thread\n return wrapper\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--id', type=int, required=True)\n parser.add_argument('--neighbours', nargs='*', type=int, default=[])\n parser.add_argument('--election', action='store_true')\n parser.add_argument('--capacity', type=int, required=True)\n parser.add_argument('--n', type=int, required=True)\n args = parser.parse_args()\n Process(args.n, args.id, args.neighbours, args.election, ADDRESS, PORT,\n args.capacity)\n pass\n\n\nclass Process:\n\n def __init__(self, n: int, id: int, neighbours: int, election: bool,\n address: str, port: int, capacity: int):\n self.n = n\n self.id = id\n self.address = address\n self.port = port\n self.neighbours = neighbours\n self.parent = None\n self.parent_lock = Lock()\n self.election_id = None\n self.election_id_lock = Lock()\n self.ack_counter = 0\n self.capacity = capacity\n self.max_capacity = self.id, self.capacity\n self.listen = self.listen()\n sleep(1)\n if election:\n print('Iniciando eleição')\n with self.election_id_lock:\n with self.parent_lock:\n self.parent = None\n self.election_id = self.id\n self.ack_counter = 0\n print(f'Enviando pedido de eleição para {self.neighbours}')\n self.send_neighbours({'message': 'election', 'election_id':\n self.election_id})\n while True:\n sleep(1)\n\n @threaded\n def listen(self):\n with Listener((self.address, self.port + self.id), backlog=self.n *\n self.n) as listener:\n while True:\n with listener.accept() as conn:\n data = conn.recv()\n if data['message'] == 'election':\n if (self.parent is None and self.election_id 
is\n None or self.election_id < data['election_id']):\n if self.election_id is not None:\n print('Eleição de maior prioridade recebida')\n with self.parent_lock:\n self.parent = data['sender']\n with self.election_id_lock:\n self.election_id = data['election_id']\n self.ack_counter = 0\n self.send_neighbours({'message': 'election',\n 'election_id': data['election_id']},\n exceptions=[self.parent])\n print(\n f'Repassando pedido de eleição de {self.election_id} enviada por {self.parent}'\n )\n else:\n print(\n f\"Confirmando pedido de eleição de {data['election_id']} enviada por {data['sender']}\"\n )\n self.send(data['sender'], {'message': 'ack',\n 'capacity': self.max_capacity})\n elif data['message'] == 'ack':\n print(f\"Guardando confirmação de {data['sender']}\")\n self.ack_counter += 1\n if self.max_capacity[1] < data['capacity'][1]:\n self.max_capacity = data['capacity']\n if self.parent is None:\n if self.ack_counter == len(self.neighbours):\n self.ack_counter = 0\n print(\n f'Fim da eleição, vencedor: {self.max_capacity}'\n )\n self.send_all({'message': 'winner',\n 'leader': self.max_capacity})\n elif self.ack_counter == len(self.neighbours) - 1:\n self.ack_counter = 0\n print(\n f'Confirmando pedido de eleição para o nó pai ({self.parent})'\n )\n self.send(self.parent, {'message': 'ack',\n 'capacity': self.max_capacity})\n elif data['message'] == 'winner':\n if self.max_capacity[1] < data['leader'][1]:\n self.max_capacity = data['leader']\n print(f'Vencedor: {self.max_capacity}')\n conn.close()\n\n @threaded\n def send(self, target, data):\n try:\n with Client((self.address, self.port + target)) as client:\n data['sender'] = self.id\n client.send(data)\n client.close()\n except ConnectionRefusedError as e:\n print(str(e))\n print('Connection refused')\n\n def send_neighbours(self, data, exceptions=[]):\n for i in [x for x in self.neighbours if x not in exceptions]:\n self.send(i, data)\n\n def send_all(self, data, exceptions=[]):\n for i in [x for x in 
range(self.n) if x not in exceptions and x !=\n self.id]:\n self.send(i, data)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n exit(0)\n pass\n",
"step-5": "import argparse\nfrom time import sleep\nfrom threading import Thread\nfrom threading import Lock\nfrom multiprocessing.connection import Listener\nfrom multiprocessing.connection import Client\n\nADDRESS = '127.0.0.1'\nPORT = 5000\n\n# Threaded function snippet\ndef threaded(fn):\n def wrapper(*args, **kwargs):\n thread = Thread(target=fn, args=args, kwargs=kwargs, daemon=True)\n thread.start()\n return thread\n return wrapper\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--id', type=int, required=True)\n parser.add_argument(\"--neighbours\", nargs=\"*\", type=int, default=[])\n parser.add_argument(\"--election\", action='store_true')\n parser.add_argument('--capacity', type=int, required=True)\n parser.add_argument('--n', type=int, required=True)\n args = parser.parse_args()\n Process(args.n, args.id, args.neighbours, args.election, ADDRESS, PORT, args.capacity)\n pass\n\nclass Process:\n\n def __init__(self, n : int, id : int, neighbours : int, election : bool, address : str, port : int, capacity : int):\n self.n = n\n self.id = id\n self.address = address\n self.port = port\n self.neighbours = neighbours\n self.parent = None\n self.parent_lock = Lock()\n self.election_id = None\n self.election_id_lock = Lock()\n self.ack_counter = 0\n self.capacity = capacity\n self.max_capacity = (self.id, self.capacity)\n self.listen = self.listen()\n\n sleep(1)\n\n if election:\n print('Iniciando eleição')\n with self.election_id_lock:\n with self.parent_lock:\n self.parent = None\n self.election_id = self.id\n self.ack_counter = 0\n print(f'Enviando pedido de eleição para {self.neighbours}')\n self.send_neighbours({\n 'message': 'election',\n 'election_id': self.election_id\n })\n\n while(True):\n sleep(1)\n\n @threaded\n def listen(self):\n with Listener((self.address, self.port+self.id), backlog=self.n*self.n) as listener:\n while True:\n with listener.accept() as conn:\n data = conn.recv()\n\n if data['message'] == 'election':\n if 
(self.parent is None and self.election_id is None) or self.election_id < data['election_id']:\n if self.election_id is not None:\n print('Eleição de maior prioridade recebida')\n with self.parent_lock:\n self.parent = data['sender']\n with self.election_id_lock:\n self.election_id = data['election_id']\n self.ack_counter = 0\n self.send_neighbours({\n 'message': 'election',\n 'election_id': data['election_id']\n }, exceptions=[self.parent])\n print(f\"Repassando pedido de eleição de {self.election_id} enviada por {self.parent}\")\n else:\n print(f\"Confirmando pedido de eleição de {data['election_id']} enviada por {data['sender']}\")\n self.send(data['sender'], {\n 'message': 'ack',\n 'capacity': self.max_capacity\n })\n elif data['message'] == 'ack':\n print(f\"Guardando confirmação de {data['sender']}\")\n self.ack_counter+= 1\n if (self.max_capacity[1] < data['capacity'][1]):\n self.max_capacity = data['capacity']\n\n if self.parent is None:\n if self.ack_counter == len(self.neighbours):\n self.ack_counter = 0\n print(f'Fim da eleição, vencedor: {self.max_capacity}')\n self.send_all({\n 'message': 'winner',\n 'leader': self.max_capacity\n })\n else:\n if self.ack_counter == len(self.neighbours) - 1:\n self.ack_counter = 0\n print(f\"Confirmando pedido de eleição para o nó pai ({self.parent})\")\n self.send(self.parent, {\n 'message': 'ack',\n 'capacity': self.max_capacity,\n })\n elif data['message'] == 'winner':\n if (self.max_capacity[1] < data['leader'][1]):\n self.max_capacity = data['leader']\n print(f\"Vencedor: {self.max_capacity}\")\n\n conn.close()\n\n @threaded\n def send(self, target, data):\n try:\n with Client((self.address, self.port+target)) as client:\n data['sender'] = self.id\n client.send(data)\n client.close()\n except ConnectionRefusedError as e:\n print(str(e))\n print(\"Connection refused\")\n\n def send_neighbours(self, data, exceptions = []):\n for i in [x for x in self.neighbours if x not in exceptions]:\n self.send(i, data)\n\n def 
send_all(self, data, exceptions = []):\n for i in [x for x in range(self.n) if x not in exceptions and x != self.id]:\n self.send(i, data)\n\nif __name__ == '__main__':\n try:\n main()\n except (KeyboardInterrupt):\n exit(0)\n pass",
"step-ids": [
2,
8,
9,
11,
12
]
}
|
[
2,
8,
9,
11,
12
] |
#!/usr/bin/env python
import argparse
import pymssql
import json
def getMediaId(contentProviderMediaName):
    """Return the LCM media id for *contentProviderMediaName*, or None if absent.

    Looks the name up in the LodgingCatalogMaster media table; the media id
    is the first column of the matching row.
    """
    #test db
    conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user='TravCatalog', password='travel', database='LodgingCatalogMaster_Phoenix')
    #prod db
    #conn = pymssql.connect(host='LodgingCatalogMaster.ch.expeso.com', user='TravCatalog', password='travel', database='LodgingCatalogMaster_Phoenix')
    try:
        cur = conn.cursor()
        # Pass the parameter as a tuple per DB-API so the driver escapes it.
        cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',
                    (contentProviderMediaName,))
        row = cur.fetchone()
        # The original `while row: ... break` was an `if` in disguise.
        return row[0] if row else None
    finally:
        conn.close()  # the original leaked the connection on every call
def main(messages_file, records):
    """Stream JSON messages from *messages_file*, enrich each with its LCM
    media id, and print the result, one JSON document per line.

    Lines starting with '> ' are skipped (log noise from a previous run).
    *records* > 0 caps how many messages are processed; any other value
    means "process all of them".
    """
    print('> Messages: %s; Records: %d' % (messages_file, records))
    message_number = 0
    with open(messages_file, 'r') as msgs_file:
        for message in msgs_file:
            if message_number >= records and records > 0:
                break
            if message.startswith('> '):
                continue
            try:
                jsonMsg = json.loads(message)
                mediaid = getMediaId(jsonMsg['fileName'])
                if mediaid is not None:
                    jsonMsg['domainFields']['lcmMediaId'] = str(mediaid)
                print(json.dumps(jsonMsg))
            except (ValueError, KeyError, RuntimeError, TypeError, NameError):
                # ValueError covers malformed JSON (json.JSONDecodeError);
                # KeyError covers messages missing 'fileName'/'domainFields'.
                # Neither was caught originally, so one bad line killed the run
                # despite this per-line error handler.
                print('> %s error' % message_number)
            message_number += 1
if __name__ == '__main__':
    # CLI entry point: the message file is positional; --records optionally
    # caps how many messages are read (-1 means no cap).
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        'messages_file', help='File with the messages to write. One message per line'
    )
    arg_parser.add_argument(
        '--records', default=-1, help='Number of messages to read'
    )
    cli = arg_parser.parse_args()
    main(cli.messages_file, int(cli.records))
|
normal
|
{
"blob_id": "a5b7f565a1797e5f326bcf26ff7c8ad2469dca70",
"index": 7442,
"step-1": "<mask token>\n\n\ndef getMediaId(contentProviderMediaName):\n conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user=\n 'TravCatalog', password='travel', database=\n 'LodgingCatalogMaster_Phoenix')\n cur = conn.cursor()\n cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',\n contentProviderMediaName)\n row = cur.fetchone()\n mediaid = None\n while row:\n mediaid = row[0]\n break\n return mediaid\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getMediaId(contentProviderMediaName):\n conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user=\n 'TravCatalog', password='travel', database=\n 'LodgingCatalogMaster_Phoenix')\n cur = conn.cursor()\n cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',\n contentProviderMediaName)\n row = cur.fetchone()\n mediaid = None\n while row:\n mediaid = row[0]\n break\n return mediaid\n\n\ndef main(messages_file, records):\n print('> Messages: %s; Records: %d' % (messages_file, records))\n message_number = 0\n with open(messages_file, 'r') as msgs_file:\n for message in msgs_file:\n if message_number >= records and records > 0:\n break\n if message.startswith('> '):\n continue\n try:\n jsonMsg = json.loads(message)\n mediaid = getMediaId(jsonMsg['fileName'])\n if mediaid != None:\n jsonMsg['domainFields']['lcmMediaId'] = str(mediaid)\n print(json.dumps(jsonMsg))\n except (RuntimeError, TypeError, NameError):\n print('> %s error' % message_number)\n message_number += 1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getMediaId(contentProviderMediaName):\n conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user=\n 'TravCatalog', password='travel', database=\n 'LodgingCatalogMaster_Phoenix')\n cur = conn.cursor()\n cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',\n contentProviderMediaName)\n row = cur.fetchone()\n mediaid = None\n while row:\n mediaid = row[0]\n break\n return mediaid\n\n\ndef main(messages_file, records):\n print('> Messages: %s; Records: %d' % (messages_file, records))\n message_number = 0\n with open(messages_file, 'r') as msgs_file:\n for message in msgs_file:\n if message_number >= records and records > 0:\n break\n if message.startswith('> '):\n continue\n try:\n jsonMsg = json.loads(message)\n mediaid = getMediaId(jsonMsg['fileName'])\n if mediaid != None:\n jsonMsg['domainFields']['lcmMediaId'] = str(mediaid)\n print(json.dumps(jsonMsg))\n except (RuntimeError, TypeError, NameError):\n print('> %s error' % message_number)\n message_number += 1\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('messages_file', help=\n 'File with the messages to write. One message per line')\n parser.add_argument('--records', default=-1, help=\n 'Number of messages to read')\n args = parser.parse_args()\n main(args.messages_file, int(args.records))\n",
"step-4": "import argparse\nimport pymssql\nimport json\n\n\ndef getMediaId(contentProviderMediaName):\n conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user=\n 'TravCatalog', password='travel', database=\n 'LodgingCatalogMaster_Phoenix')\n cur = conn.cursor()\n cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',\n contentProviderMediaName)\n row = cur.fetchone()\n mediaid = None\n while row:\n mediaid = row[0]\n break\n return mediaid\n\n\ndef main(messages_file, records):\n print('> Messages: %s; Records: %d' % (messages_file, records))\n message_number = 0\n with open(messages_file, 'r') as msgs_file:\n for message in msgs_file:\n if message_number >= records and records > 0:\n break\n if message.startswith('> '):\n continue\n try:\n jsonMsg = json.loads(message)\n mediaid = getMediaId(jsonMsg['fileName'])\n if mediaid != None:\n jsonMsg['domainFields']['lcmMediaId'] = str(mediaid)\n print(json.dumps(jsonMsg))\n except (RuntimeError, TypeError, NameError):\n print('> %s error' % message_number)\n message_number += 1\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('messages_file', help=\n 'File with the messages to write. One message per line')\n parser.add_argument('--records', default=-1, help=\n 'Number of messages to read')\n args = parser.parse_args()\n main(args.messages_file, int(args.records))\n",
"step-5": "#!/usr/bin/env python\nimport argparse\nimport pymssql\nimport json\n\n#get the lcmMediaId from DB.\ndef getMediaId(contentProviderMediaName):\n #test db\n conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user='TravCatalog', password='travel', database='LodgingCatalogMaster_Phoenix')\n #prod db\n #conn = pymssql.connect(host='LodgingCatalogMaster.ch.expeso.com', user='TravCatalog', password='travel', database='LodgingCatalogMaster_Phoenix')\n cur = conn.cursor()\n cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',contentProviderMediaName)\n row = cur.fetchone()\n mediaid = None\n while row:\n mediaid =row[0]\n break\n return mediaid\n\ndef main(messages_file, records):\n print ('> Messages: %s; Records: %d' % (messages_file, records))\n message_number = 0\n with open(messages_file, 'r') as msgs_file:\n for message in msgs_file:\n if message_number >= records and records > 0:\n break\n if message.startswith('> '):\n continue\n try:\n jsonMsg = json.loads(message)\n mediaid = getMediaId(jsonMsg['fileName'])\n if(mediaid != None):\n jsonMsg['domainFields']['lcmMediaId']=str(mediaid)\n print (json.dumps(jsonMsg))\n except (RuntimeError, TypeError, NameError):\n print ('> %s error' % message_number)\n message_number += 1\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'messages_file', help='File with the messages to write. One message per line'\n )\n parser.add_argument(\n '--records', default=-1, help='Number of messages to read'\n )\n args = parser.parse_args()\n main(args.messages_file, int(args.records))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# Module version string.
__version__ = '18.07.0'
|
flexible
|
{
"blob_id": "3cac7829cf0c07ddc704a25ec3c781c9510a8e0c",
"index": 3613,
"step-1": "<mask token>\n",
"step-2": "__version__ = '18.07.0'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from django.conf.urls import url
from myapp import views
# URL routes: site root -> homepage; paths under "search/" -> search view.
urlpatterns = [
    url(r'^$', views.homepage, name='homepage'),
    # NOTE(review): the pattern has no trailing '$', so it matches any path
    # beginning with "search/"; and the route name 'article_detail' looks
    # copied from an article route — confirm both are intentional.
    url(r'^search/', views.my_search_view, name = 'article_detail')
    ]
|
normal
|
{
"blob_id": "388e43850a2e114cfe7869293ee814831a088b3e",
"index": 8468,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^$', views.homepage, name='homepage'), url('^search/',\n views.my_search_view, name='article_detail')]\n",
"step-3": "from django.conf.urls import url\nfrom myapp import views\nurlpatterns = [url('^$', views.homepage, name='homepage'), url('^search/',\n views.my_search_view, name='article_detail')]\n",
"step-4": "from django.conf.urls import url\nfrom myapp import views\n\nurlpatterns = [\n url(r'^$', views.homepage, name='homepage'),\n url(r'^search/', views.my_search_view, name = 'article_detail')\n ]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import random #importing the random library from python
# The 19 classic Magic 8-Ball answers.
answers = ["It is certain", "Without a doubt", "Yes, definitely",
           "You may rely on it", "As I see it, yes", "Most likely",
           "Outlook good", "Yes", "Signs point to yes", "Reply hazy, try again",
           "Ask again later", "Better not tell you now", "Cannot predict now",
           "Concentrate and ask again", "Don't count on it", "My reply is no",
           "My sources say no", "Outlook not so good", "Very doubtful"]

# Prompt until the user submits an empty line.  The original primed `ans`
# with '!' and answered BEFORE checking for exit, so pressing enter to leave
# still printed one stray random answer — break before answering instead.
while True:
    try:
        ans = input("Ask the magic 8 ball a question. (Press enter to leave): \n")
    except EOFError:  # stdin closed (e.g. piped input ran out) — exit cleanly
        break
    if not ans:
        break
    print(random.choice(answers))
|
normal
|
{
"blob_id": "b5e9af166f3b55e44d9273077e5acd05b1fd68fa",
"index": 2335,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile ans:\n ans = input('Ask the magic 8 ball a question. (Press enter to leave): \\n')\n print(random.choice(answers))\n",
"step-3": "<mask token>\nanswers = ['It is certain', 'Without a doubt', 'Yes, definitely',\n 'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good',\n 'Yes', 'Signs point to yes', 'Reply hazy, try again', 'Ask again later',\n 'Better not tell you now', 'Cannot predict now',\n 'Concentrate and ask again', \"Don't count on it\", 'My reply is no',\n 'My sources say no', 'Outlook not so good', 'Very doubtful']\nans = '!'\nwhile ans:\n ans = input('Ask the magic 8 ball a question. (Press enter to leave): \\n')\n print(random.choice(answers))\n",
"step-4": "import random\nanswers = ['It is certain', 'Without a doubt', 'Yes, definitely',\n 'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good',\n 'Yes', 'Signs point to yes', 'Reply hazy, try again', 'Ask again later',\n 'Better not tell you now', 'Cannot predict now',\n 'Concentrate and ask again', \"Don't count on it\", 'My reply is no',\n 'My sources say no', 'Outlook not so good', 'Very doubtful']\nans = '!'\nwhile ans:\n ans = input('Ask the magic 8 ball a question. (Press enter to leave): \\n')\n print(random.choice(answers))\n",
"step-5": "import random #importing the random library from python\nanswers = [\"It is certain\", \"Without a doubt\", \"Yes, definitely\",\n \"You may rely on it\", \"As I see it, yes\", \"Most likely\",\n \"Outlook good\", \"Yes\", \"Signs point to yes\", \"Reply hazy, try again\",\n \"Ask again later\", \"Better not tell you now\", \"Cannot predict now\",\n \"Concentrate and ask again\", \"Don't count on it\", \"My reply is no\",\n \"My sources say no\", \"Outlook not so good\", \"Very doubtful\"] #here, we declare a list of strings. \nans = '!' #we give ans a value so that the while loop will execute. \nwhile ans: #This will keep on looping as long as ans is not blank. If a variable stores nothing, it returns false when checked\n ans = input(\"Ask the magic 8 ball a question. (Press enter to leave): \\n\") \n #The reason we store the input is so the user can exit the program by passing in nothing for ans\n print(random.choice(answers)) #the random library lets us draw a random string from a list. We then print it\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_rosters_from_excel(django_file):
workbook = xlrd.open_workbook(file_contents=django_file.read())
worksheet = workbook.sheet_by_name('Match_Rosters')
num_rows = worksheet.nrows - 1
cur_row = -1
rosters = []
while cur_row < num_rows:
cur_row += 1
if worksheet.cell_value(cur_row, 0
) == 'NCTTA Team Match Player Selection Form':
row = worksheet.row(cur_row + 4)
roster = {'round_match': worksheet.cell_value(cur_row + 2, 6),
'left_team_label': worksheet.cell_value(cur_row + 4, 3),
'right_team_label': worksheet.cell_value(cur_row + 4, 8),
'left_team_title': worksheet.cell_value(cur_row + 5, 1),
'right_team_title': worksheet.cell_value(cur_row + 5, 6),
'players': [], 'opponents': []}
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 11, 0), 'player_name': worksheet.cell_value(
cur_row + 11, 1)})
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 12, 0), 'player_name': worksheet.cell_value(
cur_row + 12, 1)})
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 13, 0), 'player_name': worksheet.cell_value(
cur_row + 13, 1)})
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 14, 0), 'player_name': worksheet.cell_value(
cur_row + 14, 1)})
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 15, 0), 'player_name': worksheet.cell_value(
cur_row + 15, 1)})
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 16, 0), 'player_name': worksheet.cell_value(
cur_row + 16, 1)})
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 17, 0), 'player_name': worksheet.cell_value(
cur_row + 17, 1)})
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 18, 0), 'player_name': worksheet.cell_value(
cur_row + 18, 1)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 11, 6), 'player_rating': worksheet.cell_value(
cur_row + 11, 9)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 12, 6), 'player_rating': worksheet.cell_value(
cur_row + 12, 9)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 13, 6), 'player_rating': worksheet.cell_value(
cur_row + 13, 9)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 14, 6), 'player_rating': worksheet.cell_value(
cur_row + 14, 9)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 15, 6), 'player_rating': worksheet.cell_value(
cur_row + 15, 9)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 16, 6), 'player_rating': worksheet.cell_value(
cur_row + 16, 9)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 17, 6), 'player_rating': worksheet.cell_value(
cur_row + 17, 9)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 18, 6), 'player_rating': worksheet.cell_value(
cur_row + 18, 9)})
label_letter = ''.join(i for i in roster['players'][0][
'player_label'] if not i.isdigit())
if label_letter == str(roster['left_team_label'].strip()):
roster['active_team'] = 'left'
else:
roster['active_team'] = 'right'
if roster['opponents'][0]['player_name'] != '' and roster['players'
][0]['player_name'] and roster['round_match'] != '':
rosters.append(roster)
return rosters
<|reserved_special_token_1|>
import xlrd
def get_rosters_from_excel(django_file):
workbook = xlrd.open_workbook(file_contents=django_file.read())
worksheet = workbook.sheet_by_name('Match_Rosters')
num_rows = worksheet.nrows - 1
cur_row = -1
rosters = []
while cur_row < num_rows:
cur_row += 1
if worksheet.cell_value(cur_row, 0
) == 'NCTTA Team Match Player Selection Form':
row = worksheet.row(cur_row + 4)
roster = {'round_match': worksheet.cell_value(cur_row + 2, 6),
'left_team_label': worksheet.cell_value(cur_row + 4, 3),
'right_team_label': worksheet.cell_value(cur_row + 4, 8),
'left_team_title': worksheet.cell_value(cur_row + 5, 1),
'right_team_title': worksheet.cell_value(cur_row + 5, 6),
'players': [], 'opponents': []}
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 11, 0), 'player_name': worksheet.cell_value(
cur_row + 11, 1)})
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 12, 0), 'player_name': worksheet.cell_value(
cur_row + 12, 1)})
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 13, 0), 'player_name': worksheet.cell_value(
cur_row + 13, 1)})
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 14, 0), 'player_name': worksheet.cell_value(
cur_row + 14, 1)})
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 15, 0), 'player_name': worksheet.cell_value(
cur_row + 15, 1)})
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 16, 0), 'player_name': worksheet.cell_value(
cur_row + 16, 1)})
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 17, 0), 'player_name': worksheet.cell_value(
cur_row + 17, 1)})
roster['players'].append({'player_label': worksheet.cell_value(
cur_row + 18, 0), 'player_name': worksheet.cell_value(
cur_row + 18, 1)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 11, 6), 'player_rating': worksheet.cell_value(
cur_row + 11, 9)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 12, 6), 'player_rating': worksheet.cell_value(
cur_row + 12, 9)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 13, 6), 'player_rating': worksheet.cell_value(
cur_row + 13, 9)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 14, 6), 'player_rating': worksheet.cell_value(
cur_row + 14, 9)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 15, 6), 'player_rating': worksheet.cell_value(
cur_row + 15, 9)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 16, 6), 'player_rating': worksheet.cell_value(
cur_row + 16, 9)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 17, 6), 'player_rating': worksheet.cell_value(
cur_row + 17, 9)})
roster['opponents'].append({'player_name': worksheet.cell_value
(cur_row + 18, 6), 'player_rating': worksheet.cell_value(
cur_row + 18, 9)})
label_letter = ''.join(i for i in roster['players'][0][
'player_label'] if not i.isdigit())
if label_letter == str(roster['left_team_label'].strip()):
roster['active_team'] = 'left'
else:
roster['active_team'] = 'right'
if roster['opponents'][0]['player_name'] != '' and roster['players'
][0]['player_name'] and roster['round_match'] != '':
rosters.append(roster)
return rosters
<|reserved_special_token_1|>
import xlrd
def get_rosters_from_excel(django_file):
workbook = xlrd.open_workbook(file_contents=django_file.read())
worksheet = workbook.sheet_by_name('Match_Rosters')
num_rows = worksheet.nrows - 1
cur_row = -1
rosters = []
while cur_row < num_rows:
cur_row += 1
if worksheet.cell_value(cur_row, 0) == "NCTTA Team Match Player Selection Form":
row = worksheet.row(cur_row + 4)
roster = {
"round_match" : worksheet.cell_value(cur_row + 2, 6), # consider adding the time in +2, 8
"left_team_label" : worksheet.cell_value(cur_row + 4, 3),
"right_team_label" : worksheet.cell_value(cur_row + 4, 8),
"left_team_title" : worksheet.cell_value(cur_row + 5, 1),
"right_team_title" : worksheet.cell_value(cur_row + 5, 6),
"players" : [],
"opponents" : [],
# Don't forget about doubles!
}
roster["players"].append({
"player_label" : worksheet.cell_value(cur_row + 11, 0),
"player_name" : worksheet.cell_value(cur_row + 11, 1)
})
roster["players"].append({
"player_label" : worksheet.cell_value(cur_row + 12, 0),
"player_name" : worksheet.cell_value(cur_row + 12, 1),
})
roster["players"].append({
"player_label" : worksheet.cell_value(cur_row + 13, 0),
"player_name" : worksheet.cell_value(cur_row + 13, 1),
})
roster["players"].append({
"player_label" : worksheet.cell_value(cur_row + 14, 0),
"player_name" : worksheet.cell_value(cur_row + 14, 1),
})
roster["players"].append({
"player_label" : worksheet.cell_value(cur_row + 15, 0),
"player_name" : worksheet.cell_value(cur_row + 15, 1),
})
roster["players"].append({
"player_label" : worksheet.cell_value(cur_row + 16, 0),
"player_name" : worksheet.cell_value(cur_row + 16, 1),
})
roster["players"].append({
"player_label" : worksheet.cell_value(cur_row + 17, 0),
"player_name" : worksheet.cell_value(cur_row + 17, 1),
})
roster["players"].append({
"player_label" : worksheet.cell_value(cur_row + 18, 0),
"player_name" : worksheet.cell_value(cur_row + 18, 1),
})
# The opponents
roster["opponents"].append({
"player_name" : worksheet.cell_value(cur_row + 11, 6),
"player_rating" : worksheet.cell_value(cur_row + 11, 9),
})
roster["opponents"].append({
"player_name" : worksheet.cell_value(cur_row + 12, 6),
"player_rating" : worksheet.cell_value(cur_row + 12, 9),
})
roster["opponents"].append({
"player_name" : worksheet.cell_value(cur_row + 13, 6),
"player_rating" : worksheet.cell_value(cur_row + 13, 9),
})
roster["opponents"].append({
"player_name" : worksheet.cell_value(cur_row + 14, 6),
"player_rating" : worksheet.cell_value(cur_row + 14, 9),
})
roster["opponents"].append({
"player_name" : worksheet.cell_value(cur_row + 15, 6),
"player_rating" : worksheet.cell_value(cur_row + 15, 9),
})
roster["opponents"].append({
"player_name" : worksheet.cell_value(cur_row + 16, 6),
"player_rating" : worksheet.cell_value(cur_row + 16, 9),
})
roster["opponents"].append({
"player_name" : worksheet.cell_value(cur_row + 17, 6),
"player_rating" : worksheet.cell_value(cur_row + 17, 9),
})
roster["opponents"].append({
"player_name" : worksheet.cell_value(cur_row + 18, 6),
"player_rating" : worksheet.cell_value(cur_row + 18, 9),
})
label_letter = ''.join(i for i in roster["players"][0]["player_label"] if not i.isdigit())
if label_letter == str(roster["left_team_label"].strip()):
roster["active_team"] = "left"
else:
roster["active_team"] = "right"
#for key, value in roster.items():
# print " ", key, ":", value
if roster["opponents"][0]["player_name"] != "" and roster["players"][0]["player_name"] and roster["round_match"] != "":
rosters.append(roster)
return rosters
|
flexible
|
{
"blob_id": "a7a219e9ea5cdec004ef936958994ed1f5a96103",
"index": 3244,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_rosters_from_excel(django_file):\n workbook = xlrd.open_workbook(file_contents=django_file.read())\n worksheet = workbook.sheet_by_name('Match_Rosters')\n num_rows = worksheet.nrows - 1\n cur_row = -1\n rosters = []\n while cur_row < num_rows:\n cur_row += 1\n if worksheet.cell_value(cur_row, 0\n ) == 'NCTTA Team Match Player Selection Form':\n row = worksheet.row(cur_row + 4)\n roster = {'round_match': worksheet.cell_value(cur_row + 2, 6),\n 'left_team_label': worksheet.cell_value(cur_row + 4, 3),\n 'right_team_label': worksheet.cell_value(cur_row + 4, 8),\n 'left_team_title': worksheet.cell_value(cur_row + 5, 1),\n 'right_team_title': worksheet.cell_value(cur_row + 5, 6),\n 'players': [], 'opponents': []}\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 11, 0), 'player_name': worksheet.cell_value(\n cur_row + 11, 1)})\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 12, 0), 'player_name': worksheet.cell_value(\n cur_row + 12, 1)})\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 13, 0), 'player_name': worksheet.cell_value(\n cur_row + 13, 1)})\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 14, 0), 'player_name': worksheet.cell_value(\n cur_row + 14, 1)})\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 15, 0), 'player_name': worksheet.cell_value(\n cur_row + 15, 1)})\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 16, 0), 'player_name': worksheet.cell_value(\n cur_row + 16, 1)})\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 17, 0), 'player_name': worksheet.cell_value(\n cur_row + 17, 1)})\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 18, 0), 'player_name': worksheet.cell_value(\n cur_row + 18, 1)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 11, 6), 
'player_rating': worksheet.cell_value(\n cur_row + 11, 9)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 12, 6), 'player_rating': worksheet.cell_value(\n cur_row + 12, 9)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 13, 6), 'player_rating': worksheet.cell_value(\n cur_row + 13, 9)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 14, 6), 'player_rating': worksheet.cell_value(\n cur_row + 14, 9)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 15, 6), 'player_rating': worksheet.cell_value(\n cur_row + 15, 9)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 16, 6), 'player_rating': worksheet.cell_value(\n cur_row + 16, 9)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 17, 6), 'player_rating': worksheet.cell_value(\n cur_row + 17, 9)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 18, 6), 'player_rating': worksheet.cell_value(\n cur_row + 18, 9)})\n label_letter = ''.join(i for i in roster['players'][0][\n 'player_label'] if not i.isdigit())\n if label_letter == str(roster['left_team_label'].strip()):\n roster['active_team'] = 'left'\n else:\n roster['active_team'] = 'right'\n if roster['opponents'][0]['player_name'] != '' and roster['players'\n ][0]['player_name'] and roster['round_match'] != '':\n rosters.append(roster)\n return rosters\n",
"step-3": "import xlrd\n\n\ndef get_rosters_from_excel(django_file):\n workbook = xlrd.open_workbook(file_contents=django_file.read())\n worksheet = workbook.sheet_by_name('Match_Rosters')\n num_rows = worksheet.nrows - 1\n cur_row = -1\n rosters = []\n while cur_row < num_rows:\n cur_row += 1\n if worksheet.cell_value(cur_row, 0\n ) == 'NCTTA Team Match Player Selection Form':\n row = worksheet.row(cur_row + 4)\n roster = {'round_match': worksheet.cell_value(cur_row + 2, 6),\n 'left_team_label': worksheet.cell_value(cur_row + 4, 3),\n 'right_team_label': worksheet.cell_value(cur_row + 4, 8),\n 'left_team_title': worksheet.cell_value(cur_row + 5, 1),\n 'right_team_title': worksheet.cell_value(cur_row + 5, 6),\n 'players': [], 'opponents': []}\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 11, 0), 'player_name': worksheet.cell_value(\n cur_row + 11, 1)})\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 12, 0), 'player_name': worksheet.cell_value(\n cur_row + 12, 1)})\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 13, 0), 'player_name': worksheet.cell_value(\n cur_row + 13, 1)})\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 14, 0), 'player_name': worksheet.cell_value(\n cur_row + 14, 1)})\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 15, 0), 'player_name': worksheet.cell_value(\n cur_row + 15, 1)})\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 16, 0), 'player_name': worksheet.cell_value(\n cur_row + 16, 1)})\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 17, 0), 'player_name': worksheet.cell_value(\n cur_row + 17, 1)})\n roster['players'].append({'player_label': worksheet.cell_value(\n cur_row + 18, 0), 'player_name': worksheet.cell_value(\n cur_row + 18, 1)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 11, 6), 
'player_rating': worksheet.cell_value(\n cur_row + 11, 9)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 12, 6), 'player_rating': worksheet.cell_value(\n cur_row + 12, 9)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 13, 6), 'player_rating': worksheet.cell_value(\n cur_row + 13, 9)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 14, 6), 'player_rating': worksheet.cell_value(\n cur_row + 14, 9)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 15, 6), 'player_rating': worksheet.cell_value(\n cur_row + 15, 9)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 16, 6), 'player_rating': worksheet.cell_value(\n cur_row + 16, 9)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 17, 6), 'player_rating': worksheet.cell_value(\n cur_row + 17, 9)})\n roster['opponents'].append({'player_name': worksheet.cell_value\n (cur_row + 18, 6), 'player_rating': worksheet.cell_value(\n cur_row + 18, 9)})\n label_letter = ''.join(i for i in roster['players'][0][\n 'player_label'] if not i.isdigit())\n if label_letter == str(roster['left_team_label'].strip()):\n roster['active_team'] = 'left'\n else:\n roster['active_team'] = 'right'\n if roster['opponents'][0]['player_name'] != '' and roster['players'\n ][0]['player_name'] and roster['round_match'] != '':\n rosters.append(roster)\n return rosters\n",
"step-4": "import xlrd\n\ndef get_rosters_from_excel(django_file):\n workbook = xlrd.open_workbook(file_contents=django_file.read())\n worksheet = workbook.sheet_by_name('Match_Rosters')\n\n num_rows = worksheet.nrows - 1\n cur_row = -1\n\n rosters = []\n\n while cur_row < num_rows:\n cur_row += 1\n\n if worksheet.cell_value(cur_row, 0) == \"NCTTA Team Match Player Selection Form\":\n row = worksheet.row(cur_row + 4)\n roster = {\n \"round_match\" : worksheet.cell_value(cur_row + 2, 6), # consider adding the time in +2, 8\n \"left_team_label\" : worksheet.cell_value(cur_row + 4, 3),\n \"right_team_label\" : worksheet.cell_value(cur_row + 4, 8),\n \"left_team_title\" : worksheet.cell_value(cur_row + 5, 1),\n \"right_team_title\" : worksheet.cell_value(cur_row + 5, 6),\n \"players\" : [],\n \"opponents\" : [],\n # Don't forget about doubles!\n }\n roster[\"players\"].append({\n \"player_label\" : worksheet.cell_value(cur_row + 11, 0),\n \"player_name\" : worksheet.cell_value(cur_row + 11, 1)\n })\n roster[\"players\"].append({\n \"player_label\" : worksheet.cell_value(cur_row + 12, 0),\n \"player_name\" : worksheet.cell_value(cur_row + 12, 1),\n })\n roster[\"players\"].append({\n \"player_label\" : worksheet.cell_value(cur_row + 13, 0),\n \"player_name\" : worksheet.cell_value(cur_row + 13, 1),\n })\n roster[\"players\"].append({\n \"player_label\" : worksheet.cell_value(cur_row + 14, 0),\n \"player_name\" : worksheet.cell_value(cur_row + 14, 1),\n })\n roster[\"players\"].append({\n \"player_label\" : worksheet.cell_value(cur_row + 15, 0),\n \"player_name\" : worksheet.cell_value(cur_row + 15, 1),\n })\n roster[\"players\"].append({\n \"player_label\" : worksheet.cell_value(cur_row + 16, 0),\n \"player_name\" : worksheet.cell_value(cur_row + 16, 1),\n })\n roster[\"players\"].append({\n \"player_label\" : worksheet.cell_value(cur_row + 17, 0),\n \"player_name\" : worksheet.cell_value(cur_row + 17, 1),\n })\n roster[\"players\"].append({\n \"player_label\" : 
worksheet.cell_value(cur_row + 18, 0),\n \"player_name\" : worksheet.cell_value(cur_row + 18, 1),\n })\n # The opponents\n roster[\"opponents\"].append({\n \"player_name\" : worksheet.cell_value(cur_row + 11, 6),\n \"player_rating\" : worksheet.cell_value(cur_row + 11, 9),\n })\n roster[\"opponents\"].append({\n \"player_name\" : worksheet.cell_value(cur_row + 12, 6),\n \"player_rating\" : worksheet.cell_value(cur_row + 12, 9),\n })\n roster[\"opponents\"].append({\n \"player_name\" : worksheet.cell_value(cur_row + 13, 6),\n \"player_rating\" : worksheet.cell_value(cur_row + 13, 9),\n })\n roster[\"opponents\"].append({\n \"player_name\" : worksheet.cell_value(cur_row + 14, 6),\n \"player_rating\" : worksheet.cell_value(cur_row + 14, 9),\n })\n roster[\"opponents\"].append({\n \"player_name\" : worksheet.cell_value(cur_row + 15, 6),\n \"player_rating\" : worksheet.cell_value(cur_row + 15, 9),\n })\n roster[\"opponents\"].append({\n \"player_name\" : worksheet.cell_value(cur_row + 16, 6),\n \"player_rating\" : worksheet.cell_value(cur_row + 16, 9),\n })\n roster[\"opponents\"].append({\n \"player_name\" : worksheet.cell_value(cur_row + 17, 6),\n \"player_rating\" : worksheet.cell_value(cur_row + 17, 9),\n })\n roster[\"opponents\"].append({\n \"player_name\" : worksheet.cell_value(cur_row + 18, 6),\n \"player_rating\" : worksheet.cell_value(cur_row + 18, 9),\n })\n\n label_letter = ''.join(i for i in roster[\"players\"][0][\"player_label\"] if not i.isdigit())\n if label_letter == str(roster[\"left_team_label\"].strip()):\n roster[\"active_team\"] = \"left\"\n else:\n roster[\"active_team\"] = \"right\"\n\n\n #for key, value in roster.items():\n # print \" \", key, \":\", value\n if roster[\"opponents\"][0][\"player_name\"] != \"\" and roster[\"players\"][0][\"player_name\"] and roster[\"round_match\"] != \"\":\n rosters.append(roster)\n return rosters\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# #writing a file
# fout = open('Session14/output.txt', 'w')
# line1 = "How many roads must a man walk down\n"
# fout.write(line1)
# line2 = "Before you call him a man?\n"
# fout.write(line2)
# #when you are done writing, you should close the file.
# fout.close()
# #if you dont close the file, it gets closed for you when the program dies
#exercise 1
# def sed(pattern, replace, source, dest):
# with open(source, 'r') as f_r:
# with open(dest, 'w') as f_w:
# for line in f_r:
# new_line = line.replace(pattern, replace)
# f_w.write(new_line)
# pattern = " man "
# replace = " woman "
# source = "Session14/output.txt"
# dest = "Session14/output2.txt"
# sed(pattern, replace, source, dest)
import os
cwd = os.getcwd()
#cwd stands for "current working directory"
# print(cwd)
#os.path provides other functions for working with filenames and paths
# os.path.abspath('output.txt')
# os.path.exists('output.txt')
# os.path.isdir('output.txt')
# os.path.isdir('/exercises')
# os.path.isfile('output.txt')
# os.listdir(cwd)
def walk(dirname):
"""Prints the names of all files in
dirname and its subdirectories.
dirname: string name of directory
"""
for name in os.listdir(dirname):
path = os.path.join(dirname, name)
if os.path.isfile(path):
print(path)
else:
walk(path)
#os.path.join takes a directory and a file name and joins them inot a complete path
def walk2(dirname):
"""Prints the names of all files in
dirname and its subdirectories.
dirname: string name of directory
"""
for root, dirs, files in os.walk(dirname):
for filename in files:
print(os.path.join(root, filename))
|
normal
|
{
"blob_id": "de1262da699a18266ad8673597391f625783a44d",
"index": 5721,
"step-1": "<mask token>\n\n\ndef walk2(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))\n",
"step-2": "<mask token>\n\n\ndef walk(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)\n\n\ndef walk2(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))\n",
"step-3": "<mask token>\ncwd = os.getcwd()\n\n\ndef walk(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)\n\n\ndef walk2(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))\n",
"step-4": "import os\ncwd = os.getcwd()\n\n\ndef walk(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)\n\n\ndef walk2(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))\n",
"step-5": "# #writing a file\n# fout = open('Session14/output.txt', 'w')\n# line1 = \"How many roads must a man walk down\\n\"\n# fout.write(line1)\n# line2 = \"Before you call him a man?\\n\"\n# fout.write(line2)\n# #when you are done writing, you should close the file.\n# fout.close()\n# #if you dont close the file, it gets closed for you when the program dies\n\n#exercise 1\n# def sed(pattern, replace, source, dest):\n# with open(source, 'r') as f_r:\n# with open(dest, 'w') as f_w:\n# for line in f_r:\n# new_line = line.replace(pattern, replace)\n# f_w.write(new_line)\n\n# pattern = \" man \"\n# replace = \" woman \"\n# source = \"Session14/output.txt\"\n# dest = \"Session14/output2.txt\"\n# sed(pattern, replace, source, dest)\n\nimport os\ncwd = os.getcwd()\n#cwd stands for \"current working directory\"\n# print(cwd)\n\n#os.path provides other functions for working with filenames and paths\n# os.path.abspath('output.txt')\n# os.path.exists('output.txt')\n# os.path.isdir('output.txt')\n# os.path.isdir('/exercises')\n# os.path.isfile('output.txt')\n# os.listdir(cwd)\n\ndef walk(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\" \n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)\n#os.path.join takes a directory and a file name and joins them inot a complete path\n\ndef walk2(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
a = 2
while a == 1:
b = source()
c = function(b)
|
normal
|
{
"blob_id": "56cae7b7a0338bd4a405cdc3cdcd9945a9df8823",
"index": 5839,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile a == 1:\n b = source()\n<mask token>\n",
"step-3": "a = 2\nwhile a == 1:\n b = source()\nc = function(b)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def cgroup_mount_option(command: Callable[..., None]) ->Callable[..., None]:
"""
Option for choosing to mount `/sys/fs/cgroup` into the container.
"""
function = click.option('--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup',
default=True, show_default=True, help=
'Mounting ``/sys/fs/cgroup`` from the host is required to run applications which require ``cgroup`` isolation. Choose to not mount ``/sys/fs/cgroup`` if it is not available on the host.'
)(command)
return function
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from typing import Callable
import click
def cgroup_mount_option(command: Callable[..., None]) ->Callable[..., None]:
"""
Option for choosing to mount `/sys/fs/cgroup` into the container.
"""
function = click.option('--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup',
default=True, show_default=True, help=
'Mounting ``/sys/fs/cgroup`` from the host is required to run applications which require ``cgroup`` isolation. Choose to not mount ``/sys/fs/cgroup`` if it is not available on the host.'
)(command)
return function
<|reserved_special_token_1|>
"""
Mount /sys/fs/cgroup Option
"""
from typing import Callable
import click
def cgroup_mount_option(command: Callable[..., None]) -> Callable[..., None]:
"""
Option for choosing to mount `/sys/fs/cgroup` into the container.
"""
function = click.option(
'--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup',
default=True,
show_default=True,
help=(
'Mounting ``/sys/fs/cgroup`` from the host is required to run '
'applications which require ``cgroup`` isolation. '
'Choose to not mount ``/sys/fs/cgroup`` if it is not available on '
'the host.'
),
)(command) # type: Callable[..., None]
return function
|
flexible
|
{
"blob_id": "237f5e2e37187e26b5628032e37d3a525ef72b9a",
"index": 7261,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef cgroup_mount_option(command: Callable[..., None]) ->Callable[..., None]:\n \"\"\"\n Option for choosing to mount `/sys/fs/cgroup` into the container.\n \"\"\"\n function = click.option('--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup',\n default=True, show_default=True, help=\n 'Mounting ``/sys/fs/cgroup`` from the host is required to run applications which require ``cgroup`` isolation. Choose to not mount ``/sys/fs/cgroup`` if it is not available on the host.'\n )(command)\n return function\n",
"step-3": "<mask token>\nfrom typing import Callable\nimport click\n\n\ndef cgroup_mount_option(command: Callable[..., None]) ->Callable[..., None]:\n \"\"\"\n Option for choosing to mount `/sys/fs/cgroup` into the container.\n \"\"\"\n function = click.option('--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup',\n default=True, show_default=True, help=\n 'Mounting ``/sys/fs/cgroup`` from the host is required to run applications which require ``cgroup`` isolation. Choose to not mount ``/sys/fs/cgroup`` if it is not available on the host.'\n )(command)\n return function\n",
"step-4": "\"\"\"\nMount /sys/fs/cgroup Option\n\"\"\"\n\nfrom typing import Callable\n\nimport click\n\n\ndef cgroup_mount_option(command: Callable[..., None]) -> Callable[..., None]:\n \"\"\"\n Option for choosing to mount `/sys/fs/cgroup` into the container.\n \"\"\"\n function = click.option(\n '--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup',\n default=True,\n show_default=True,\n help=(\n 'Mounting ``/sys/fs/cgroup`` from the host is required to run '\n 'applications which require ``cgroup`` isolation. '\n 'Choose to not mount ``/sys/fs/cgroup`` if it is not available on '\n 'the host.'\n ),\n )(command) # type: Callable[..., None]\n return function\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
####################################################################
# a COM client coded in Python: talk to MS-Word via its COM object
# model; uses either dynamic dispatch (run-time lookup/binding),
# or the static and faster type-library dispatch if makepy.py has
# been run; install the windows win32all extensions package to use
# this interface; Word runs hidden unless Visible is set to 1 (and
# Visible lets you watch, but impacts interactive Word sessions);
####################################################################
from sys import argv
docdir = 'C:\\temp\\'
if len(argv) == 2: docdir = argv[1] # ex: comclient.py a:\
from win32com.client import Dispatch # early or late binding
word = Dispatch('Word.Application') # connect/start word
word.Visible = 1 # else word runs hidden
# create and save new doc file
newdoc = word.Documents.Add() # call word methods
spot = newdoc.Range(0,0)
spot.InsertBefore('Hello COM client world!') # insert some text
newdoc.SaveAs(docdir + 'pycom.doc') # save in doc file
newdoc.SaveAs(docdir + 'copy.doc')
newdoc.Close()
# open and change a doc file
olddoc = word.Documents.Open(docdir + 'copy.doc')
finder = word.Selection.Find
finder.text = 'COM'
finder.Execute()
word.Selection.TypeText('Automation')
olddoc.Close()
# and so on: see Word's COM interface specs
|
normal
|
{
"blob_id": "df19aa720993c2385a6d025cf7ec8f3935ee4191",
"index": 9343,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(argv) == 2:\n docdir = argv[1]\n<mask token>\nspot.InsertBefore('Hello COM client world!')\nnewdoc.SaveAs(docdir + 'pycom.doc')\nnewdoc.SaveAs(docdir + 'copy.doc')\nnewdoc.Close()\n<mask token>\nfinder.Execute()\nword.Selection.TypeText('Automation')\nolddoc.Close()\n",
"step-3": "<mask token>\ndocdir = 'C:\\\\temp\\\\'\nif len(argv) == 2:\n docdir = argv[1]\n<mask token>\nword = Dispatch('Word.Application')\nword.Visible = 1\nnewdoc = word.Documents.Add()\nspot = newdoc.Range(0, 0)\nspot.InsertBefore('Hello COM client world!')\nnewdoc.SaveAs(docdir + 'pycom.doc')\nnewdoc.SaveAs(docdir + 'copy.doc')\nnewdoc.Close()\nolddoc = word.Documents.Open(docdir + 'copy.doc')\nfinder = word.Selection.Find\nfinder.text = 'COM'\nfinder.Execute()\nword.Selection.TypeText('Automation')\nolddoc.Close()\n",
"step-4": "from sys import argv\ndocdir = 'C:\\\\temp\\\\'\nif len(argv) == 2:\n docdir = argv[1]\nfrom win32com.client import Dispatch\nword = Dispatch('Word.Application')\nword.Visible = 1\nnewdoc = word.Documents.Add()\nspot = newdoc.Range(0, 0)\nspot.InsertBefore('Hello COM client world!')\nnewdoc.SaveAs(docdir + 'pycom.doc')\nnewdoc.SaveAs(docdir + 'copy.doc')\nnewdoc.Close()\nolddoc = word.Documents.Open(docdir + 'copy.doc')\nfinder = word.Selection.Find\nfinder.text = 'COM'\nfinder.Execute()\nword.Selection.TypeText('Automation')\nolddoc.Close()\n",
"step-5": "####################################################################\n# a COM client coded in Python: talk to MS-Word via its COM object\n# model; uses either dynamic dispatch (run-time lookup/binding), \n# or the static and faster type-library dispatch if makepy.py has \n# been run; install the windows win32all extensions package to use \n# this interface; Word runs hidden unless Visible is set to 1 (and\n# Visible lets you watch, but impacts interactive Word sessions);\n####################################################################\n\nfrom sys import argv\ndocdir = 'C:\\\\temp\\\\'\nif len(argv) == 2: docdir = argv[1] # ex: comclient.py a:\\\n\nfrom win32com.client import Dispatch # early or late binding\nword = Dispatch('Word.Application') # connect/start word\nword.Visible = 1 # else word runs hidden\n\n# create and save new doc file\nnewdoc = word.Documents.Add() # call word methods\nspot = newdoc.Range(0,0)\nspot.InsertBefore('Hello COM client world!') # insert some text\nnewdoc.SaveAs(docdir + 'pycom.doc') # save in doc file\nnewdoc.SaveAs(docdir + 'copy.doc') \nnewdoc.Close()\n\n# open and change a doc file\nolddoc = word.Documents.Open(docdir + 'copy.doc')\nfinder = word.Selection.Find\nfinder.text = 'COM'\nfinder.Execute()\nword.Selection.TypeText('Automation')\nolddoc.Close()\n\n# and so on: see Word's COM interface specs\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
import sys, re
window = 2
for line in sys.stdin:
line = line.strip()
twits = line.split()
i = 0
while i <len(twits):
j = 0
while j <len(twits):
if i!= j:
print("%s%s\t%d" % (twits[i]+' ', twits[j], 1))
j+=1
i+=1
|
normal
|
{
"blob_id": "e884825325ceb401142cab0618d9d4e70e475cf5",
"index": 893,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in sys.stdin:\n line = line.strip()\n twits = line.split()\n i = 0\n while i < len(twits):\n j = 0\n while j < len(twits):\n if i != j:\n print('%s%s\\t%d' % (twits[i] + ' ', twits[j], 1))\n j += 1\n i += 1\n",
"step-3": "<mask token>\nwindow = 2\nfor line in sys.stdin:\n line = line.strip()\n twits = line.split()\n i = 0\n while i < len(twits):\n j = 0\n while j < len(twits):\n if i != j:\n print('%s%s\\t%d' % (twits[i] + ' ', twits[j], 1))\n j += 1\n i += 1\n",
"step-4": "import sys, re\nwindow = 2\nfor line in sys.stdin:\n line = line.strip()\n twits = line.split()\n i = 0\n while i < len(twits):\n j = 0\n while j < len(twits):\n if i != j:\n print('%s%s\\t%d' % (twits[i] + ' ', twits[j], 1))\n j += 1\n i += 1\n",
"step-5": "#!/usr/bin/env python\n\nimport sys, re\n\nwindow = 2\n\nfor line in sys.stdin:\n line = line.strip()\n twits = line.split()\n i = 0\n while i <len(twits):\n j = 0\n while j <len(twits):\n if i!= j:\n print(\"%s%s\\t%d\" % (twits[i]+' ', twits[j], 1))\n j+=1\n i+=1",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class ModelEvaluator(Evaluator):
def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,
mixed_precision: bool=True):
self.dataset = dataset
self.mixed_precision = mixed_precision
self.loader = DataLoader(dataset, batch_size, shuffle=False,
num_workers=num_workers, drop_last=False)
@property
def num_batches(self):
return len(self.loader)
def evaluate(self, model: Classifier, device: Optional[Union[torch.
device, str]]=None) ->Evaluator.Result:
return expand_generator(self.evaluate_iter(model, device),
return_only=True)
def evaluate_iter(self, model: Classifier, device: Optional[Union[torch
.device, str]]=None) ->Generator[dict, None, Evaluator.Result]:
with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled
=self.mixed_precision):
mean_accuracy = 0.0
mean_log_loss = 0.0
for i, (x, y) in enumerate(self.loader):
x = x.to(device)
y = y.to(device)
logits = model(x)
correct = torch.sum(logits.argmax(-1) == y).item()
log_loss = F.cross_entropy(logits, y, reduction='sum').item()
mean_accuracy += correct / len(self.dataset)
mean_log_loss += log_loss / len(self.dataset)
yield dict(batch=i)
return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Evaluator(object):
class Result(NamedTuple):
accuracy: float
log_loss: float
<|reserved_special_token_0|>
class ModelEvaluator(Evaluator):
def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,
mixed_precision: bool=True):
self.dataset = dataset
self.mixed_precision = mixed_precision
self.loader = DataLoader(dataset, batch_size, shuffle=False,
num_workers=num_workers, drop_last=False)
@property
def num_batches(self):
return len(self.loader)
def evaluate(self, model: Classifier, device: Optional[Union[torch.
device, str]]=None) ->Evaluator.Result:
return expand_generator(self.evaluate_iter(model, device),
return_only=True)
def evaluate_iter(self, model: Classifier, device: Optional[Union[torch
.device, str]]=None) ->Generator[dict, None, Evaluator.Result]:
with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled
=self.mixed_precision):
mean_accuracy = 0.0
mean_log_loss = 0.0
for i, (x, y) in enumerate(self.loader):
x = x.to(device)
y = y.to(device)
logits = model(x)
correct = torch.sum(logits.argmax(-1) == y).item()
log_loss = F.cross_entropy(logits, y, reduction='sum').item()
mean_accuracy += correct / len(self.dataset)
mean_log_loss += log_loss / len(self.dataset)
yield dict(batch=i)
return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Evaluator(object):
class Result(NamedTuple):
accuracy: float
log_loss: float
def evaluate(self, *args, **kwargs):
return NotImplemented
class ModelEvaluator(Evaluator):
def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,
mixed_precision: bool=True):
self.dataset = dataset
self.mixed_precision = mixed_precision
self.loader = DataLoader(dataset, batch_size, shuffle=False,
num_workers=num_workers, drop_last=False)
@property
def num_batches(self):
return len(self.loader)
def evaluate(self, model: Classifier, device: Optional[Union[torch.
device, str]]=None) ->Evaluator.Result:
return expand_generator(self.evaluate_iter(model, device),
return_only=True)
def evaluate_iter(self, model: Classifier, device: Optional[Union[torch
.device, str]]=None) ->Generator[dict, None, Evaluator.Result]:
with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled
=self.mixed_precision):
mean_accuracy = 0.0
mean_log_loss = 0.0
for i, (x, y) in enumerate(self.loader):
x = x.to(device)
y = y.to(device)
logits = model(x)
correct = torch.sum(logits.argmax(-1) == y).item()
log_loss = F.cross_entropy(logits, y, reduction='sum').item()
mean_accuracy += correct / len(self.dataset)
mean_log_loss += log_loss / len(self.dataset)
yield dict(batch=i)
return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)
<|reserved_special_token_1|>
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from modules import Classifier
from typing import Generator, NamedTuple, Optional, Union
from utils import expand_generator
class Evaluator(object):
class Result(NamedTuple):
accuracy: float
log_loss: float
def evaluate(self, *args, **kwargs):
return NotImplemented
class ModelEvaluator(Evaluator):
def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,
mixed_precision: bool=True):
self.dataset = dataset
self.mixed_precision = mixed_precision
self.loader = DataLoader(dataset, batch_size, shuffle=False,
num_workers=num_workers, drop_last=False)
@property
def num_batches(self):
return len(self.loader)
def evaluate(self, model: Classifier, device: Optional[Union[torch.
device, str]]=None) ->Evaluator.Result:
return expand_generator(self.evaluate_iter(model, device),
return_only=True)
def evaluate_iter(self, model: Classifier, device: Optional[Union[torch
.device, str]]=None) ->Generator[dict, None, Evaluator.Result]:
with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled
=self.mixed_precision):
mean_accuracy = 0.0
mean_log_loss = 0.0
for i, (x, y) in enumerate(self.loader):
x = x.to(device)
y = y.to(device)
logits = model(x)
correct = torch.sum(logits.argmax(-1) == y).item()
log_loss = F.cross_entropy(logits, y, reduction='sum').item()
mean_accuracy += correct / len(self.dataset)
mean_log_loss += log_loss / len(self.dataset)
yield dict(batch=i)
return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)
<|reserved_special_token_1|>
#
# Wrappers for model evaluation
#
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from modules import Classifier
from typing import Generator, NamedTuple, Optional, Union
from utils import expand_generator
class Evaluator(object):
class Result(NamedTuple):
accuracy: float
log_loss: float
def evaluate(self, *args, **kwargs):
return NotImplemented
class ModelEvaluator(Evaluator):
def __init__(self, dataset: Dataset, batch_size: int, num_workers: int, mixed_precision: bool = True):
self.dataset = dataset
self.mixed_precision = mixed_precision
self.loader = DataLoader(dataset, batch_size, shuffle=False, num_workers=num_workers, drop_last=False)
@property
def num_batches(self):
return len(self.loader)
def evaluate(self, model: Classifier, device: Optional[Union[torch.device, str]] = None) -> Evaluator.Result:
return expand_generator(self.evaluate_iter(model, device), return_only=True)
def evaluate_iter(
self,
model: Classifier,
device: Optional[Union[torch.device, str]] = None) -> Generator[dict, None, Evaluator.Result]:
with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled=self.mixed_precision):
mean_accuracy = 0.
mean_log_loss = 0.
for i, (x, y) in enumerate(self.loader):
x = x.to(device)
y = y.to(device)
logits = model(x)
correct = torch.sum(logits.argmax(-1) == y).item()
log_loss = F.cross_entropy(logits, y, reduction='sum').item()
mean_accuracy += correct / len(self.dataset)
mean_log_loss += log_loss / len(self.dataset)
yield dict(batch=i)
return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)
|
flexible
|
{
"blob_id": "493dbf85069f2115896a5f5f5d593c8d95b85cff",
"index": 4594,
"step-1": "<mask token>\n\n\nclass ModelEvaluator(Evaluator):\n\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,\n mixed_precision: bool=True):\n self.dataset = dataset\n self.mixed_precision = mixed_precision\n self.loader = DataLoader(dataset, batch_size, shuffle=False,\n num_workers=num_workers, drop_last=False)\n\n @property\n def num_batches(self):\n return len(self.loader)\n\n def evaluate(self, model: Classifier, device: Optional[Union[torch.\n device, str]]=None) ->Evaluator.Result:\n return expand_generator(self.evaluate_iter(model, device),\n return_only=True)\n\n def evaluate_iter(self, model: Classifier, device: Optional[Union[torch\n .device, str]]=None) ->Generator[dict, None, Evaluator.Result]:\n with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled\n =self.mixed_precision):\n mean_accuracy = 0.0\n mean_log_loss = 0.0\n for i, (x, y) in enumerate(self.loader):\n x = x.to(device)\n y = y.to(device)\n logits = model(x)\n correct = torch.sum(logits.argmax(-1) == y).item()\n log_loss = F.cross_entropy(logits, y, reduction='sum').item()\n mean_accuracy += correct / len(self.dataset)\n mean_log_loss += log_loss / len(self.dataset)\n yield dict(batch=i)\n return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)\n",
"step-2": "<mask token>\n\n\nclass Evaluator(object):\n\n\n class Result(NamedTuple):\n accuracy: float\n log_loss: float\n <mask token>\n\n\nclass ModelEvaluator(Evaluator):\n\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,\n mixed_precision: bool=True):\n self.dataset = dataset\n self.mixed_precision = mixed_precision\n self.loader = DataLoader(dataset, batch_size, shuffle=False,\n num_workers=num_workers, drop_last=False)\n\n @property\n def num_batches(self):\n return len(self.loader)\n\n def evaluate(self, model: Classifier, device: Optional[Union[torch.\n device, str]]=None) ->Evaluator.Result:\n return expand_generator(self.evaluate_iter(model, device),\n return_only=True)\n\n def evaluate_iter(self, model: Classifier, device: Optional[Union[torch\n .device, str]]=None) ->Generator[dict, None, Evaluator.Result]:\n with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled\n =self.mixed_precision):\n mean_accuracy = 0.0\n mean_log_loss = 0.0\n for i, (x, y) in enumerate(self.loader):\n x = x.to(device)\n y = y.to(device)\n logits = model(x)\n correct = torch.sum(logits.argmax(-1) == y).item()\n log_loss = F.cross_entropy(logits, y, reduction='sum').item()\n mean_accuracy += correct / len(self.dataset)\n mean_log_loss += log_loss / len(self.dataset)\n yield dict(batch=i)\n return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)\n",
"step-3": "<mask token>\n\n\nclass Evaluator(object):\n\n\n class Result(NamedTuple):\n accuracy: float\n log_loss: float\n\n def evaluate(self, *args, **kwargs):\n return NotImplemented\n\n\nclass ModelEvaluator(Evaluator):\n\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,\n mixed_precision: bool=True):\n self.dataset = dataset\n self.mixed_precision = mixed_precision\n self.loader = DataLoader(dataset, batch_size, shuffle=False,\n num_workers=num_workers, drop_last=False)\n\n @property\n def num_batches(self):\n return len(self.loader)\n\n def evaluate(self, model: Classifier, device: Optional[Union[torch.\n device, str]]=None) ->Evaluator.Result:\n return expand_generator(self.evaluate_iter(model, device),\n return_only=True)\n\n def evaluate_iter(self, model: Classifier, device: Optional[Union[torch\n .device, str]]=None) ->Generator[dict, None, Evaluator.Result]:\n with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled\n =self.mixed_precision):\n mean_accuracy = 0.0\n mean_log_loss = 0.0\n for i, (x, y) in enumerate(self.loader):\n x = x.to(device)\n y = y.to(device)\n logits = model(x)\n correct = torch.sum(logits.argmax(-1) == y).item()\n log_loss = F.cross_entropy(logits, y, reduction='sum').item()\n mean_accuracy += correct / len(self.dataset)\n mean_log_loss += log_loss / len(self.dataset)\n yield dict(batch=i)\n return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)\n",
"step-4": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom modules import Classifier\nfrom typing import Generator, NamedTuple, Optional, Union\nfrom utils import expand_generator\n\n\nclass Evaluator(object):\n\n\n class Result(NamedTuple):\n accuracy: float\n log_loss: float\n\n def evaluate(self, *args, **kwargs):\n return NotImplemented\n\n\nclass ModelEvaluator(Evaluator):\n\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,\n mixed_precision: bool=True):\n self.dataset = dataset\n self.mixed_precision = mixed_precision\n self.loader = DataLoader(dataset, batch_size, shuffle=False,\n num_workers=num_workers, drop_last=False)\n\n @property\n def num_batches(self):\n return len(self.loader)\n\n def evaluate(self, model: Classifier, device: Optional[Union[torch.\n device, str]]=None) ->Evaluator.Result:\n return expand_generator(self.evaluate_iter(model, device),\n return_only=True)\n\n def evaluate_iter(self, model: Classifier, device: Optional[Union[torch\n .device, str]]=None) ->Generator[dict, None, Evaluator.Result]:\n with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled\n =self.mixed_precision):\n mean_accuracy = 0.0\n mean_log_loss = 0.0\n for i, (x, y) in enumerate(self.loader):\n x = x.to(device)\n y = y.to(device)\n logits = model(x)\n correct = torch.sum(logits.argmax(-1) == y).item()\n log_loss = F.cross_entropy(logits, y, reduction='sum').item()\n mean_accuracy += correct / len(self.dataset)\n mean_log_loss += log_loss / len(self.dataset)\n yield dict(batch=i)\n return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)\n",
"step-5": "#\n# Wrappers for model evaluation\n#\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom modules import Classifier\nfrom typing import Generator, NamedTuple, Optional, Union\nfrom utils import expand_generator\n\n\nclass Evaluator(object):\n class Result(NamedTuple):\n accuracy: float\n log_loss: float\n\n def evaluate(self, *args, **kwargs):\n return NotImplemented\n\n\nclass ModelEvaluator(Evaluator):\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int, mixed_precision: bool = True):\n self.dataset = dataset\n self.mixed_precision = mixed_precision\n self.loader = DataLoader(dataset, batch_size, shuffle=False, num_workers=num_workers, drop_last=False)\n\n @property\n def num_batches(self):\n return len(self.loader)\n\n def evaluate(self, model: Classifier, device: Optional[Union[torch.device, str]] = None) -> Evaluator.Result:\n return expand_generator(self.evaluate_iter(model, device), return_only=True)\n\n def evaluate_iter(\n self,\n model: Classifier,\n device: Optional[Union[torch.device, str]] = None) -> Generator[dict, None, Evaluator.Result]:\n with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled=self.mixed_precision):\n mean_accuracy = 0.\n mean_log_loss = 0.\n for i, (x, y) in enumerate(self.loader):\n x = x.to(device)\n y = y.to(device)\n logits = model(x)\n correct = torch.sum(logits.argmax(-1) == y).item()\n log_loss = F.cross_entropy(logits, y, reduction='sum').item()\n mean_accuracy += correct / len(self.dataset)\n mean_log_loss += log_loss / len(self.dataset)\n yield dict(batch=i)\n return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
import datetime
import scrapy
from ScrapyProject.items import ScrapyItem
class ThalesSpider(scrapy.Spider):
#item_id = ScrapyItem()
name = 'thales'
allowed_domains = ['https://www.thalesgroup.com']
start_urls = [('https://www.thalesgroup.com/fr/search-everything/all/propulsion?page=%d' %i ) for i in range(0,30)]
def parse(self, response):
# iterate entries
for entry in response.css('div.big__list__item__info'):
#retrieve info for our current post
item = ScrapyItem()
item['source'] = 'thales'
item['date'] = 'NotAvalaible'
item['brief'] = entry.css('div.field__item even::text').extract_first()
item['url'] = entry.css('a::attr(href)').extract_first()
item['title'] = entry.css('a::text').extract_first()
# check time
now = datetime.datetime.now()
item['tstamp'] = now
print(item)
yield item
|
normal
|
{
"blob_id": "fd1b871c5cf79874acf8d5c4f1f73f7a381e23f7",
"index": 8278,
"step-1": "<mask token>\n\n\nclass ThalesSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ThalesSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n for entry in response.css('div.big__list__item__info'):\n item = ScrapyItem()\n item['source'] = 'thales'\n item['date'] = 'NotAvalaible'\n item['brief'] = entry.css('div.field__item even::text'\n ).extract_first()\n item['url'] = entry.css('a::attr(href)').extract_first()\n item['title'] = entry.css('a::text').extract_first()\n now = datetime.datetime.now()\n item['tstamp'] = now\n print(item)\n yield item\n",
"step-3": "<mask token>\n\n\nclass ThalesSpider(scrapy.Spider):\n name = 'thales'\n allowed_domains = ['https://www.thalesgroup.com']\n start_urls = [(\n 'https://www.thalesgroup.com/fr/search-everything/all/propulsion?page=%d'\n % i) for i in range(0, 30)]\n\n def parse(self, response):\n for entry in response.css('div.big__list__item__info'):\n item = ScrapyItem()\n item['source'] = 'thales'\n item['date'] = 'NotAvalaible'\n item['brief'] = entry.css('div.field__item even::text'\n ).extract_first()\n item['url'] = entry.css('a::attr(href)').extract_first()\n item['title'] = entry.css('a::text').extract_first()\n now = datetime.datetime.now()\n item['tstamp'] = now\n print(item)\n yield item\n",
"step-4": "import datetime\nimport scrapy\nfrom ScrapyProject.items import ScrapyItem\n\n\nclass ThalesSpider(scrapy.Spider):\n name = 'thales'\n allowed_domains = ['https://www.thalesgroup.com']\n start_urls = [(\n 'https://www.thalesgroup.com/fr/search-everything/all/propulsion?page=%d'\n % i) for i in range(0, 30)]\n\n def parse(self, response):\n for entry in response.css('div.big__list__item__info'):\n item = ScrapyItem()\n item['source'] = 'thales'\n item['date'] = 'NotAvalaible'\n item['brief'] = entry.css('div.field__item even::text'\n ).extract_first()\n item['url'] = entry.css('a::attr(href)').extract_first()\n item['title'] = entry.css('a::text').extract_first()\n now = datetime.datetime.now()\n item['tstamp'] = now\n print(item)\n yield item\n",
"step-5": "# This package will contain the spiders of your Scrapy project\n#\n# Please refer to the documentation for information on how to create and manage\n# your spiders.\n\nimport datetime\nimport scrapy\nfrom ScrapyProject.items import ScrapyItem\n\nclass ThalesSpider(scrapy.Spider):\n\t#item_id = ScrapyItem()\n\tname = 'thales'\n\n\n\tallowed_domains = ['https://www.thalesgroup.com']\n\n\tstart_urls = [('https://www.thalesgroup.com/fr/search-everything/all/propulsion?page=%d' %i ) for i in range(0,30)]\n\n\tdef parse(self, response):\n # iterate entries\n\n\n\t\tfor entry in response.css('div.big__list__item__info'):\n\n #retrieve info for our current post\n\t\t\titem = ScrapyItem()\n\n\t\t\titem['source'] = 'thales'\n\t\t\titem['date'] = 'NotAvalaible'\n\t\t\titem['brief'] = entry.css('div.field__item even::text').extract_first()\n\t\t\titem['url'] = entry.css('a::attr(href)').extract_first()\n\t\t\titem['title'] = entry.css('a::text').extract_first()\n\n\t\t\t# check time\n\t\t\tnow = datetime.datetime.now()\n\t\t\titem['tstamp'] = now\n\n\t\t\tprint(item)\n\n\t\t\tyield item\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os
import shutil
def flatCopyWithExt(srcDir, dstDir, ext):
if not os.path.exists(dstDir):
os.makedirs(dstDir)
for basename in os.listdir(srcDir):
if basename.endswith(ext):
pathname = os.path.join(srcDir, basename)
if os.path.isfile(pathname):
shutil.copy2(pathname, dstDir)
def moveSDLIncludes():
flatCopyWithExt("./ext/SDL2/core/code/include/", "./ext/SDL2/core/include/", ".h")
flatCopyWithExt("./ext/SDL2/SDL2-image/code/", "./ext/SDL2/SDL2-image/include/", ".h")
flatCopyWithExt("./ext/SDL2/SDL2-mixer/code/", "./ext/SDL2/SDL2-mixer/include/", ".h")
flatCopyWithExt("./ext/SDL2/SDL2-ttf/code/", "./ext/SDL2/SDL2-ttf/include/", ".h")
|
normal
|
{
"blob_id": "649c0c0f170b50fe51f5eaf11908e968f66625c9",
"index": 5925,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef moveSDLIncludes():\n flatCopyWithExt('./ext/SDL2/core/code/include/',\n './ext/SDL2/core/include/', '.h')\n flatCopyWithExt('./ext/SDL2/SDL2-image/code/',\n './ext/SDL2/SDL2-image/include/', '.h')\n flatCopyWithExt('./ext/SDL2/SDL2-mixer/code/',\n './ext/SDL2/SDL2-mixer/include/', '.h')\n flatCopyWithExt('./ext/SDL2/SDL2-ttf/code/',\n './ext/SDL2/SDL2-ttf/include/', '.h')\n",
"step-3": "<mask token>\n\n\ndef flatCopyWithExt(srcDir, dstDir, ext):\n if not os.path.exists(dstDir):\n os.makedirs(dstDir)\n for basename in os.listdir(srcDir):\n if basename.endswith(ext):\n pathname = os.path.join(srcDir, basename)\n if os.path.isfile(pathname):\n shutil.copy2(pathname, dstDir)\n\n\ndef moveSDLIncludes():\n flatCopyWithExt('./ext/SDL2/core/code/include/',\n './ext/SDL2/core/include/', '.h')\n flatCopyWithExt('./ext/SDL2/SDL2-image/code/',\n './ext/SDL2/SDL2-image/include/', '.h')\n flatCopyWithExt('./ext/SDL2/SDL2-mixer/code/',\n './ext/SDL2/SDL2-mixer/include/', '.h')\n flatCopyWithExt('./ext/SDL2/SDL2-ttf/code/',\n './ext/SDL2/SDL2-ttf/include/', '.h')\n",
"step-4": "import os\nimport shutil\n\n\ndef flatCopyWithExt(srcDir, dstDir, ext):\n if not os.path.exists(dstDir):\n os.makedirs(dstDir)\n for basename in os.listdir(srcDir):\n if basename.endswith(ext):\n pathname = os.path.join(srcDir, basename)\n if os.path.isfile(pathname):\n shutil.copy2(pathname, dstDir)\n\n\ndef moveSDLIncludes():\n flatCopyWithExt('./ext/SDL2/core/code/include/',\n './ext/SDL2/core/include/', '.h')\n flatCopyWithExt('./ext/SDL2/SDL2-image/code/',\n './ext/SDL2/SDL2-image/include/', '.h')\n flatCopyWithExt('./ext/SDL2/SDL2-mixer/code/',\n './ext/SDL2/SDL2-mixer/include/', '.h')\n flatCopyWithExt('./ext/SDL2/SDL2-ttf/code/',\n './ext/SDL2/SDL2-ttf/include/', '.h')\n",
"step-5": "import os\r\nimport shutil\r\n\r\ndef flatCopyWithExt(srcDir, dstDir, ext):\r\n\tif not os.path.exists(dstDir):\r\n\t\tos.makedirs(dstDir)\r\n\tfor basename in os.listdir(srcDir):\r\n\t\tif basename.endswith(ext):\r\n\t\t\tpathname = os.path.join(srcDir, basename)\r\n\t\t\tif os.path.isfile(pathname):\r\n\t\t\t\tshutil.copy2(pathname, dstDir)\r\n\r\ndef moveSDLIncludes():\r\n\tflatCopyWithExt(\"./ext/SDL2/core/code/include/\", \"./ext/SDL2/core/include/\", \".h\")\r\n\tflatCopyWithExt(\"./ext/SDL2/SDL2-image/code/\", \"./ext/SDL2/SDL2-image/include/\", \".h\")\r\n\tflatCopyWithExt(\"./ext/SDL2/SDL2-mixer/code/\", \"./ext/SDL2/SDL2-mixer/include/\", \".h\")\r\n\tflatCopyWithExt(\"./ext/SDL2/SDL2-ttf/code/\", \"./ext/SDL2/SDL2-ttf/include/\", \".h\")\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Contact(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __unicode__(self):
return self.name
class Tag(models.Model):
contact = models.ForeignKey(Contact)
name = models.CharField(max_length=50)
def __unicode__(self):
return self.name
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test(models.Model):
<|reserved_special_token_0|>
def __unicode__(self):
return self.name
class Contact(models.Model):
GENDER_TYPES = ('M', u'男'), ('F', u'女'), ('X', u'不告诉你')
name = models.CharField(u'姓名', max_length=20)
age = models.IntegerField(u'年龄', default=0)
gender = models.CharField(u'性别', max_length=1, null=False, blank=False,
choices=GENDER_TYPES, default='X')
email = models.EmailField()
tele = models.CharField(u'电话', max_length=20)
address = models.CharField(u'地址', max_length=200)
postcode = models.CharField(u'邮政编码', max_length=6)
notes = models.CharField(u'备注', max_length=200)
def __unicode__(self):
return self.name
class Tag(models.Model):
contact = models.ForeignKey(Contact)
name = models.CharField(max_length=50)
def __unicode__(self):
return self.name
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test(models.Model):
name = models.CharField(max_length=20)
def __unicode__(self):
return self.name
class Contact(models.Model):
GENDER_TYPES = ('M', u'男'), ('F', u'女'), ('X', u'不告诉你')
name = models.CharField(u'姓名', max_length=20)
age = models.IntegerField(u'年龄', default=0)
gender = models.CharField(u'性别', max_length=1, null=False, blank=False,
choices=GENDER_TYPES, default='X')
email = models.EmailField()
tele = models.CharField(u'电话', max_length=20)
address = models.CharField(u'地址', max_length=200)
postcode = models.CharField(u'邮政编码', max_length=6)
notes = models.CharField(u'备注', max_length=200)
def __unicode__(self):
return self.name
class Tag(models.Model):
contact = models.ForeignKey(Contact)
name = models.CharField(max_length=50)
def __unicode__(self):
return self.name
<|reserved_special_token_1|>
from django.db import models
class Test(models.Model):
name = models.CharField(max_length=20)
def __unicode__(self):
return self.name
class Contact(models.Model):
GENDER_TYPES = ('M', u'男'), ('F', u'女'), ('X', u'不告诉你')
name = models.CharField(u'姓名', max_length=20)
age = models.IntegerField(u'年龄', default=0)
gender = models.CharField(u'性别', max_length=1, null=False, blank=False,
choices=GENDER_TYPES, default='X')
email = models.EmailField()
tele = models.CharField(u'电话', max_length=20)
address = models.CharField(u'地址', max_length=200)
postcode = models.CharField(u'邮政编码', max_length=6)
notes = models.CharField(u'备注', max_length=200)
def __unicode__(self):
return self.name
class Tag(models.Model):
contact = models.ForeignKey(Contact)
name = models.CharField(max_length=50)
def __unicode__(self):
return self.name
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from django.db import models
# Create your models here.
class Test(models.Model):
name = models.CharField(max_length=20)
def __unicode__(self):
return self.name
class Contact(models.Model):
GENDER_TYPES = (
('M', u'男'),
('F', u'女'),
('X', u'不告诉你'),
)
name = models.CharField(u'姓名', max_length=20)
age = models.IntegerField(u'年龄', default=0)
gender = models.CharField(u'性别', max_length=1, null=False, blank=False, choices=GENDER_TYPES, default='X')
email = models.EmailField()
tele = models.CharField(u'电话', max_length=20)
address = models.CharField(u'地址', max_length=200)
postcode = models.CharField(u'邮政编码', max_length=6)
notes = models.CharField(u'备注', max_length=200)
def __unicode__(self):
return self.name
class Tag(models.Model):
contact = models.ForeignKey(Contact)
name = models.CharField(max_length=50)
def __unicode__(self):
return self.name
|
flexible
|
{
"blob_id": "514a3fc312d36e6f9b601ede7f7a3940c138d39a",
"index": 2000,
"step-1": "<mask token>\n\n\nclass Contact(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return self.name\n\n\nclass Tag(models.Model):\n contact = models.ForeignKey(Contact)\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n",
"step-2": "<mask token>\n\n\nclass Test(models.Model):\n <mask token>\n\n def __unicode__(self):\n return self.name\n\n\nclass Contact(models.Model):\n GENDER_TYPES = ('M', u'男'), ('F', u'女'), ('X', u'不告诉你')\n name = models.CharField(u'姓名', max_length=20)\n age = models.IntegerField(u'年龄', default=0)\n gender = models.CharField(u'性别', max_length=1, null=False, blank=False,\n choices=GENDER_TYPES, default='X')\n email = models.EmailField()\n tele = models.CharField(u'电话', max_length=20)\n address = models.CharField(u'地址', max_length=200)\n postcode = models.CharField(u'邮政编码', max_length=6)\n notes = models.CharField(u'备注', max_length=200)\n\n def __unicode__(self):\n return self.name\n\n\nclass Tag(models.Model):\n contact = models.ForeignKey(Contact)\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n",
"step-3": "<mask token>\n\n\nclass Test(models.Model):\n name = models.CharField(max_length=20)\n\n def __unicode__(self):\n return self.name\n\n\nclass Contact(models.Model):\n GENDER_TYPES = ('M', u'男'), ('F', u'女'), ('X', u'不告诉你')\n name = models.CharField(u'姓名', max_length=20)\n age = models.IntegerField(u'年龄', default=0)\n gender = models.CharField(u'性别', max_length=1, null=False, blank=False,\n choices=GENDER_TYPES, default='X')\n email = models.EmailField()\n tele = models.CharField(u'电话', max_length=20)\n address = models.CharField(u'地址', max_length=200)\n postcode = models.CharField(u'邮政编码', max_length=6)\n notes = models.CharField(u'备注', max_length=200)\n\n def __unicode__(self):\n return self.name\n\n\nclass Tag(models.Model):\n contact = models.ForeignKey(Contact)\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n",
"step-4": "from django.db import models\n\n\nclass Test(models.Model):\n name = models.CharField(max_length=20)\n\n def __unicode__(self):\n return self.name\n\n\nclass Contact(models.Model):\n GENDER_TYPES = ('M', u'男'), ('F', u'女'), ('X', u'不告诉你')\n name = models.CharField(u'姓名', max_length=20)\n age = models.IntegerField(u'年龄', default=0)\n gender = models.CharField(u'性别', max_length=1, null=False, blank=False,\n choices=GENDER_TYPES, default='X')\n email = models.EmailField()\n tele = models.CharField(u'电话', max_length=20)\n address = models.CharField(u'地址', max_length=200)\n postcode = models.CharField(u'邮政编码', max_length=6)\n notes = models.CharField(u'备注', max_length=200)\n\n def __unicode__(self):\n return self.name\n\n\nclass Tag(models.Model):\n contact = models.ForeignKey(Contact)\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom django.db import models\n\n\n# Create your models here.\n\n\nclass Test(models.Model):\n name = models.CharField(max_length=20)\n\n def __unicode__(self):\n return self.name\n\n\nclass Contact(models.Model):\n GENDER_TYPES = (\n ('M', u'男'),\n ('F', u'女'),\n ('X', u'不告诉你'),\n )\n name = models.CharField(u'姓名', max_length=20)\n age = models.IntegerField(u'年龄', default=0)\n gender = models.CharField(u'性别', max_length=1, null=False, blank=False, choices=GENDER_TYPES, default='X')\n email = models.EmailField()\n tele = models.CharField(u'电话', max_length=20)\n address = models.CharField(u'地址', max_length=200)\n postcode = models.CharField(u'邮政编码', max_length=6)\n notes = models.CharField(u'备注', max_length=200)\n\n def __unicode__(self):\n return self.name\n\n\nclass Tag(models.Model):\n contact = models.ForeignKey(Contact)\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n",
"step-ids": [
5,
8,
9,
10,
11
]
}
|
[
5,
8,
9,
10,
11
] |
import ssl
import sys
import psycopg2 #conectarte python con postresql
import paho.mqtt.client #pip install paho-mqtt
import json
conn = psycopg2.connect(host = 'raja.db.elephantsql.com', user= 'oyoqynnr', password ='myHVlpJkEO21o29GKYSvMCGI3g4y05bh', dbname= 'oyoqynnr')
def on_connect(client, userdata, flags, rc):
print('Conectado (%s)' % client._client_id)
client.subscribe(topic='unimet/#', qos = 0)
def ventasTIENDA(client, userdata, message):
a = json.loads(message.payload)
print(a)
cur = conn.cursor()
sql = '''INSERT INTO ventas (time_stamp, id_tienda, mac_add, monto) VALUES ( %s, %s, %s, %s);'''
cur.execute(sql, (a["DATE"],a["ID_TIENDA"],a["MAC_ADD"],a["MONTO"]))
conn.commit()
print('VENTA EFECTUADA')
print('------------------------------')
def main():
client = paho.mqtt.client.Client()
client.on_connect = on_connect
client.message_callback_add('unimet/ventas', ventasTIENDA)
client.connect("broker.hivemq.com",1883,60)
client.loop_forever()
if __name__ == '__main__':
main()
sys.exit(0)
|
normal
|
{
"blob_id": "f1b36e3ce3189c8dca2e41664ac1a6d632d23f79",
"index": 5078,
"step-1": "<mask token>\n\n\ndef on_connect(client, userdata, flags, rc):\n print('Conectado (%s)' % client._client_id)\n client.subscribe(topic='unimet/#', qos=0)\n\n\ndef ventasTIENDA(client, userdata, message):\n a = json.loads(message.payload)\n print(a)\n cur = conn.cursor()\n sql = (\n 'INSERT INTO ventas (time_stamp, id_tienda, mac_add, monto) VALUES ( %s, %s, %s, %s);'\n )\n cur.execute(sql, (a['DATE'], a['ID_TIENDA'], a['MAC_ADD'], a['MONTO']))\n conn.commit()\n print('VENTA EFECTUADA')\n print('------------------------------')\n\n\ndef main():\n client = paho.mqtt.client.Client()\n client.on_connect = on_connect\n client.message_callback_add('unimet/ventas', ventasTIENDA)\n client.connect('broker.hivemq.com', 1883, 60)\n client.loop_forever()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef on_connect(client, userdata, flags, rc):\n print('Conectado (%s)' % client._client_id)\n client.subscribe(topic='unimet/#', qos=0)\n\n\ndef ventasTIENDA(client, userdata, message):\n a = json.loads(message.payload)\n print(a)\n cur = conn.cursor()\n sql = (\n 'INSERT INTO ventas (time_stamp, id_tienda, mac_add, monto) VALUES ( %s, %s, %s, %s);'\n )\n cur.execute(sql, (a['DATE'], a['ID_TIENDA'], a['MAC_ADD'], a['MONTO']))\n conn.commit()\n print('VENTA EFECTUADA')\n print('------------------------------')\n\n\ndef main():\n client = paho.mqtt.client.Client()\n client.on_connect = on_connect\n client.message_callback_add('unimet/ventas', ventasTIENDA)\n client.connect('broker.hivemq.com', 1883, 60)\n client.loop_forever()\n\n\nif __name__ == '__main__':\n main()\n sys.exit(0)\n",
"step-3": "<mask token>\nconn = psycopg2.connect(host='raja.db.elephantsql.com', user='oyoqynnr',\n password='myHVlpJkEO21o29GKYSvMCGI3g4y05bh', dbname='oyoqynnr')\n\n\ndef on_connect(client, userdata, flags, rc):\n print('Conectado (%s)' % client._client_id)\n client.subscribe(topic='unimet/#', qos=0)\n\n\ndef ventasTIENDA(client, userdata, message):\n a = json.loads(message.payload)\n print(a)\n cur = conn.cursor()\n sql = (\n 'INSERT INTO ventas (time_stamp, id_tienda, mac_add, monto) VALUES ( %s, %s, %s, %s);'\n )\n cur.execute(sql, (a['DATE'], a['ID_TIENDA'], a['MAC_ADD'], a['MONTO']))\n conn.commit()\n print('VENTA EFECTUADA')\n print('------------------------------')\n\n\ndef main():\n client = paho.mqtt.client.Client()\n client.on_connect = on_connect\n client.message_callback_add('unimet/ventas', ventasTIENDA)\n client.connect('broker.hivemq.com', 1883, 60)\n client.loop_forever()\n\n\nif __name__ == '__main__':\n main()\n sys.exit(0)\n",
"step-4": "import ssl\nimport sys\nimport psycopg2\nimport paho.mqtt.client\nimport json\nconn = psycopg2.connect(host='raja.db.elephantsql.com', user='oyoqynnr',\n password='myHVlpJkEO21o29GKYSvMCGI3g4y05bh', dbname='oyoqynnr')\n\n\ndef on_connect(client, userdata, flags, rc):\n print('Conectado (%s)' % client._client_id)\n client.subscribe(topic='unimet/#', qos=0)\n\n\ndef ventasTIENDA(client, userdata, message):\n a = json.loads(message.payload)\n print(a)\n cur = conn.cursor()\n sql = (\n 'INSERT INTO ventas (time_stamp, id_tienda, mac_add, monto) VALUES ( %s, %s, %s, %s);'\n )\n cur.execute(sql, (a['DATE'], a['ID_TIENDA'], a['MAC_ADD'], a['MONTO']))\n conn.commit()\n print('VENTA EFECTUADA')\n print('------------------------------')\n\n\ndef main():\n client = paho.mqtt.client.Client()\n client.on_connect = on_connect\n client.message_callback_add('unimet/ventas', ventasTIENDA)\n client.connect('broker.hivemq.com', 1883, 60)\n client.loop_forever()\n\n\nif __name__ == '__main__':\n main()\n sys.exit(0)\n",
"step-5": "import ssl\nimport sys\nimport psycopg2 #conectarte python con postresql\nimport paho.mqtt.client #pip install paho-mqtt\nimport json\n\nconn = psycopg2.connect(host = 'raja.db.elephantsql.com', user= 'oyoqynnr', password ='myHVlpJkEO21o29GKYSvMCGI3g4y05bh', dbname= 'oyoqynnr')\n\n \ndef on_connect(client, userdata, flags, rc): \n print('Conectado (%s)' % client._client_id)\n client.subscribe(topic='unimet/#', qos = 0) \n\n\ndef ventasTIENDA(client, userdata, message): \n a = json.loads(message.payload) \n print(a) \n cur = conn.cursor()\n sql = '''INSERT INTO ventas (time_stamp, id_tienda, mac_add, monto) VALUES ( %s, %s, %s, %s);'''\n cur.execute(sql, (a[\"DATE\"],a[\"ID_TIENDA\"],a[\"MAC_ADD\"],a[\"MONTO\"]))\n conn.commit()\n print('VENTA EFECTUADA')\n print('------------------------------') \n\n\n\n\ndef main():\t\n client = paho.mqtt.client.Client()\n client.on_connect = on_connect\n client.message_callback_add('unimet/ventas', ventasTIENDA)\n client.connect(\"broker.hivemq.com\",1883,60)\n client.loop_forever()\n\nif __name__ == '__main__':\n\tmain()\n\tsys.exit(0)\n\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import os
from pathlib import Path
from sphinx_testing import with_app
@with_app(buildername="html", srcdir="doc_test/doc_role_need_max_title_length_unlimited")
def test_max_title_length_unlimited(app, status, warning):
os.environ["MAX_TITLE_LENGTH"] = "-1"
app.build()
html = Path(app.outdir, "index.html").read_text()
assert "ROLE NEED TEMPLATE" in html
assert (
"[SP_TOO_001] Command line interface (implemented) Specification/spec - test;test2 - SP_TOO_002 - - "
"The Tool awesome shall have a command line interface." in html
)
@with_app(buildername="html", srcdir="doc_test/doc_role_need_max_title_length")
def test_max_title_length_10(app, status, warning):
os.environ["MAX_TITLE_LENGTH"] = "10"
app.build()
html = Path(app.outdir, "index.html").read_text()
assert "ROLE NEED TEMPLATE" in html
assert (
"[SP_TOO_001] Command... (implemented) Specification/spec - test;test2 - SP_TOO_002 - - "
"The Tool awesome shall have a command line interface." in html
)
|
normal
|
{
"blob_id": "3346ca7cdcfe9d9627bfe08be2b282897b3c319c",
"index": 6943,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@with_app(buildername='html', srcdir=\n 'doc_test/doc_role_need_max_title_length_unlimited')\ndef test_max_title_length_unlimited(app, status, warning):\n os.environ['MAX_TITLE_LENGTH'] = '-1'\n app.build()\n html = Path(app.outdir, 'index.html').read_text()\n assert 'ROLE NEED TEMPLATE' in html\n assert '[SP_TOO_001] Command line interface (implemented) Specification/spec - test;test2 - SP_TOO_002 - - The Tool awesome shall have a command line interface.' in html\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@with_app(buildername='html', srcdir=\n 'doc_test/doc_role_need_max_title_length_unlimited')\ndef test_max_title_length_unlimited(app, status, warning):\n os.environ['MAX_TITLE_LENGTH'] = '-1'\n app.build()\n html = Path(app.outdir, 'index.html').read_text()\n assert 'ROLE NEED TEMPLATE' in html\n assert '[SP_TOO_001] Command line interface (implemented) Specification/spec - test;test2 - SP_TOO_002 - - The Tool awesome shall have a command line interface.' in html\n\n\n@with_app(buildername='html', srcdir='doc_test/doc_role_need_max_title_length')\ndef test_max_title_length_10(app, status, warning):\n os.environ['MAX_TITLE_LENGTH'] = '10'\n app.build()\n html = Path(app.outdir, 'index.html').read_text()\n assert 'ROLE NEED TEMPLATE' in html\n assert '[SP_TOO_001] Command... (implemented) Specification/spec - test;test2 - SP_TOO_002 - - The Tool awesome shall have a command line interface.' in html\n",
"step-4": "import os\nfrom pathlib import Path\nfrom sphinx_testing import with_app\n\n\n@with_app(buildername='html', srcdir=\n 'doc_test/doc_role_need_max_title_length_unlimited')\ndef test_max_title_length_unlimited(app, status, warning):\n os.environ['MAX_TITLE_LENGTH'] = '-1'\n app.build()\n html = Path(app.outdir, 'index.html').read_text()\n assert 'ROLE NEED TEMPLATE' in html\n assert '[SP_TOO_001] Command line interface (implemented) Specification/spec - test;test2 - SP_TOO_002 - - The Tool awesome shall have a command line interface.' in html\n\n\n@with_app(buildername='html', srcdir='doc_test/doc_role_need_max_title_length')\ndef test_max_title_length_10(app, status, warning):\n os.environ['MAX_TITLE_LENGTH'] = '10'\n app.build()\n html = Path(app.outdir, 'index.html').read_text()\n assert 'ROLE NEED TEMPLATE' in html\n assert '[SP_TOO_001] Command... (implemented) Specification/spec - test;test2 - SP_TOO_002 - - The Tool awesome shall have a command line interface.' in html\n",
"step-5": "import os\nfrom pathlib import Path\n\nfrom sphinx_testing import with_app\n\n\n@with_app(buildername=\"html\", srcdir=\"doc_test/doc_role_need_max_title_length_unlimited\")\ndef test_max_title_length_unlimited(app, status, warning):\n\n os.environ[\"MAX_TITLE_LENGTH\"] = \"-1\"\n app.build()\n html = Path(app.outdir, \"index.html\").read_text()\n assert \"ROLE NEED TEMPLATE\" in html\n assert (\n \"[SP_TOO_001] Command line interface (implemented) Specification/spec - test;test2 - SP_TOO_002 - - \"\n \"The Tool awesome shall have a command line interface.\" in html\n )\n\n\n@with_app(buildername=\"html\", srcdir=\"doc_test/doc_role_need_max_title_length\")\ndef test_max_title_length_10(app, status, warning):\n\n os.environ[\"MAX_TITLE_LENGTH\"] = \"10\"\n app.build()\n html = Path(app.outdir, \"index.html\").read_text()\n assert \"ROLE NEED TEMPLATE\" in html\n assert (\n \"[SP_TOO_001] Command... (implemented) Specification/spec - test;test2 - SP_TOO_002 - - \"\n \"The Tool awesome shall have a command line interface.\" in html\n )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import cv2 as cv
import methods as meth
from numpy.fft import fft2, fftshift, ifft2, ifftshift
import pandas
import os
import noGPU as h
import matplotlib.pyplot as plt
class fullSys():
def __init__(self, dir, file, size, line):
csv_reader = pandas.read_csv(file, index_col='Objective')
self.Params = {}
self.Params['mag'] = csv_reader['Magnification'][line]
self.Params['NA'] = csv_reader['NA'][line]
self.Params['ps'] = [csv_reader['Pixel Size x'][line], csv_reader['Pixel Size y'][line]]
self.Params['distance'] = csv_reader['Screen Distance'][line]
self.Params['LEDSpace'] = csv_reader['LED Spacing'][line]
self.Params['LEDNum'] = [csv_reader['Num LED x'][line], csv_reader['Num LED x'][line]]
self.Params['dir'] = dir
self.Params['images'] = os.listdir(dir)
self.Params['numImgs'] = len(self.Params['images'])
self.Params['smallSize'] = meth.readImage(dir, self.Params['images'][0], colour=1, getsize=True)
self.Params['fResolution'] = self.fRes(self.Params['mag'], self.Params['smallSize'], self.Params['ps'])
print("fullSys")
## Instantiate sub Objects ##
splitSize, self.Params['lc'] = self.getSS()
img = meth.readImage(self.Params['dir'], self.Params['images'][0])
print("fullSys2")
numFiles, divisor = self.getDivisor(img, splitSize)
print("fullSys2")
self.Params['numFiles'] = numFiles
self.Params['divisor'] = divisor
self.Params['size'] = self.getSize(size, numFiles)
self.subObjs = np.empty([numFiles, numFiles], dtype=section)
print("fullSys1")
for i in range(numFiles):
for j in range(numFiles):
subImg = img[i * divisor:(i + 1) * divisor, j * divisor:(j + 1) * divisor]
self.subObjs[i, j] = section(i, j, subImg, self.Params)
h.progbar(i, numFiles, 'Initializing')
def getSS(self):
""" Determines the required subsection size based on Cittert Zernike theorem """
rho = 300e-6 # LED size
lc = 0.61*R*530/rho
size = lc*slef.Params['mag'] / self.Params['ps']
return size, lc
def getDivisor(self, img, splitSize):
imgSize = img.shape[0]
while True:
if imgSize % splitSize == 0:
divisor = splitSize
break
splitSize += 1
numFiles = int(imgSize / divisor)
return numFiles, divisor
def getSize(self, size, numSplits):
while True:
if size[0] % numSplits == 0:
break
size[0] += 1
return size[0]
def fRes(self, mag, size, ps):
""" Determines the change in spatial frequency across one pixel in F-space """
x = 2 * np.pi * mag / (size[0] * ps[0])
y = 2 * np.pi * mag / (size[1] * ps[1])
return [x, y]
class section():
def __init__(self, i0, j0, subImg, Params):
self.Params = Params
self.subParams = {}
self.subParams['wLen'] = [630e-9, 530e-9, 430e-9]
self.subParams['subSize'] = subImg.shape
self.subParams['bigSize'] = [np.int(Params['size'] / Params['numFiles'])] * 2
self.S = np.empty([self.subParams['bigSize'][0], self.subParams['bigSize'][1], 3], dtype=np.complex64)
self.P = np.empty([self.subParams['subSize'][0], self.subParams['subSize'][1], 3], dtype=np.complex64)
self.meanFFT = np.zeros([self.subParams['subSize'][0], self.subParams['subSize'][1], 3], dtype=np.complex64)
self.meanNum = 0
self.subParams['fRApprox'] = np.empty([3], dtype=int)
self.subParams['coords'] = np.empty([3, 16, 16, 2])
self.subParams['isBF'] = np.empty([3, 16, 16])
for i in range(0, 3):
self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams['bigSize'])
self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],
Params['NA'], self.subParams['wLen'][i])
print(Params['NA'], self.subParams['wLen'][i], Params['mag'], Params['ps'], Params['smallSize'])
self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.subParams['fRApprox'][i])
self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i, :, :] =\
self.initCoords(i0, j0, self.subParams['wLen'][i], self.subParams['fRApprox'][i])
self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])
self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])
for i in range(3):
self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([Params['divisor'], Params['divisor']], i)
def initS0(self, img, size):
""" Initialises the FT of the high res image by linear interpolation of a low res image """
I0 = cv.resize(img, (size[1], size[0]),
interpolation=cv.INTER_LINEAR) # Bilinear interpolated upsampled image
amplitude = np.sqrt(I0)
FI0 = fft2(ifftshift(amplitude))
FI0 = fftshift(FI0) # FI0.shape[0]
S = np.array(FI0, dtype=np.complex64)
return S
def initP0(self, size, radius):
""" Initialises the pupil function as a real circular step function of value 1 """
return h.circle(size, radius)[:, :, 0]
def fRad(self, fDu, NA, wLen):
""" Determines the approximate radius in F-space in pixels of the pupil function """
x = 2 * np.pi * NA / (wLen * fDu[0])
y = 2 * np.pi * NA / (wLen * fDu[1])
avr = np.int32(np.average([x, y]))
return avr
def initCoords(self, i, j, wLen, Rad):
""" Returns 2D array where LED coords relate to fourier centre positions """
segmentPos = [i, j]
n = self.Params['numFiles']
w = self.subParams['subSize'][0]
c = w / (2 * n)
centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0]/self.Params['mag']
self.Params['centre'] = centre
coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][1], 2), dtype=np.int32)
isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]), dtype=np.int32)
numImgs = int(len(self.Params['images']) ** 0.5)
for i, img in enumerate(self.Params['images']):
LED = meth.getLED(img)
LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)
#print("LED:", LED, "LEDPixelPos:", LEDPixelPos)
#print("LEDPos:", [LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1])
coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1] = LEDPixelPos
if ((LEDPixelPos[0]-w/2)**2 + (LEDPixelPos[1]-w/2)**2 < Rad):
isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1] = 1
return coords, isBF
def getLEDPos(self, nx, ny, centre, wLen):
""" Determines the location of the centre of the fourier pattern in pixels """
ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.Params['distance']) # Angle to x axis
ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.Params['distance']) # Angle to y axis
dx = ax / (wLen * self.Params['fResolution'][0])
dy = ay / (wLen * self.Params['fResolution'][1])
pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.subParams['subSize'][0] / 2)]
return pos
class splitImage():
def __init__(self, dir, imgName, numSplits, splitSize):
self.LEDPos = meth.getLED(imgName)
self.subImg = np.empty([numSplits, numSplits], dtype=subImage)
for i in range(numSplits):
for j in range(numSplits):
self.subImg[i, j] = subImage(dir, splitSize, imgName, self.LEDPos, i, j)
class subImage():
def __init__(self, dir, splitSize, imgName, LEDPos, i, j):
img = meth.readImage(dir, imgName)
self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(j + 1) * splitSize]
self.imgPos = [i, j]
self.LEDPos = LEDPos
########################################################################################################################
'''
class preProcess(objective):
def __init__(self, dir, file, size, line, colour=1):
""" Slices images into sections """
super().__init__(dir, file, size, line, colour=1)
numFiles, devisor = self.getDevisor(150)
self.genFiles(numFiles)
self.split(devisor, numFiles)
def genFiles(self, numFiles):
path = os.path.join(os.getcwd(), 'temp')
if os.path.isdir(path):
shutil.rmtree(path)
time.sleep(0.01)
os.mkdir(path)
for i in range(numFiles):
for j in range(numFiles):
folder = '%s_%s' % (str(i), str(j))
path1 = os.path.join(path, folder)
os.mkdir(path1)
def getDevisor(self, splitSize):
imgName = self.images[0]
img = self.readImage(self.dir, imgName)
imgSize = img.shape[0]
while True:
if imgSize % splitSize == 0:
devisor = splitSize
break
splitSize += 1
numFiles = int(imgSize / devisor)
return numFiles, devisor
def split(self, devisor, numFiles):
path0 = os.path.join(os.getcwd(), 'temp')
for i0, file in enumerate(self.images):
LED = self.getLED(file)
img = self.readImage(self.dir, file)
for i in range(numFiles):
for j in range(numFiles):
folder = '%s_%s' % (str(i), str(j))
path1 = os.path.join(path0, folder)
file = 'img_%s_%s_.jpg' % (str(LED[0]), str(LED[1]))
path = os.path.join(path1, file)
subImg = img[i * devisor:(i + 1) * devisor, j * devisor:(j + 1) * devisor]
cv.imwrite(path, subImg)
h.progbar(i0 * numFiles ** 2 + i * numFiles + j,
len(self.images) * numFiles ** 2, 'Slicing Images')
def initCoords(self, dir):
""" Returns 2D array where LED coords relate to fourier centre positions """
dirName = os.path.basename(dir)
segmentPos = self.getSegment(dirName)
N = len(os.listdir(dir))
n = np.sqrt(N)
w = self.smallSize[0]
c = w / (2 * n)
centre = (segmentPos[0] * 2 * c + c - w) * self.ps[0]/self.mag
coords = np.empty((self.LEDNum[0], self.LEDNum[1], 2), dtype=np.int32)
for i, img in enumerate(self.images):
LED = self.getLED(img)
LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre)
coords[LED[0], LED[1]] = LEDPixelPos
return coords
'''
|
normal
|
{
"blob_id": "e3c9487f3221ca89b9014b2e6470ca9d4dbc925a",
"index": 2239,
"step-1": "<mask token>\n\n\nclass section:\n\n def __init__(self, i0, j0, subImg, Params):\n self.Params = Params\n self.subParams = {}\n self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]\n self.subParams['subSize'] = subImg.shape\n self.subParams['bigSize'] = [np.int(Params['size'] / Params[\n 'numFiles'])] * 2\n self.S = np.empty([self.subParams['bigSize'][0], self.subParams[\n 'bigSize'][1], 3], dtype=np.complex64)\n self.P = np.empty([self.subParams['subSize'][0], self.subParams[\n 'subSize'][1], 3], dtype=np.complex64)\n self.meanFFT = np.zeros([self.subParams['subSize'][0], self.\n subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanNum = 0\n self.subParams['fRApprox'] = np.empty([3], dtype=int)\n self.subParams['coords'] = np.empty([3, 16, 16, 2])\n self.subParams['isBF'] = np.empty([3, 16, 16])\n for i in range(0, 3):\n self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[\n 'bigSize'])\n self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],\n Params['NA'], self.subParams['wLen'][i])\n print(Params['NA'], self.subParams['wLen'][i], Params['mag'],\n Params['ps'], Params['smallSize'])\n self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.\n subParams['fRApprox'][i])\n self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,\n :, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],\n self.subParams['fRApprox'][i])\n self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])\n self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])\n for i in range(3):\n self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([\n Params['divisor'], Params['divisor']], i)\n\n def initS0(self, img, size):\n \"\"\" Initialises the FT of the high res image by linear interpolation of a low res image \"\"\"\n I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)\n amplitude = np.sqrt(I0)\n FI0 = fft2(ifftshift(amplitude))\n FI0 = fftshift(FI0)\n S = np.array(FI0, dtype=np.complex64)\n return S\n 
<mask token>\n <mask token>\n\n def initCoords(self, i, j, wLen, Rad):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n segmentPos = [i, j]\n n = self.Params['numFiles']\n w = self.subParams['subSize'][0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0\n ] / self.Params['mag']\n self.Params['centre'] = centre\n coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][\n 1], 2), dtype=np.int32)\n isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]\n ), dtype=np.int32)\n numImgs = int(len(self.Params['images']) ** 0.5)\n for i, img in enumerate(self.Params['images']):\n LED = meth.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)\n coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1\n ] = LEDPixelPos\n if (LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2\n ) ** 2 < Rad:\n isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / \n 2) - 1] = 1\n return coords, isBF\n\n def getLEDPos(self, nx, ny, centre, wLen):\n \"\"\" Determines the location of the centre of the fourier pattern in pixels \"\"\"\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.\n Params['distance'])\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.\n Params['distance'])\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.\n subParams['subSize'][0] / 2)]\n return pos\n\n\nclass splitImage:\n\n def __init__(self, dir, imgName, numSplits, splitSize):\n self.LEDPos = meth.getLED(imgName)\n self.subImg = np.empty([numSplits, numSplits], dtype=subImage)\n for i in range(numSplits):\n for j in range(numSplits):\n self.subImg[i, j] = subImage(dir, splitSize, imgName, self.\n LEDPos, i, j)\n\n\nclass subImage:\n\n def __init__(self, dir, splitSize, imgName, LEDPos, i, j):\n img = meth.readImage(dir, imgName)\n self.image 
= img[i * splitSize:(i + 1) * splitSize, j * splitSize:(\n j + 1) * splitSize]\n self.imgPos = [i, j]\n self.LEDPos = LEDPos\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass section:\n\n def __init__(self, i0, j0, subImg, Params):\n self.Params = Params\n self.subParams = {}\n self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]\n self.subParams['subSize'] = subImg.shape\n self.subParams['bigSize'] = [np.int(Params['size'] / Params[\n 'numFiles'])] * 2\n self.S = np.empty([self.subParams['bigSize'][0], self.subParams[\n 'bigSize'][1], 3], dtype=np.complex64)\n self.P = np.empty([self.subParams['subSize'][0], self.subParams[\n 'subSize'][1], 3], dtype=np.complex64)\n self.meanFFT = np.zeros([self.subParams['subSize'][0], self.\n subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanNum = 0\n self.subParams['fRApprox'] = np.empty([3], dtype=int)\n self.subParams['coords'] = np.empty([3, 16, 16, 2])\n self.subParams['isBF'] = np.empty([3, 16, 16])\n for i in range(0, 3):\n self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[\n 'bigSize'])\n self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],\n Params['NA'], self.subParams['wLen'][i])\n print(Params['NA'], self.subParams['wLen'][i], Params['mag'],\n Params['ps'], Params['smallSize'])\n self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.\n subParams['fRApprox'][i])\n self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,\n :, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],\n self.subParams['fRApprox'][i])\n self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])\n self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])\n for i in range(3):\n self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([\n Params['divisor'], Params['divisor']], i)\n\n def initS0(self, img, size):\n \"\"\" Initialises the FT of the high res image by linear interpolation of a low res image \"\"\"\n I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)\n amplitude = np.sqrt(I0)\n FI0 = fft2(ifftshift(amplitude))\n FI0 = fftshift(FI0)\n S = np.array(FI0, dtype=np.complex64)\n return S\n\n 
def initP0(self, size, radius):\n \"\"\" Initialises the pupil function as a real circular step function of value 1 \"\"\"\n return h.circle(size, radius)[:, :, 0]\n\n def fRad(self, fDu, NA, wLen):\n \"\"\" Determines the approximate radius in F-space in pixels of the pupil function \"\"\"\n x = 2 * np.pi * NA / (wLen * fDu[0])\n y = 2 * np.pi * NA / (wLen * fDu[1])\n avr = np.int32(np.average([x, y]))\n return avr\n\n def initCoords(self, i, j, wLen, Rad):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n segmentPos = [i, j]\n n = self.Params['numFiles']\n w = self.subParams['subSize'][0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0\n ] / self.Params['mag']\n self.Params['centre'] = centre\n coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][\n 1], 2), dtype=np.int32)\n isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]\n ), dtype=np.int32)\n numImgs = int(len(self.Params['images']) ** 0.5)\n for i, img in enumerate(self.Params['images']):\n LED = meth.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)\n coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1\n ] = LEDPixelPos\n if (LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2\n ) ** 2 < Rad:\n isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / \n 2) - 1] = 1\n return coords, isBF\n\n def getLEDPos(self, nx, ny, centre, wLen):\n \"\"\" Determines the location of the centre of the fourier pattern in pixels \"\"\"\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.\n Params['distance'])\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.\n Params['distance'])\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.\n subParams['subSize'][0] / 2)]\n return pos\n\n\nclass splitImage:\n\n def __init__(self, dir, imgName, 
numSplits, splitSize):\n self.LEDPos = meth.getLED(imgName)\n self.subImg = np.empty([numSplits, numSplits], dtype=subImage)\n for i in range(numSplits):\n for j in range(numSplits):\n self.subImg[i, j] = subImage(dir, splitSize, imgName, self.\n LEDPos, i, j)\n\n\nclass subImage:\n\n def __init__(self, dir, splitSize, imgName, LEDPos, i, j):\n img = meth.readImage(dir, imgName)\n self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(\n j + 1) * splitSize]\n self.imgPos = [i, j]\n self.LEDPos = LEDPos\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass fullSys:\n <mask token>\n <mask token>\n\n def getDivisor(self, img, splitSize):\n imgSize = img.shape[0]\n while True:\n if imgSize % splitSize == 0:\n divisor = splitSize\n break\n splitSize += 1\n numFiles = int(imgSize / divisor)\n return numFiles, divisor\n <mask token>\n <mask token>\n\n\nclass section:\n\n def __init__(self, i0, j0, subImg, Params):\n self.Params = Params\n self.subParams = {}\n self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]\n self.subParams['subSize'] = subImg.shape\n self.subParams['bigSize'] = [np.int(Params['size'] / Params[\n 'numFiles'])] * 2\n self.S = np.empty([self.subParams['bigSize'][0], self.subParams[\n 'bigSize'][1], 3], dtype=np.complex64)\n self.P = np.empty([self.subParams['subSize'][0], self.subParams[\n 'subSize'][1], 3], dtype=np.complex64)\n self.meanFFT = np.zeros([self.subParams['subSize'][0], self.\n subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanNum = 0\n self.subParams['fRApprox'] = np.empty([3], dtype=int)\n self.subParams['coords'] = np.empty([3, 16, 16, 2])\n self.subParams['isBF'] = np.empty([3, 16, 16])\n for i in range(0, 3):\n self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[\n 'bigSize'])\n self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],\n Params['NA'], self.subParams['wLen'][i])\n print(Params['NA'], self.subParams['wLen'][i], Params['mag'],\n Params['ps'], Params['smallSize'])\n self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.\n subParams['fRApprox'][i])\n self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,\n :, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],\n self.subParams['fRApprox'][i])\n self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])\n self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])\n for i in range(3):\n self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([\n Params['divisor'], Params['divisor']], i)\n\n def initS0(self, img, size):\n \"\"\" 
Initialises the FT of the high res image by linear interpolation of a low res image \"\"\"\n I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)\n amplitude = np.sqrt(I0)\n FI0 = fft2(ifftshift(amplitude))\n FI0 = fftshift(FI0)\n S = np.array(FI0, dtype=np.complex64)\n return S\n\n def initP0(self, size, radius):\n \"\"\" Initialises the pupil function as a real circular step function of value 1 \"\"\"\n return h.circle(size, radius)[:, :, 0]\n\n def fRad(self, fDu, NA, wLen):\n \"\"\" Determines the approximate radius in F-space in pixels of the pupil function \"\"\"\n x = 2 * np.pi * NA / (wLen * fDu[0])\n y = 2 * np.pi * NA / (wLen * fDu[1])\n avr = np.int32(np.average([x, y]))\n return avr\n\n def initCoords(self, i, j, wLen, Rad):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n segmentPos = [i, j]\n n = self.Params['numFiles']\n w = self.subParams['subSize'][0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0\n ] / self.Params['mag']\n self.Params['centre'] = centre\n coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][\n 1], 2), dtype=np.int32)\n isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]\n ), dtype=np.int32)\n numImgs = int(len(self.Params['images']) ** 0.5)\n for i, img in enumerate(self.Params['images']):\n LED = meth.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)\n coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1\n ] = LEDPixelPos\n if (LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2\n ) ** 2 < Rad:\n isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / \n 2) - 1] = 1\n return coords, isBF\n\n def getLEDPos(self, nx, ny, centre, wLen):\n \"\"\" Determines the location of the centre of the fourier pattern in pixels \"\"\"\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.\n Params['distance'])\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / 
self.\n Params['distance'])\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.\n subParams['subSize'][0] / 2)]\n return pos\n\n\nclass splitImage:\n\n def __init__(self, dir, imgName, numSplits, splitSize):\n self.LEDPos = meth.getLED(imgName)\n self.subImg = np.empty([numSplits, numSplits], dtype=subImage)\n for i in range(numSplits):\n for j in range(numSplits):\n self.subImg[i, j] = subImage(dir, splitSize, imgName, self.\n LEDPos, i, j)\n\n\nclass subImage:\n\n def __init__(self, dir, splitSize, imgName, LEDPos, i, j):\n img = meth.readImage(dir, imgName)\n self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(\n j + 1) * splitSize]\n self.imgPos = [i, j]\n self.LEDPos = LEDPos\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass fullSys:\n\n def __init__(self, dir, file, size, line):\n csv_reader = pandas.read_csv(file, index_col='Objective')\n self.Params = {}\n self.Params['mag'] = csv_reader['Magnification'][line]\n self.Params['NA'] = csv_reader['NA'][line]\n self.Params['ps'] = [csv_reader['Pixel Size x'][line], csv_reader[\n 'Pixel Size y'][line]]\n self.Params['distance'] = csv_reader['Screen Distance'][line]\n self.Params['LEDSpace'] = csv_reader['LED Spacing'][line]\n self.Params['LEDNum'] = [csv_reader['Num LED x'][line], csv_reader[\n 'Num LED x'][line]]\n self.Params['dir'] = dir\n self.Params['images'] = os.listdir(dir)\n self.Params['numImgs'] = len(self.Params['images'])\n self.Params['smallSize'] = meth.readImage(dir, self.Params['images'\n ][0], colour=1, getsize=True)\n self.Params['fResolution'] = self.fRes(self.Params['mag'], self.\n Params['smallSize'], self.Params['ps'])\n print('fullSys')\n splitSize, self.Params['lc'] = self.getSS()\n img = meth.readImage(self.Params['dir'], self.Params['images'][0])\n print('fullSys2')\n numFiles, divisor = self.getDivisor(img, splitSize)\n print('fullSys2')\n self.Params['numFiles'] = numFiles\n self.Params['divisor'] = divisor\n self.Params['size'] = self.getSize(size, numFiles)\n self.subObjs = np.empty([numFiles, numFiles], dtype=section)\n print('fullSys1')\n for i in range(numFiles):\n for j in range(numFiles):\n subImg = img[i * divisor:(i + 1) * divisor, j * divisor:(j +\n 1) * divisor]\n self.subObjs[i, j] = section(i, j, subImg, self.Params)\n h.progbar(i, numFiles, 'Initializing')\n\n def getSS(self):\n \"\"\" Determines the required subsection size based on Cittert Zernike theorem \"\"\"\n rho = 0.0003\n lc = 0.61 * R * 530 / rho\n size = lc * slef.Params['mag'] / self.Params['ps']\n return size, lc\n\n def getDivisor(self, img, splitSize):\n imgSize = img.shape[0]\n while True:\n if imgSize % splitSize == 0:\n divisor = splitSize\n break\n splitSize += 1\n numFiles = int(imgSize / 
divisor)\n return numFiles, divisor\n <mask token>\n <mask token>\n\n\nclass section:\n\n def __init__(self, i0, j0, subImg, Params):\n self.Params = Params\n self.subParams = {}\n self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]\n self.subParams['subSize'] = subImg.shape\n self.subParams['bigSize'] = [np.int(Params['size'] / Params[\n 'numFiles'])] * 2\n self.S = np.empty([self.subParams['bigSize'][0], self.subParams[\n 'bigSize'][1], 3], dtype=np.complex64)\n self.P = np.empty([self.subParams['subSize'][0], self.subParams[\n 'subSize'][1], 3], dtype=np.complex64)\n self.meanFFT = np.zeros([self.subParams['subSize'][0], self.\n subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanNum = 0\n self.subParams['fRApprox'] = np.empty([3], dtype=int)\n self.subParams['coords'] = np.empty([3, 16, 16, 2])\n self.subParams['isBF'] = np.empty([3, 16, 16])\n for i in range(0, 3):\n self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[\n 'bigSize'])\n self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],\n Params['NA'], self.subParams['wLen'][i])\n print(Params['NA'], self.subParams['wLen'][i], Params['mag'],\n Params['ps'], Params['smallSize'])\n self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.\n subParams['fRApprox'][i])\n self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,\n :, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],\n self.subParams['fRApprox'][i])\n self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])\n self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])\n for i in range(3):\n self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([\n Params['divisor'], Params['divisor']], i)\n\n def initS0(self, img, size):\n \"\"\" Initialises the FT of the high res image by linear interpolation of a low res image \"\"\"\n I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)\n amplitude = np.sqrt(I0)\n FI0 = fft2(ifftshift(amplitude))\n FI0 = fftshift(FI0)\n S = 
np.array(FI0, dtype=np.complex64)\n return S\n\n def initP0(self, size, radius):\n \"\"\" Initialises the pupil function as a real circular step function of value 1 \"\"\"\n return h.circle(size, radius)[:, :, 0]\n\n def fRad(self, fDu, NA, wLen):\n \"\"\" Determines the approximate radius in F-space in pixels of the pupil function \"\"\"\n x = 2 * np.pi * NA / (wLen * fDu[0])\n y = 2 * np.pi * NA / (wLen * fDu[1])\n avr = np.int32(np.average([x, y]))\n return avr\n\n def initCoords(self, i, j, wLen, Rad):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n segmentPos = [i, j]\n n = self.Params['numFiles']\n w = self.subParams['subSize'][0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0\n ] / self.Params['mag']\n self.Params['centre'] = centre\n coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][\n 1], 2), dtype=np.int32)\n isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]\n ), dtype=np.int32)\n numImgs = int(len(self.Params['images']) ** 0.5)\n for i, img in enumerate(self.Params['images']):\n LED = meth.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)\n coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1\n ] = LEDPixelPos\n if (LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2\n ) ** 2 < Rad:\n isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / \n 2) - 1] = 1\n return coords, isBF\n\n def getLEDPos(self, nx, ny, centre, wLen):\n \"\"\" Determines the location of the centre of the fourier pattern in pixels \"\"\"\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.\n Params['distance'])\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.\n Params['distance'])\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.\n subParams['subSize'][0] / 2)]\n return pos\n\n\nclass 
splitImage:\n\n def __init__(self, dir, imgName, numSplits, splitSize):\n self.LEDPos = meth.getLED(imgName)\n self.subImg = np.empty([numSplits, numSplits], dtype=subImage)\n for i in range(numSplits):\n for j in range(numSplits):\n self.subImg[i, j] = subImage(dir, splitSize, imgName, self.\n LEDPos, i, j)\n\n\nclass subImage:\n\n def __init__(self, dir, splitSize, imgName, LEDPos, i, j):\n img = meth.readImage(dir, imgName)\n self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(\n j + 1) * splitSize]\n self.imgPos = [i, j]\n self.LEDPos = LEDPos\n\n\n<mask token>\n",
"step-5": "import numpy as np\nimport cv2 as cv\nimport methods as meth\nfrom numpy.fft import fft2, fftshift, ifft2, ifftshift\nimport pandas\nimport os\nimport noGPU as h\nimport matplotlib.pyplot as plt\n\nclass fullSys():\n def __init__(self, dir, file, size, line):\n csv_reader = pandas.read_csv(file, index_col='Objective')\n self.Params = {}\n self.Params['mag'] = csv_reader['Magnification'][line]\n self.Params['NA'] = csv_reader['NA'][line]\n self.Params['ps'] = [csv_reader['Pixel Size x'][line], csv_reader['Pixel Size y'][line]]\n self.Params['distance'] = csv_reader['Screen Distance'][line]\n self.Params['LEDSpace'] = csv_reader['LED Spacing'][line]\n self.Params['LEDNum'] = [csv_reader['Num LED x'][line], csv_reader['Num LED x'][line]]\n self.Params['dir'] = dir\n self.Params['images'] = os.listdir(dir)\n self.Params['numImgs'] = len(self.Params['images'])\n self.Params['smallSize'] = meth.readImage(dir, self.Params['images'][0], colour=1, getsize=True)\n self.Params['fResolution'] = self.fRes(self.Params['mag'], self.Params['smallSize'], self.Params['ps'])\n print(\"fullSys\")\n\n ## Instantiate sub Objects ##\n\n splitSize, self.Params['lc'] = self.getSS()\n img = meth.readImage(self.Params['dir'], self.Params['images'][0])\n print(\"fullSys2\")\n\n numFiles, divisor = self.getDivisor(img, splitSize)\n print(\"fullSys2\")\n\n self.Params['numFiles'] = numFiles\n self.Params['divisor'] = divisor\n self.Params['size'] = self.getSize(size, numFiles)\n\n self.subObjs = np.empty([numFiles, numFiles], dtype=section)\n print(\"fullSys1\")\n\n for i in range(numFiles):\n for j in range(numFiles):\n subImg = img[i * divisor:(i + 1) * divisor, j * divisor:(j + 1) * divisor]\n self.subObjs[i, j] = section(i, j, subImg, self.Params)\n h.progbar(i, numFiles, 'Initializing')\n\n\n def getSS(self):\n \"\"\" Determines the required subsection size based on Cittert Zernike theorem \"\"\"\n rho = 300e-6 # LED size\n lc = 0.61*R*530/rho\n size = lc*slef.Params['mag'] / 
self.Params['ps']\n return size, lc\n\n\n def getDivisor(self, img, splitSize):\n imgSize = img.shape[0]\n while True:\n if imgSize % splitSize == 0:\n divisor = splitSize\n break\n splitSize += 1\n numFiles = int(imgSize / divisor)\n return numFiles, divisor\n\n\n def getSize(self, size, numSplits):\n while True:\n if size[0] % numSplits == 0:\n break\n size[0] += 1\n return size[0]\n\n\n def fRes(self, mag, size, ps):\n \"\"\" Determines the change in spatial frequency across one pixel in F-space \"\"\"\n x = 2 * np.pi * mag / (size[0] * ps[0])\n y = 2 * np.pi * mag / (size[1] * ps[1])\n return [x, y]\n\n\nclass section():\n def __init__(self, i0, j0, subImg, Params):\n self.Params = Params\n self.subParams = {}\n self.subParams['wLen'] = [630e-9, 530e-9, 430e-9]\n self.subParams['subSize'] = subImg.shape\n self.subParams['bigSize'] = [np.int(Params['size'] / Params['numFiles'])] * 2\n self.S = np.empty([self.subParams['bigSize'][0], self.subParams['bigSize'][1], 3], dtype=np.complex64)\n self.P = np.empty([self.subParams['subSize'][0], self.subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanFFT = np.zeros([self.subParams['subSize'][0], self.subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanNum = 0\n self.subParams['fRApprox'] = np.empty([3], dtype=int)\n self.subParams['coords'] = np.empty([3, 16, 16, 2])\n self.subParams['isBF'] = np.empty([3, 16, 16])\n for i in range(0, 3):\n self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams['bigSize'])\n self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],\n Params['NA'], self.subParams['wLen'][i])\n print(Params['NA'], self.subParams['wLen'][i], Params['mag'], Params['ps'], Params['smallSize'])\n self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.subParams['fRApprox'][i])\n self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i, :, :] =\\\n self.initCoords(i0, j0, self.subParams['wLen'][i], self.subParams['fRApprox'][i])\n self.bayer = 
np.empty([Params['divisor'], Params['divisor'], 3])\n self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])\n for i in range(3):\n self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([Params['divisor'], Params['divisor']], i)\n\n\n def initS0(self, img, size):\n \"\"\" Initialises the FT of the high res image by linear interpolation of a low res image \"\"\"\n\n I0 = cv.resize(img, (size[1], size[0]),\n interpolation=cv.INTER_LINEAR) # Bilinear interpolated upsampled image\n\n amplitude = np.sqrt(I0)\n\n FI0 = fft2(ifftshift(amplitude))\n FI0 = fftshift(FI0) # FI0.shape[0]\n S = np.array(FI0, dtype=np.complex64)\n return S\n\n\n def initP0(self, size, radius):\n \"\"\" Initialises the pupil function as a real circular step function of value 1 \"\"\"\n return h.circle(size, radius)[:, :, 0]\n\n\n def fRad(self, fDu, NA, wLen):\n \"\"\" Determines the approximate radius in F-space in pixels of the pupil function \"\"\"\n x = 2 * np.pi * NA / (wLen * fDu[0])\n y = 2 * np.pi * NA / (wLen * fDu[1])\n avr = np.int32(np.average([x, y]))\n return avr\n\n\n def initCoords(self, i, j, wLen, Rad):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n segmentPos = [i, j]\n n = self.Params['numFiles']\n w = self.subParams['subSize'][0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0]/self.Params['mag']\n self.Params['centre'] = centre\n coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][1], 2), dtype=np.int32)\n isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]), dtype=np.int32)\n numImgs = int(len(self.Params['images']) ** 0.5)\n for i, img in enumerate(self.Params['images']):\n LED = meth.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)\n #print(\"LED:\", LED, \"LEDPixelPos:\", LEDPixelPos)\n #print(\"LEDPos:\", [LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1])\n coords[LED[0] + int(numImgs / 2) - 1, LED[1] + 
int(numImgs / 2) - 1] = LEDPixelPos\n if ((LEDPixelPos[0]-w/2)**2 + (LEDPixelPos[1]-w/2)**2 < Rad):\n isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1] = 1\n return coords, isBF\n\n\n def getLEDPos(self, nx, ny, centre, wLen):\n \"\"\" Determines the location of the centre of the fourier pattern in pixels \"\"\"\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.Params['distance']) # Angle to x axis\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.Params['distance']) # Angle to y axis\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.subParams['subSize'][0] / 2)]\n return pos\n\n\nclass splitImage():\n def __init__(self, dir, imgName, numSplits, splitSize):\n self.LEDPos = meth.getLED(imgName)\n self.subImg = np.empty([numSplits, numSplits], dtype=subImage)\n for i in range(numSplits):\n for j in range(numSplits):\n self.subImg[i, j] = subImage(dir, splitSize, imgName, self.LEDPos, i, j)\n\n\nclass subImage():\n def __init__(self, dir, splitSize, imgName, LEDPos, i, j):\n img = meth.readImage(dir, imgName)\n self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(j + 1) * splitSize]\n self.imgPos = [i, j]\n self.LEDPos = LEDPos\n\n\n\n\n\n\n########################################################################################################################\n'''\nclass preProcess(objective):\n def __init__(self, dir, file, size, line, colour=1):\n \"\"\" Slices images into sections \"\"\"\n super().__init__(dir, file, size, line, colour=1)\n numFiles, devisor = self.getDevisor(150)\n self.genFiles(numFiles)\n self.split(devisor, numFiles)\n\n\n def genFiles(self, numFiles):\n path = os.path.join(os.getcwd(), 'temp')\n if os.path.isdir(path):\n shutil.rmtree(path)\n time.sleep(0.01)\n os.mkdir(path)\n for i in range(numFiles):\n for j in range(numFiles):\n folder = '%s_%s' % (str(i), 
str(j))\n path1 = os.path.join(path, folder)\n os.mkdir(path1)\n\n\n def getDevisor(self, splitSize):\n imgName = self.images[0]\n img = self.readImage(self.dir, imgName)\n imgSize = img.shape[0]\n while True:\n if imgSize % splitSize == 0:\n devisor = splitSize\n break\n splitSize += 1\n numFiles = int(imgSize / devisor)\n return numFiles, devisor\n\n\n def split(self, devisor, numFiles):\n path0 = os.path.join(os.getcwd(), 'temp')\n for i0, file in enumerate(self.images):\n LED = self.getLED(file)\n img = self.readImage(self.dir, file)\n for i in range(numFiles):\n for j in range(numFiles):\n folder = '%s_%s' % (str(i), str(j))\n path1 = os.path.join(path0, folder)\n file = 'img_%s_%s_.jpg' % (str(LED[0]), str(LED[1]))\n path = os.path.join(path1, file)\n subImg = img[i * devisor:(i + 1) * devisor, j * devisor:(j + 1) * devisor]\n cv.imwrite(path, subImg)\n h.progbar(i0 * numFiles ** 2 + i * numFiles + j,\n len(self.images) * numFiles ** 2, 'Slicing Images')\n\n\n\n def initCoords(self, dir):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n dirName = os.path.basename(dir)\n segmentPos = self.getSegment(dirName)\n N = len(os.listdir(dir))\n n = np.sqrt(N)\n w = self.smallSize[0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.ps[0]/self.mag\n coords = np.empty((self.LEDNum[0], self.LEDNum[1], 2), dtype=np.int32)\n for i, img in enumerate(self.images):\n LED = self.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre)\n coords[LED[0], LED[1]] = LEDPixelPos\n return coords\n'''",
"step-ids": [
9,
11,
13,
15,
19
]
}
|
[
9,
11,
13,
15,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('{0:f} {1:f}'.format(r * r * math.pi, 2 * r * math.pi))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
r = float(input())
print('{0:f} {1:f}'.format(r * r * math.pi, 2 * r * math.pi))
<|reserved_special_token_1|>
import math
r = float(input())
print('{0:f} {1:f}'.format(r * r * math.pi, 2 * r * math.pi))
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
import math
r = float(input())
print("{0:f} {1:f}".format(r*r*math.pi,2*r*math.pi))
|
flexible
|
{
"blob_id": "e28cca2273e1c3ad4b8a955843e7dfb45c00694c",
"index": 3246,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('{0:f} {1:f}'.format(r * r * math.pi, 2 * r * math.pi))\n",
"step-3": "<mask token>\nr = float(input())\nprint('{0:f} {1:f}'.format(r * r * math.pi, 2 * r * math.pi))\n",
"step-4": "import math\nr = float(input())\nprint('{0:f} {1:f}'.format(r * r * math.pi, 2 * r * math.pi))\n",
"step-5": "# -*- coding:utf-8 -*-\nimport math\nr = float(input())\nprint(\"{0:f} {1:f}\".format(r*r*math.pi,2*r*math.pi))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
import optparse
import os
import shutil
import sys
from AutoCrab.AutoCrab2 import core
def main():
parser = optparse.OptionParser()
parser.add_option("-r", "--recursive", dest="recursive", action="store_true", help="Recursively look for CRAB job files and directories.")
(opts, args) = parser.parse_args()
for arg in args:
if not core.isValidCommand(arg):
print "Error: unrecognized autocrab command."
else:
core.doAutoCrab(arg, opts.recursive)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "d1fe06766958e8532c49d33e887d6c4996573c22",
"index": 4964,
"step-1": "#!/usr/bin/env python\n\nimport optparse\nimport os\nimport shutil\nimport sys\n\nfrom AutoCrab.AutoCrab2 import core\n\ndef main():\n\tparser = optparse.OptionParser()\n\tparser.add_option(\"-r\", \"--recursive\", dest=\"recursive\", action=\"store_true\", help=\"Recursively look for CRAB job files and directories.\")\n\t(opts, args) = parser.parse_args()\n\n\tfor arg in args:\n\t\tif not core.isValidCommand(arg):\n\t\t\tprint \"Error: unrecognized autocrab command.\"\n\t\telse:\n\t\t\tcore.doAutoCrab(arg, opts.recursive)\n\nif __name__ == '__main__':\n\tmain()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import cv2
import color_to_gray_operations
VIZ_PATH = '../output_data/visualizations/gray_intensities/'
def visualize_grayscale_intensities(img, out_path):
img_x, img_y = np.mgrid[0: img.shape[0], 0: img.shape[1]]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.jet,
linewidth=0)
plt.savefig(out_path + 'surface.png')
plt.close()
def visualize_color_intensities(color_img, out_path):
b, g, r = cv2.split(color_img)
blue_x, blue_y = np.mgrid[0: b.shape[0], 0: b.shape[1]]
green_x, green_y = np.mgrid[0: g.shape[0], 0: g.shape[1]]
red_x, red_y = np.mgrid[0: r.shape[0], 0: r.shape[1]]
fig_blue = plt.figure()
ax_blue = fig_blue.gca(projection='3d')
ax_blue.plot_surface(blue_x, blue_y, b ,rstride=1, cstride=1, cmap=plt.cm.jet,
linewidth=0)
plt.savefig(out_path + 'blue_surface.png')
plt.close()
fig_green = plt.figure()
ax_green = fig_green.gca(projection='3d')
ax_green.plot_surface(green_x, green_y, g ,rstride=1, cstride=1, cmap=plt.cm.jet,
linewidth=0)
plt.savefig(out_path + 'green_surface.png')
plt.close()
fig_red = plt.figure()
ax_red = fig_red.gca(projection='3d')
ax_red.plot_surface(red_x, red_y, r ,rstride=1, cstride=1, cmap=plt.cm.jet,
linewidth=0)
plt.savefig(out_path + 'red_surface.png')
plt.close()
def visualize_histogram(img):
if len(img.shape) == 2:
hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])
fig = plt.figure()
fig.plot(bins, hist)
fig.show()
def visualization_tests(path='../input_data/tunnel_1.png'):
path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'
img = cv2.imread(path)
visualize_color_intensities(img, out_path=VIZ_PATH)
def experiments(path='../input_data/noisy_segments/honeycomb_1.png'):
img = cv2.imread(path)
img = color_to_gray_operations.luminosity_method(img)
visualize_grayscale_intensities(img, out_path=VIZ_PATH)
experiments()
#visualization_tests()
|
normal
|
{
"blob_id": "21fec6d307b928a295f2ffbf267456f9cd9ea722",
"index": 9105,
"step-1": "<mask token>\n\n\ndef visualize_grayscale_intensities(img, out_path):\n img_x, img_y = np.mgrid[0:img.shape[0], 0:img.shape[1]]\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.\n jet, linewidth=0)\n plt.savefig(out_path + 'surface.png')\n plt.close()\n\n\n<mask token>\n\n\ndef visualize_histogram(img):\n if len(img.shape) == 2:\n hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])\n fig = plt.figure()\n fig.plot(bins, hist)\n fig.show()\n\n\ndef visualization_tests(path='../input_data/tunnel_1.png'):\n path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'\n img = cv2.imread(path)\n visualize_color_intensities(img, out_path=VIZ_PATH)\n\n\ndef experiments(path='../input_data/noisy_segments/honeycomb_1.png'):\n img = cv2.imread(path)\n img = color_to_gray_operations.luminosity_method(img)\n visualize_grayscale_intensities(img, out_path=VIZ_PATH)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef visualize_grayscale_intensities(img, out_path):\n img_x, img_y = np.mgrid[0:img.shape[0], 0:img.shape[1]]\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.\n jet, linewidth=0)\n plt.savefig(out_path + 'surface.png')\n plt.close()\n\n\ndef visualize_color_intensities(color_img, out_path):\n b, g, r = cv2.split(color_img)\n blue_x, blue_y = np.mgrid[0:b.shape[0], 0:b.shape[1]]\n green_x, green_y = np.mgrid[0:g.shape[0], 0:g.shape[1]]\n red_x, red_y = np.mgrid[0:r.shape[0], 0:r.shape[1]]\n fig_blue = plt.figure()\n ax_blue = fig_blue.gca(projection='3d')\n ax_blue.plot_surface(blue_x, blue_y, b, rstride=1, cstride=1, cmap=plt.\n cm.jet, linewidth=0)\n plt.savefig(out_path + 'blue_surface.png')\n plt.close()\n fig_green = plt.figure()\n ax_green = fig_green.gca(projection='3d')\n ax_green.plot_surface(green_x, green_y, g, rstride=1, cstride=1, cmap=\n plt.cm.jet, linewidth=0)\n plt.savefig(out_path + 'green_surface.png')\n plt.close()\n fig_red = plt.figure()\n ax_red = fig_red.gca(projection='3d')\n ax_red.plot_surface(red_x, red_y, r, rstride=1, cstride=1, cmap=plt.cm.\n jet, linewidth=0)\n plt.savefig(out_path + 'red_surface.png')\n plt.close()\n\n\ndef visualize_histogram(img):\n if len(img.shape) == 2:\n hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])\n fig = plt.figure()\n fig.plot(bins, hist)\n fig.show()\n\n\ndef visualization_tests(path='../input_data/tunnel_1.png'):\n path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'\n img = cv2.imread(path)\n visualize_color_intensities(img, out_path=VIZ_PATH)\n\n\ndef experiments(path='../input_data/noisy_segments/honeycomb_1.png'):\n img = cv2.imread(path)\n img = color_to_gray_operations.luminosity_method(img)\n visualize_grayscale_intensities(img, out_path=VIZ_PATH)\n\n\nexperiments()\n",
"step-3": "<mask token>\nVIZ_PATH = '../output_data/visualizations/gray_intensities/'\n\n\ndef visualize_grayscale_intensities(img, out_path):\n img_x, img_y = np.mgrid[0:img.shape[0], 0:img.shape[1]]\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.\n jet, linewidth=0)\n plt.savefig(out_path + 'surface.png')\n plt.close()\n\n\ndef visualize_color_intensities(color_img, out_path):\n b, g, r = cv2.split(color_img)\n blue_x, blue_y = np.mgrid[0:b.shape[0], 0:b.shape[1]]\n green_x, green_y = np.mgrid[0:g.shape[0], 0:g.shape[1]]\n red_x, red_y = np.mgrid[0:r.shape[0], 0:r.shape[1]]\n fig_blue = plt.figure()\n ax_blue = fig_blue.gca(projection='3d')\n ax_blue.plot_surface(blue_x, blue_y, b, rstride=1, cstride=1, cmap=plt.\n cm.jet, linewidth=0)\n plt.savefig(out_path + 'blue_surface.png')\n plt.close()\n fig_green = plt.figure()\n ax_green = fig_green.gca(projection='3d')\n ax_green.plot_surface(green_x, green_y, g, rstride=1, cstride=1, cmap=\n plt.cm.jet, linewidth=0)\n plt.savefig(out_path + 'green_surface.png')\n plt.close()\n fig_red = plt.figure()\n ax_red = fig_red.gca(projection='3d')\n ax_red.plot_surface(red_x, red_y, r, rstride=1, cstride=1, cmap=plt.cm.\n jet, linewidth=0)\n plt.savefig(out_path + 'red_surface.png')\n plt.close()\n\n\ndef visualize_histogram(img):\n if len(img.shape) == 2:\n hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])\n fig = plt.figure()\n fig.plot(bins, hist)\n fig.show()\n\n\ndef visualization_tests(path='../input_data/tunnel_1.png'):\n path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'\n img = cv2.imread(path)\n visualize_color_intensities(img, out_path=VIZ_PATH)\n\n\ndef experiments(path='../input_data/noisy_segments/honeycomb_1.png'):\n img = cv2.imread(path)\n img = color_to_gray_operations.luminosity_method(img)\n visualize_grayscale_intensities(img, out_path=VIZ_PATH)\n\n\nexperiments()\n",
"step-4": "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport cv2\nimport color_to_gray_operations\nVIZ_PATH = '../output_data/visualizations/gray_intensities/'\n\n\ndef visualize_grayscale_intensities(img, out_path):\n img_x, img_y = np.mgrid[0:img.shape[0], 0:img.shape[1]]\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.\n jet, linewidth=0)\n plt.savefig(out_path + 'surface.png')\n plt.close()\n\n\ndef visualize_color_intensities(color_img, out_path):\n b, g, r = cv2.split(color_img)\n blue_x, blue_y = np.mgrid[0:b.shape[0], 0:b.shape[1]]\n green_x, green_y = np.mgrid[0:g.shape[0], 0:g.shape[1]]\n red_x, red_y = np.mgrid[0:r.shape[0], 0:r.shape[1]]\n fig_blue = plt.figure()\n ax_blue = fig_blue.gca(projection='3d')\n ax_blue.plot_surface(blue_x, blue_y, b, rstride=1, cstride=1, cmap=plt.\n cm.jet, linewidth=0)\n plt.savefig(out_path + 'blue_surface.png')\n plt.close()\n fig_green = plt.figure()\n ax_green = fig_green.gca(projection='3d')\n ax_green.plot_surface(green_x, green_y, g, rstride=1, cstride=1, cmap=\n plt.cm.jet, linewidth=0)\n plt.savefig(out_path + 'green_surface.png')\n plt.close()\n fig_red = plt.figure()\n ax_red = fig_red.gca(projection='3d')\n ax_red.plot_surface(red_x, red_y, r, rstride=1, cstride=1, cmap=plt.cm.\n jet, linewidth=0)\n plt.savefig(out_path + 'red_surface.png')\n plt.close()\n\n\ndef visualize_histogram(img):\n if len(img.shape) == 2:\n hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])\n fig = plt.figure()\n fig.plot(bins, hist)\n fig.show()\n\n\ndef visualization_tests(path='../input_data/tunnel_1.png'):\n path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'\n img = cv2.imread(path)\n visualize_color_intensities(img, out_path=VIZ_PATH)\n\n\ndef experiments(path='../input_data/noisy_segments/honeycomb_1.png'):\n img = cv2.imread(path)\n img = 
color_to_gray_operations.luminosity_method(img)\n visualize_grayscale_intensities(img, out_path=VIZ_PATH)\n\n\nexperiments()\n",
"step-5": "import os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport cv2\n\nimport color_to_gray_operations\n\n\nVIZ_PATH = '../output_data/visualizations/gray_intensities/'\n\n\ndef visualize_grayscale_intensities(img, out_path):\n img_x, img_y = np.mgrid[0: img.shape[0], 0: img.shape[1]]\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.jet,\n linewidth=0)\n plt.savefig(out_path + 'surface.png')\n plt.close()\n\n\ndef visualize_color_intensities(color_img, out_path):\n b, g, r = cv2.split(color_img)\n\n blue_x, blue_y = np.mgrid[0: b.shape[0], 0: b.shape[1]]\n green_x, green_y = np.mgrid[0: g.shape[0], 0: g.shape[1]]\n red_x, red_y = np.mgrid[0: r.shape[0], 0: r.shape[1]]\n\n fig_blue = plt.figure()\n ax_blue = fig_blue.gca(projection='3d')\n ax_blue.plot_surface(blue_x, blue_y, b ,rstride=1, cstride=1, cmap=plt.cm.jet,\n linewidth=0)\n plt.savefig(out_path + 'blue_surface.png')\n plt.close()\n\n fig_green = plt.figure()\n ax_green = fig_green.gca(projection='3d')\n ax_green.plot_surface(green_x, green_y, g ,rstride=1, cstride=1, cmap=plt.cm.jet,\n linewidth=0)\n plt.savefig(out_path + 'green_surface.png')\n plt.close()\n\n fig_red = plt.figure()\n ax_red = fig_red.gca(projection='3d')\n ax_red.plot_surface(red_x, red_y, r ,rstride=1, cstride=1, cmap=plt.cm.jet,\n linewidth=0)\n plt.savefig(out_path + 'red_surface.png')\n plt.close()\n\n\ndef visualize_histogram(img):\n if len(img.shape) == 2:\n hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])\n fig = plt.figure()\n fig.plot(bins, hist)\n fig.show()\n\n\ndef visualization_tests(path='../input_data/tunnel_1.png'):\n path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'\n img = cv2.imread(path)\n visualize_color_intensities(img, out_path=VIZ_PATH)\n\n\ndef experiments(path='../input_data/noisy_segments/honeycomb_1.png'):\n img = cv2.imread(path)\n img 
= color_to_gray_operations.luminosity_method(img)\n visualize_grayscale_intensities(img, out_path=VIZ_PATH)\n\n\nexperiments()\n#visualization_tests()",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class FormControllerApi(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def submit_form_with_http_info(self, **kwargs):
"""Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501
This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action="https://python.api.mailslurp.com/forms" method="post" > <input name="_to" type="hidden" value="test@example.com"/> <textarea name="feedback"></textarea> <button type="submit">Submit</button> </form> ``` #### URL Example ```html <form action="https://python.api.mailslurp.com/forms?_to=test@example.com" method="post" > <textarea name="feedback"></textarea> <button type="submit">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype="multipart/form-data">`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.submit_form_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str to: The email address that submitted form should be sent to.
:param str subject: Optional subject of the email that will be sent.
:param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.
:param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.
:param str success_message: Optional success message to display if no _redirectTo present.
:param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.
:param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(str, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['to', 'subject', 'redirect_to', 'email_address',
'success_message', 'spam_check', 'other_parameters']
all_params.extend(['async_req', '_return_http_data_only',
'_preload_content', '_request_timeout'])
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s' to method submit_form"
% key)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'to' in local_var_params and local_var_params['to'] is not None:
query_params.append(('_to', local_var_params['to']))
if 'subject' in local_var_params and local_var_params['subject'
] is not None:
query_params.append(('_subject', local_var_params['subject']))
if 'redirect_to' in local_var_params and local_var_params['redirect_to'
] is not None:
query_params.append(('_redirectTo', local_var_params[
'redirect_to']))
if 'email_address' in local_var_params and local_var_params[
'email_address'] is not None:
query_params.append(('_emailAddress', local_var_params[
'email_address']))
if 'success_message' in local_var_params and local_var_params[
'success_message'] is not None:
query_params.append(('_successMessage', local_var_params[
'success_message']))
if 'spam_check' in local_var_params and local_var_params['spam_check'
] is not None:
query_params.append(('_spamCheck', local_var_params['spam_check']))
if 'other_parameters' in local_var_params and local_var_params[
'other_parameters'] is not None:
query_params.append(('otherParameters', local_var_params[
'other_parameters']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['*/*'])
auth_settings = ['API_KEY']
return self.api_client.call_api('/forms', 'POST', path_params,
query_params, header_params, body=body_params, post_params=
form_params, files=local_var_files, response_type='str',
auth_settings=auth_settings, async_req=local_var_params.get(
'async_req'), _return_http_data_only=local_var_params.get(
'_return_http_data_only'), _preload_content=local_var_params.
get('_preload_content', True), _request_timeout=
local_var_params.get('_request_timeout'), collection_formats=
collection_formats)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FormControllerApi(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def submit_form(self, **kwargs):
"""Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501
This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action="https://python.api.mailslurp.com/forms" method="post" > <input name="_to" type="hidden" value="test@example.com"/> <textarea name="feedback"></textarea> <button type="submit">Submit</button> </form> ``` #### URL Example ```html <form action="https://python.api.mailslurp.com/forms?_to=test@example.com" method="post" > <textarea name="feedback"></textarea> <button type="submit">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype="multipart/form-data">`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.submit_form(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str to: The email address that submitted form should be sent to.
:param str subject: Optional subject of the email that will be sent.
:param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.
:param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.
:param str success_message: Optional success message to display if no _redirectTo present.
:param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.
:param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.submit_form_with_http_info(**kwargs)
def submit_form_with_http_info(self, **kwargs):
"""Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501
This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action="https://python.api.mailslurp.com/forms" method="post" > <input name="_to" type="hidden" value="test@example.com"/> <textarea name="feedback"></textarea> <button type="submit">Submit</button> </form> ``` #### URL Example ```html <form action="https://python.api.mailslurp.com/forms?_to=test@example.com" method="post" > <textarea name="feedback"></textarea> <button type="submit">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype="multipart/form-data">`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.submit_form_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str to: The email address that submitted form should be sent to.
:param str subject: Optional subject of the email that will be sent.
:param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.
:param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.
:param str success_message: Optional success message to display if no _redirectTo present.
:param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.
:param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(str, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['to', 'subject', 'redirect_to', 'email_address',
'success_message', 'spam_check', 'other_parameters']
all_params.extend(['async_req', '_return_http_data_only',
'_preload_content', '_request_timeout'])
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s' to method submit_form"
% key)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'to' in local_var_params and local_var_params['to'] is not None:
query_params.append(('_to', local_var_params['to']))
if 'subject' in local_var_params and local_var_params['subject'
] is not None:
query_params.append(('_subject', local_var_params['subject']))
if 'redirect_to' in local_var_params and local_var_params['redirect_to'
] is not None:
query_params.append(('_redirectTo', local_var_params[
'redirect_to']))
if 'email_address' in local_var_params and local_var_params[
'email_address'] is not None:
query_params.append(('_emailAddress', local_var_params[
'email_address']))
if 'success_message' in local_var_params and local_var_params[
'success_message'] is not None:
query_params.append(('_successMessage', local_var_params[
'success_message']))
if 'spam_check' in local_var_params and local_var_params['spam_check'
] is not None:
query_params.append(('_spamCheck', local_var_params['spam_check']))
if 'other_parameters' in local_var_params and local_var_params[
'other_parameters'] is not None:
query_params.append(('otherParameters', local_var_params[
'other_parameters']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['*/*'])
auth_settings = ['API_KEY']
return self.api_client.call_api('/forms', 'POST', path_params,
query_params, header_params, body=body_params, post_params=
form_params, files=local_var_files, response_type='str',
auth_settings=auth_settings, async_req=local_var_params.get(
'async_req'), _return_http_data_only=local_var_params.get(
'_return_http_data_only'), _preload_content=local_var_params.
get('_preload_content', True), _request_timeout=
local_var_params.get('_request_timeout'), collection_formats=
collection_formats)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FormControllerApi(object):
<|reserved_special_token_0|>
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def submit_form(self, **kwargs):
"""Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501
This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action="https://python.api.mailslurp.com/forms" method="post" > <input name="_to" type="hidden" value="test@example.com"/> <textarea name="feedback"></textarea> <button type="submit">Submit</button> </form> ``` #### URL Example ```html <form action="https://python.api.mailslurp.com/forms?_to=test@example.com" method="post" > <textarea name="feedback"></textarea> <button type="submit">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype="multipart/form-data">`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.submit_form(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str to: The email address that submitted form should be sent to.
:param str subject: Optional subject of the email that will be sent.
:param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.
:param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.
:param str success_message: Optional success message to display if no _redirectTo present.
:param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.
:param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.submit_form_with_http_info(**kwargs)
def submit_form_with_http_info(self, **kwargs):
"""Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501
This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action="https://python.api.mailslurp.com/forms" method="post" > <input name="_to" type="hidden" value="test@example.com"/> <textarea name="feedback"></textarea> <button type="submit">Submit</button> </form> ``` #### URL Example ```html <form action="https://python.api.mailslurp.com/forms?_to=test@example.com" method="post" > <textarea name="feedback"></textarea> <button type="submit">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype="multipart/form-data">`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.submit_form_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str to: The email address that submitted form should be sent to.
:param str subject: Optional subject of the email that will be sent.
:param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.
:param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.
:param str success_message: Optional success message to display if no _redirectTo present.
:param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.
:param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(str, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['to', 'subject', 'redirect_to', 'email_address',
'success_message', 'spam_check', 'other_parameters']
all_params.extend(['async_req', '_return_http_data_only',
'_preload_content', '_request_timeout'])
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s' to method submit_form"
% key)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'to' in local_var_params and local_var_params['to'] is not None:
query_params.append(('_to', local_var_params['to']))
if 'subject' in local_var_params and local_var_params['subject'
] is not None:
query_params.append(('_subject', local_var_params['subject']))
if 'redirect_to' in local_var_params and local_var_params['redirect_to'
] is not None:
query_params.append(('_redirectTo', local_var_params[
'redirect_to']))
if 'email_address' in local_var_params and local_var_params[
'email_address'] is not None:
query_params.append(('_emailAddress', local_var_params[
'email_address']))
if 'success_message' in local_var_params and local_var_params[
'success_message'] is not None:
query_params.append(('_successMessage', local_var_params[
'success_message']))
if 'spam_check' in local_var_params and local_var_params['spam_check'
] is not None:
query_params.append(('_spamCheck', local_var_params['spam_check']))
if 'other_parameters' in local_var_params and local_var_params[
'other_parameters'] is not None:
query_params.append(('otherParameters', local_var_params[
'other_parameters']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['*/*'])
auth_settings = ['API_KEY']
return self.api_client.call_api('/forms', 'POST', path_params,
query_params, header_params, body=body_params, post_params=
form_params, files=local_var_files, response_type='str',
auth_settings=auth_settings, async_req=local_var_params.get(
'async_req'), _return_http_data_only=local_var_params.get(
'_return_http_data_only'), _preload_content=local_var_params.
get('_preload_content', True), _request_timeout=
local_var_params.get('_request_timeout'), collection_formats=
collection_formats)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FormControllerApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps accepted keyword arguments onto the query-string keys that the
    # MailSlurp ``/forms`` endpoint understands (insertion order preserved).
    _QUERY_KEY_BY_PARAM = {
        'to': '_to',
        'subject': '_subject',
        'redirect_to': '_redirectTo',
        'email_address': '_emailAddress',
        'success_message': '_successMessage',
        'spam_check': '_spamCheck',
        'other_parameters': 'otherParameters',
    }

    # Keywords that steer the transport layer rather than the request itself.
    _TRANSPORT_PARAMS = frozenset([
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ])

    def __init__(self, api_client=None):
        # Fall back to a default client so the API object is usable without
        # explicit configuration.
        self.api_client = api_client if api_client is not None else ApiClient()

    def submit_form(self, **kwargs):
        """Submit a form to be parsed and sent as an email to an address determined by the form fields

        Convenience wrapper around :meth:`submit_form_with_http_info` that
        returns only the response body.  The endpoint emails the submitted
        form fields (files become attachments) to the address supplied via the
        ``_to`` field or query parameter; the remaining meta parameters
        (``_subject``, ``_redirectTo``, ``_emailAddress``, ``_successMessage``,
        ``_spamCheck``) tune the email and the post-submit behaviour.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.submit_form(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str to: The email address that submitted form should be sent to.
        :param str subject: Optional subject of the email that will be sent.
        :param str redirect_to: Optional URL to redirect form submitter to after submission.
        :param str email_address: Email address of the submitting user.
        :param str success_message: Optional success message to display if no _redirectTo present.
        :param str spam_check: Anti-spam honeypot field; include it hidden and leave it empty.
        :param str other_parameters: Any other named fields become the body of the sent email.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
            be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request; a number or
            a (connection, read) tuple.
        :return: str
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.submit_form_with_http_info(**kwargs)

    def submit_form_with_http_info(self, **kwargs):
        """Submit a form to be parsed and sent as an email to an address determined by the form fields

        Same as :meth:`submit_form` but the (synchronous) return value is the
        full ``(body, status_code, headers)`` tuple.

        :param async_req bool: execute request asynchronously
        :param str to: The email address that submitted form should be sent to.
        :param str subject: Optional subject of the email that will be sent.
        :param str redirect_to: Optional URL to redirect form submitter to after submission.
        :param str email_address: Email address of the submitting user.
        :param str success_message: Optional success message to display if no _redirectTo present.
        :param str spam_check: Anti-spam honeypot field; include it hidden and leave it empty.
        :param str other_parameters: Any other named fields become the body of the sent email.
        :param _return_http_data_only: response data without head status code
            and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
            be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request; a number or
            a (connection, read) tuple.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Reject any keyword that is neither a form parameter nor a
        # transport-control option.
        for key in kwargs:
            if (key not in self._QUERY_KEY_BY_PARAM
                    and key not in self._TRANSPORT_PARAMS):
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method submit_form" % key)

        # Forward every supplied (non-None) form parameter as a query-string
        # entry, preserving the endpoint's expected key names.
        query_params = [
            (query_key, kwargs[param])
            for param, query_key in self._QUERY_KEY_BY_PARAM.items()
            if kwargs.get(param) is not None
        ]

        header_params = {
            'Accept': self.api_client.select_header_accept(['*/*']),
        }

        return self.api_client.call_api(
            '/forms', 'POST',
            {},                 # no path parameters
            query_params,
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type='str',
            auth_settings=['API_KEY'],
            async_req=kwargs.get('async_req'),
            _return_http_data_only=kwargs.get('_return_http_data_only'),
            _preload_content=kwargs.get('_preload_content', True),
            _request_timeout=kwargs.get('_request_timeout'),
            collection_formats={})
<|reserved_special_token_1|>
# coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://docs.mailslurp.com/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Contact: contact@mailslurp.dev
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from mailslurp_client.api_client import ApiClient
from mailslurp_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class FormControllerApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Accepted keyword arguments and the query-string keys they map to.
    _PARAM_TO_QUERY_KEY = (
        ('to', '_to'),
        ('subject', '_subject'),
        ('redirect_to', '_redirectTo'),
        ('email_address', '_emailAddress'),
        ('success_message', '_successMessage'),
        ('spam_check', '_spamCheck'),
        ('other_parameters', 'otherParameters'),
    )
    # Keywords understood by ApiClient.call_api rather than the endpoint.
    _TRANSPORT_KEYS = ('async_req', '_return_http_data_only',
                       '_preload_content', '_request_timeout')

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def _validate_kwargs(self, kwargs):
        """Raise ApiTypeError for any keyword the endpoint does not accept."""
        allowed = set(self._TRANSPORT_KEYS)
        allowed.update(name for name, _ in self._PARAM_TO_QUERY_KEY)
        for key in kwargs:
            if key not in allowed:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method submit_form" % key)

    def submit_form(self, **kwargs):  # noqa: E501
        """Submit a form to be parsed and sent as an email to an address determined by the form fields  # noqa: E501

        Convenience wrapper around :meth:`submit_form_with_http_info` that
        returns only the response body.  The endpoint emails the submitted
        form fields (files become attachments) to the address supplied via the
        ``_to`` field or query parameter; the other meta parameters
        (``_subject``, ``_redirectTo``, ``_emailAddress``, ``_successMessage``,
        ``_spamCheck``) tune the email and the post-submit behaviour.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.submit_form(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str to: The email address that submitted form should be sent to.
        :param str subject: Optional subject of the email that will be sent.
        :param str redirect_to: Optional URL to redirect form submitter to after submission.
        :param str email_address: Email address of the submitting user.
        :param str success_message: Optional success message to display if no _redirectTo present.
        :param str spam_check: Anti-spam honeypot field; include it hidden and leave it empty.
        :param str other_parameters: Any other named fields become the body of the sent email.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
            be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request; a number or
            a (connection, read) tuple.
        :return: str
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.submit_form_with_http_info(**kwargs)  # noqa: E501

    def submit_form_with_http_info(self, **kwargs):  # noqa: E501
        """Submit a form to be parsed and sent as an email to an address determined by the form fields  # noqa: E501

        Same as :meth:`submit_form` but the (synchronous) return value is the
        full ``(body, status_code, headers)`` tuple.

        :param async_req bool: execute request asynchronously
        :param str to: The email address that submitted form should be sent to.
        :param str subject: Optional subject of the email that will be sent.
        :param str redirect_to: Optional URL to redirect form submitter to after submission.
        :param str email_address: Email address of the submitting user.
        :param str success_message: Optional success message to display if no _redirectTo present.
        :param str spam_check: Anti-spam honeypot field; include it hidden and leave it empty.
        :param str other_parameters: Any other named fields become the body of the sent email.
        :param _return_http_data_only: response data without head status code
            and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
            be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request; a number or
            a (connection, read) tuple.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        self._validate_kwargs(kwargs)

        # Forward every supplied (non-None) form parameter as a query-string
        # entry, in the endpoint's expected order and key names.
        query_params = [
            (query_key, kwargs[name])
            for name, query_key in self._PARAM_TO_QUERY_KEY
            if kwargs.get(name) is not None
        ]

        # HTTP header `Accept`
        header_params = {
            'Accept': self.api_client.select_header_accept(['*/*']),  # noqa: E501
        }

        return self.api_client.call_api(
            '/forms', 'POST',
            {},            # path parameters (none)
            query_params,
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type='str',  # noqa: E501
            auth_settings=['API_KEY'],  # noqa: E501
            async_req=kwargs.get('async_req'),
            _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
            _preload_content=kwargs.get('_preload_content', True),
            _request_timeout=kwargs.get('_request_timeout'),
            collection_formats={})
|
flexible
|
{
"blob_id": "a4ccf373695b7df60039bc8f6440a6ad43d265c1",
"index": 3750,
"step-1": "<mask token>\n\n\nclass FormControllerApi(object):\n <mask token>\n <mask token>\n <mask token>\n\n def submit_form_with_http_info(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(str, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n local_var_params = locals()\n all_params = ['to', 'subject', 'redirect_to', 'email_address',\n 'success_message', 'spam_check', 'other_parameters']\n all_params.extend(['async_req', '_return_http_data_only',\n '_preload_content', '_request_timeout'])\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s' to method submit_form\"\n % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'to' in local_var_params and local_var_params['to'] is not None:\n query_params.append(('_to', local_var_params['to']))\n if 'subject' in local_var_params and local_var_params['subject'\n ] is not None:\n query_params.append(('_subject', local_var_params['subject']))\n if 'redirect_to' in local_var_params and local_var_params['redirect_to'\n ] is not None:\n query_params.append(('_redirectTo', local_var_params[\n 'redirect_to']))\n if 'email_address' in local_var_params and local_var_params[\n 'email_address'] is not None:\n query_params.append(('_emailAddress', local_var_params[\n 'email_address']))\n if 'success_message' in local_var_params and local_var_params[\n 'success_message'] is not None:\n query_params.append(('_successMessage', local_var_params[\n 'success_message']))\n if 'spam_check' in local_var_params and local_var_params['spam_check'\n ] is not None:\n query_params.append(('_spamCheck', local_var_params['spam_check']))\n if 'other_parameters' in local_var_params and local_var_params[\n 'other_parameters'] is not None:\n query_params.append(('otherParameters', local_var_params[\n 'other_parameters']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = 
None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n auth_settings = ['API_KEY']\n return self.api_client.call_api('/forms', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='str',\n auth_settings=auth_settings, async_req=local_var_params.get(\n 'async_req'), _return_http_data_only=local_var_params.get(\n '_return_http_data_only'), _preload_content=local_var_params.\n get('_preload_content', True), _request_timeout=\n local_var_params.get('_request_timeout'), collection_formats=\n collection_formats)\n",
"step-2": "<mask token>\n\n\nclass FormControllerApi(object):\n <mask token>\n <mask token>\n\n def submit_form(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: str\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n return self.submit_form_with_http_info(**kwargs)\n\n def submit_form_with_http_info(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. 
#### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(str, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n local_var_params = locals()\n all_params = ['to', 'subject', 'redirect_to', 'email_address',\n 'success_message', 'spam_check', 'other_parameters']\n all_params.extend(['async_req', '_return_http_data_only',\n '_preload_content', '_request_timeout'])\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s' to method submit_form\"\n % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'to' in local_var_params and local_var_params['to'] is not None:\n query_params.append(('_to', local_var_params['to']))\n if 'subject' in local_var_params and local_var_params['subject'\n ] is not None:\n query_params.append(('_subject', local_var_params['subject']))\n if 'redirect_to' in local_var_params and local_var_params['redirect_to'\n ] is not None:\n query_params.append(('_redirectTo', local_var_params[\n 'redirect_to']))\n if 'email_address' in local_var_params and local_var_params[\n 'email_address'] is not None:\n query_params.append(('_emailAddress', local_var_params[\n 'email_address']))\n if 'success_message' in local_var_params and local_var_params[\n 'success_message'] is not None:\n query_params.append(('_successMessage', local_var_params[\n 'success_message']))\n if 'spam_check' in local_var_params and local_var_params['spam_check'\n ] is not None:\n query_params.append(('_spamCheck', local_var_params['spam_check']))\n if 'other_parameters' in local_var_params and local_var_params[\n 'other_parameters'] is not None:\n query_params.append(('otherParameters', local_var_params[\n 'other_parameters']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = 
None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n auth_settings = ['API_KEY']\n return self.api_client.call_api('/forms', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='str',\n auth_settings=auth_settings, async_req=local_var_params.get(\n 'async_req'), _return_http_data_only=local_var_params.get(\n '_return_http_data_only'), _preload_content=local_var_params.\n get('_preload_content', True), _request_timeout=\n local_var_params.get('_request_timeout'), collection_formats=\n collection_formats)\n",
"step-3": "<mask token>\n\n\nclass FormControllerApi(object):\n <mask token>\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def submit_form(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. 
# noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: str\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n return self.submit_form_with_http_info(**kwargs)\n\n def submit_form_with_http_info(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . 
You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(str, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n local_var_params = locals()\n all_params = ['to', 'subject', 'redirect_to', 'email_address',\n 'success_message', 'spam_check', 'other_parameters']\n all_params.extend(['async_req', '_return_http_data_only',\n '_preload_content', '_request_timeout'])\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s' to method submit_form\"\n % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'to' in local_var_params and local_var_params['to'] is not None:\n query_params.append(('_to', local_var_params['to']))\n if 'subject' in local_var_params and local_var_params['subject'\n ] is not None:\n query_params.append(('_subject', local_var_params['subject']))\n if 'redirect_to' in local_var_params and local_var_params['redirect_to'\n ] is not None:\n query_params.append(('_redirectTo', local_var_params[\n 'redirect_to']))\n if 'email_address' in local_var_params and local_var_params[\n 'email_address'] is not None:\n query_params.append(('_emailAddress', local_var_params[\n 'email_address']))\n if 'success_message' in local_var_params and local_var_params[\n 'success_message'] is not None:\n query_params.append(('_successMessage', local_var_params[\n 'success_message']))\n if 'spam_check' in local_var_params and local_var_params['spam_check'\n ] is not None:\n query_params.append(('_spamCheck', local_var_params['spam_check']))\n if 'other_parameters' in local_var_params and local_var_params[\n 'other_parameters'] is not None:\n query_params.append(('otherParameters', local_var_params[\n 'other_parameters']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = 
None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n auth_settings = ['API_KEY']\n return self.api_client.call_api('/forms', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='str',\n auth_settings=auth_settings, async_req=local_var_params.get(\n 'async_req'), _return_http_data_only=local_var_params.get(\n '_return_http_data_only'), _preload_content=local_var_params.\n get('_preload_content', True), _request_timeout=\n local_var_params.get('_request_timeout'), collection_formats=\n collection_formats)\n",
"step-4": "<mask token>\n\n\nclass FormControllerApi(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def submit_form(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . 
You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: str\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n return self.submit_form_with_http_info(**kwargs)\n\n def submit_form_with_http_info(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"test@example.com\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/forms?_to=test@example.com\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . 
You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(str, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n local_var_params = locals()\n all_params = ['to', 'subject', 'redirect_to', 'email_address',\n 'success_message', 'spam_check', 'other_parameters']\n all_params.extend(['async_req', '_return_http_data_only',\n '_preload_content', '_request_timeout'])\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s' to method submit_form\"\n % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'to' in local_var_params and local_var_params['to'] is not None:\n query_params.append(('_to', local_var_params['to']))\n if 'subject' in local_var_params and local_var_params['subject'\n ] is not None:\n query_params.append(('_subject', local_var_params['subject']))\n if 'redirect_to' in local_var_params and local_var_params['redirect_to'\n ] is not None:\n query_params.append(('_redirectTo', local_var_params[\n 'redirect_to']))\n if 'email_address' in local_var_params and local_var_params[\n 'email_address'] is not None:\n query_params.append(('_emailAddress', local_var_params[\n 'email_address']))\n if 'success_message' in local_var_params and local_var_params[\n 'success_message'] is not None:\n query_params.append(('_successMessage', local_var_params[\n 'success_message']))\n if 'spam_check' in local_var_params and local_var_params['spam_check'\n ] is not None:\n query_params.append(('_spamCheck', local_var_params['spam_check']))\n if 'other_parameters' in local_var_params and local_var_params[\n 'other_parameters'] is not None:\n query_params.append(('otherParameters', local_var_params[\n 'other_parameters']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = 
None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n auth_settings = ['API_KEY']\n return self.api_client.call_api('/forms', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='str',\n auth_settings=auth_settings, async_req=local_var_params.get(\n 'async_req'), _return_http_data_only=local_var_params.get(\n '_return_http_data_only'), _preload_content=local_var_params.\n get('_preload_content', True), _request_timeout=\n local_var_params.get('_request_timeout'), collection_formats=\n collection_formats)\n",
"step-5": "# coding: utf-8\n\n\"\"\"\n MailSlurp API\n\n MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://docs.mailslurp.com/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501\n\n The version of the OpenAPI document: 6.5.2\n Contact: contact@mailslurp.dev\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\nimport re # noqa: F401\n\n# python 2 and python 3 compatibility library\nimport six\n\nfrom mailslurp_client.api_client import ApiClient\nfrom mailslurp_client.exceptions import ( # noqa: F401\n ApiTypeError,\n ApiValueError\n)\n\n\nclass FormControllerApi(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def submit_form(self, **kwargs): # noqa: E501\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. 
These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\\\"https://python.api.mailslurp.com/forms\\\" method=\\\"post\\\" > <input name=\\\"_to\\\" type=\\\"hidden\\\" value=\\\"test@example.com\\\"/> <textarea name=\\\"feedback\\\"></textarea> <button type=\\\"submit\\\">Submit</button> </form> ``` #### URL Example ```html <form action=\\\"https://python.api.mailslurp.com/forms?_to=test@example.com\\\" method=\\\"post\\\" > <textarea name=\\\"feedback\\\"></textarea> <button type=\\\"submit\\\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\\\"multipart/form-data\\\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. 
Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: str\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n return self.submit_form_with_http_info(**kwargs) # noqa: E501\n\n def submit_form_with_http_info(self, **kwargs): # noqa: E501\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. 
These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `?_to=test@example.com` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\\\"https://python.api.mailslurp.com/forms\\\" method=\\\"post\\\" > <input name=\\\"_to\\\" type=\\\"hidden\\\" value=\\\"test@example.com\\\"/> <textarea name=\\\"feedback\\\"></textarea> <button type=\\\"submit\\\">Submit</button> </form> ``` #### URL Example ```html <form action=\\\"https://python.api.mailslurp.com/forms?_to=test@example.com\\\" method=\\\"post\\\" > <textarea name=\\\"feedback\\\"></textarea> <button type=\\\"submit\\\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\\\"multipart/form-data\\\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. 
Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(str, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n local_var_params = locals()\n\n all_params = [\n 'to',\n 'subject',\n 'redirect_to',\n 'email_address',\n 'success_message',\n 'spam_check',\n 'other_parameters'\n ]\n all_params.extend(\n [\n 'async_req',\n '_return_http_data_only',\n '_preload_content',\n '_request_timeout'\n ]\n )\n\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method submit_form\" % key\n )\n local_var_params[key] = val\n del local_var_params['kwargs']\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'to' in local_var_params and local_var_params['to'] is not None: # noqa: E501\n query_params.append(('_to', local_var_params['to'])) # noqa: E501\n if 'subject' in local_var_params and local_var_params['subject'] is not None: # noqa: E501\n query_params.append(('_subject', local_var_params['subject'])) # noqa: E501\n if 'redirect_to' in local_var_params and local_var_params['redirect_to'] is not None: # noqa: E501\n query_params.append(('_redirectTo', local_var_params['redirect_to'])) # noqa: E501\n if 'email_address' in local_var_params and local_var_params['email_address'] is not None: # noqa: E501\n query_params.append(('_emailAddress', local_var_params['email_address'])) # noqa: E501\n if 'success_message' in local_var_params and local_var_params['success_message'] is not None: # noqa: E501\n query_params.append(('_successMessage', local_var_params['success_message'])) # noqa: E501\n if 'spam_check' in local_var_params and local_var_params['spam_check'] is not None: # noqa: E501\n query_params.append(('_spamCheck', local_var_params['spam_check'])) # noqa: E501\n if 'other_parameters' in local_var_params and 
local_var_params['other_parameters'] is not None: # noqa: E501\n query_params.append(('otherParameters', local_var_params['other_parameters'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['*/*']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['API_KEY'] # noqa: E501\n\n return self.api_client.call_api(\n '/forms', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='str', # noqa: E501\n auth_settings=auth_settings,\n async_req=local_var_params.get('async_req'),\n _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501\n _preload_content=local_var_params.get('_preload_content', True),\n _request_timeout=local_var_params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"step-ids": [
2,
3,
4,
5,
7
]
}
|
[
2,
3,
4,
5,
7
] |
<|reserved_special_token_0|>
def main():
args, ipython_args = parser.parse_known_args()
lines = ['from diofant import *', 'init_printing()',
"a, b, c, d, t, x, y, z = symbols('a:d t x:z')",
"k, m, n = symbols('k m n', integer=True)",
"f, g, h = symbols('f g h', cls=Function)",
'init_printing(pretty_print=True, use_unicode=True)']
try:
import IPython
import traitlets
except ImportError:
args.no_ipython = True
if not args.no_ipython:
config = traitlets.config.loader.Config()
shell = config.InteractiveShell
ast_transformers = shell.ast_transformers
if not args.no_wrap_division:
ast_transformers.append(IntegerDivisionWrapper())
shell.confirm_exit = False
config.TerminalIPythonApp.display_banner = False
config.TerminalInteractiveShell.autoformatter = None
app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)
app.initialize(ipython_args)
shell = app.shell
for l in lines:
shell.run_cell(l, silent=True)
if args.auto_symbols:
shell.run_cell(
'from diofant.interactive.session import AutomaticSymbols')
shell.run_cell('ip = get_ipython()')
shell.run_cell(
'ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')
shell.run_cell('del ip')
if args.unicode_identifiers:
shell.run_cell(
'from diofant.interactive.session import unicode_identifiers')
shell.run_cell('ip = get_ipython()')
shell.run_cell(
'ip.input_transformers_cleanup.append(unicode_identifiers)')
shell.run_cell('del ip')
app.start()
else:
ast_transformers = []
source_transformers = []
ns = {}
if not args.no_wrap_division:
ast_transformers.append(IntegerDivisionWrapper())
if args.auto_symbols:
ast_transformers.append(AutomaticSymbols(ns))
if args.unicode_identifiers:
source_transformers.append(unicode_identifiers)
class DiofantConsole(code.InteractiveConsole):
"""An interactive console with readline support."""
def __init__(self, ast_transformers=[], source_transformers=[],
**kwargs):
super().__init__(**kwargs)
readline.set_completer(rlcompleter.Completer(ns).complete)
readline.parse_and_bind('tab: complete')
history = os.path.expanduser('~/.python_history')
readline.read_history_file(history)
atexit.register(readline.write_history_file, history)
self.ast_transformers = ast_transformers
self.source_transformers = source_transformers
def runsource(self, source, filename='<input>', symbol='single'):
for t in self.source_transformers:
source = '\n'.join(t(source.splitlines()))
try:
tree = ast.parse(source)
except SyntaxError:
return True
for t in self.ast_transformers:
tree = t.visit(tree)
ast.fix_missing_locations(tree)
source = ast.unparse(tree)
source = source.split('\n')
source = ';'.join(source)
return super().runsource(source, filename=filename, symbol=
symbol)
c = DiofantConsole(ast_transformers=ast_transformers,
source_transformers=source_transformers, locals=ns)
for l in lines:
c.push(l)
c.interact('', '')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser.add_argument('--no-wrap-division', help=
"Don't wrap integer divisions with Fraction", action='store_true')
parser.add_argument('-a', '--auto-symbols', help=
"Automatically create missing Symbol's", action='store_true')
parser.add_argument('--no-ipython', help="Don't use IPython", action=
'store_true')
parser.add_argument('--unicode-identifiers', help=
'Allow any unicode identifiers', action='store_true')
def main():
args, ipython_args = parser.parse_known_args()
lines = ['from diofant import *', 'init_printing()',
"a, b, c, d, t, x, y, z = symbols('a:d t x:z')",
"k, m, n = symbols('k m n', integer=True)",
"f, g, h = symbols('f g h', cls=Function)",
'init_printing(pretty_print=True, use_unicode=True)']
try:
import IPython
import traitlets
except ImportError:
args.no_ipython = True
if not args.no_ipython:
config = traitlets.config.loader.Config()
shell = config.InteractiveShell
ast_transformers = shell.ast_transformers
if not args.no_wrap_division:
ast_transformers.append(IntegerDivisionWrapper())
shell.confirm_exit = False
config.TerminalIPythonApp.display_banner = False
config.TerminalInteractiveShell.autoformatter = None
app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)
app.initialize(ipython_args)
shell = app.shell
for l in lines:
shell.run_cell(l, silent=True)
if args.auto_symbols:
shell.run_cell(
'from diofant.interactive.session import AutomaticSymbols')
shell.run_cell('ip = get_ipython()')
shell.run_cell(
'ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')
shell.run_cell('del ip')
if args.unicode_identifiers:
shell.run_cell(
'from diofant.interactive.session import unicode_identifiers')
shell.run_cell('ip = get_ipython()')
shell.run_cell(
'ip.input_transformers_cleanup.append(unicode_identifiers)')
shell.run_cell('del ip')
app.start()
else:
ast_transformers = []
source_transformers = []
ns = {}
if not args.no_wrap_division:
ast_transformers.append(IntegerDivisionWrapper())
if args.auto_symbols:
ast_transformers.append(AutomaticSymbols(ns))
if args.unicode_identifiers:
source_transformers.append(unicode_identifiers)
class DiofantConsole(code.InteractiveConsole):
"""An interactive console with readline support."""
def __init__(self, ast_transformers=[], source_transformers=[],
**kwargs):
super().__init__(**kwargs)
readline.set_completer(rlcompleter.Completer(ns).complete)
readline.parse_and_bind('tab: complete')
history = os.path.expanduser('~/.python_history')
readline.read_history_file(history)
atexit.register(readline.write_history_file, history)
self.ast_transformers = ast_transformers
self.source_transformers = source_transformers
def runsource(self, source, filename='<input>', symbol='single'):
for t in self.source_transformers:
source = '\n'.join(t(source.splitlines()))
try:
tree = ast.parse(source)
except SyntaxError:
return True
for t in self.ast_transformers:
tree = t.visit(tree)
ast.fix_missing_locations(tree)
source = ast.unparse(tree)
source = source.split('\n')
source = ';'.join(source)
return super().runsource(source, filename=filename, symbol=
symbol)
c = DiofantConsole(ast_transformers=ast_transformers,
source_transformers=source_transformers, locals=ns)
for l in lines:
c.push(l)
c.interact('', '')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__ = ()
parser = argparse.ArgumentParser(description=__doc__, prog='python -m diofant')
parser.add_argument('--no-wrap-division', help=
"Don't wrap integer divisions with Fraction", action='store_true')
parser.add_argument('-a', '--auto-symbols', help=
"Automatically create missing Symbol's", action='store_true')
parser.add_argument('--no-ipython', help="Don't use IPython", action=
'store_true')
parser.add_argument('--unicode-identifiers', help=
'Allow any unicode identifiers', action='store_true')
def main():
args, ipython_args = parser.parse_known_args()
lines = ['from diofant import *', 'init_printing()',
"a, b, c, d, t, x, y, z = symbols('a:d t x:z')",
"k, m, n = symbols('k m n', integer=True)",
"f, g, h = symbols('f g h', cls=Function)",
'init_printing(pretty_print=True, use_unicode=True)']
try:
import IPython
import traitlets
except ImportError:
args.no_ipython = True
if not args.no_ipython:
config = traitlets.config.loader.Config()
shell = config.InteractiveShell
ast_transformers = shell.ast_transformers
if not args.no_wrap_division:
ast_transformers.append(IntegerDivisionWrapper())
shell.confirm_exit = False
config.TerminalIPythonApp.display_banner = False
config.TerminalInteractiveShell.autoformatter = None
app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)
app.initialize(ipython_args)
shell = app.shell
for l in lines:
shell.run_cell(l, silent=True)
if args.auto_symbols:
shell.run_cell(
'from diofant.interactive.session import AutomaticSymbols')
shell.run_cell('ip = get_ipython()')
shell.run_cell(
'ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')
shell.run_cell('del ip')
if args.unicode_identifiers:
shell.run_cell(
'from diofant.interactive.session import unicode_identifiers')
shell.run_cell('ip = get_ipython()')
shell.run_cell(
'ip.input_transformers_cleanup.append(unicode_identifiers)')
shell.run_cell('del ip')
app.start()
else:
ast_transformers = []
source_transformers = []
ns = {}
if not args.no_wrap_division:
ast_transformers.append(IntegerDivisionWrapper())
if args.auto_symbols:
ast_transformers.append(AutomaticSymbols(ns))
if args.unicode_identifiers:
source_transformers.append(unicode_identifiers)
class DiofantConsole(code.InteractiveConsole):
"""An interactive console with readline support."""
def __init__(self, ast_transformers=[], source_transformers=[],
**kwargs):
super().__init__(**kwargs)
readline.set_completer(rlcompleter.Completer(ns).complete)
readline.parse_and_bind('tab: complete')
history = os.path.expanduser('~/.python_history')
readline.read_history_file(history)
atexit.register(readline.write_history_file, history)
self.ast_transformers = ast_transformers
self.source_transformers = source_transformers
def runsource(self, source, filename='<input>', symbol='single'):
for t in self.source_transformers:
source = '\n'.join(t(source.splitlines()))
try:
tree = ast.parse(source)
except SyntaxError:
return True
for t in self.ast_transformers:
tree = t.visit(tree)
ast.fix_missing_locations(tree)
source = ast.unparse(tree)
source = source.split('\n')
source = ';'.join(source)
return super().runsource(source, filename=filename, symbol=
symbol)
c = DiofantConsole(ast_transformers=ast_transformers,
source_transformers=source_transformers, locals=ns)
for l in lines:
c.push(l)
c.interact('', '')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import argparse
import ast
import atexit
import code
import os
import readline
import rlcompleter
from diofant.interactive.session import AutomaticSymbols, IntegerDivisionWrapper, unicode_identifiers
__all__ = ()
parser = argparse.ArgumentParser(description=__doc__, prog='python -m diofant')
parser.add_argument('--no-wrap-division', help=
"Don't wrap integer divisions with Fraction", action='store_true')
parser.add_argument('-a', '--auto-symbols', help=
"Automatically create missing Symbol's", action='store_true')
parser.add_argument('--no-ipython', help="Don't use IPython", action=
'store_true')
parser.add_argument('--unicode-identifiers', help=
'Allow any unicode identifiers', action='store_true')
def main():
args, ipython_args = parser.parse_known_args()
lines = ['from diofant import *', 'init_printing()',
"a, b, c, d, t, x, y, z = symbols('a:d t x:z')",
"k, m, n = symbols('k m n', integer=True)",
"f, g, h = symbols('f g h', cls=Function)",
'init_printing(pretty_print=True, use_unicode=True)']
try:
import IPython
import traitlets
except ImportError:
args.no_ipython = True
if not args.no_ipython:
config = traitlets.config.loader.Config()
shell = config.InteractiveShell
ast_transformers = shell.ast_transformers
if not args.no_wrap_division:
ast_transformers.append(IntegerDivisionWrapper())
shell.confirm_exit = False
config.TerminalIPythonApp.display_banner = False
config.TerminalInteractiveShell.autoformatter = None
app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)
app.initialize(ipython_args)
shell = app.shell
for l in lines:
shell.run_cell(l, silent=True)
if args.auto_symbols:
shell.run_cell(
'from diofant.interactive.session import AutomaticSymbols')
shell.run_cell('ip = get_ipython()')
shell.run_cell(
'ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')
shell.run_cell('del ip')
if args.unicode_identifiers:
shell.run_cell(
'from diofant.interactive.session import unicode_identifiers')
shell.run_cell('ip = get_ipython()')
shell.run_cell(
'ip.input_transformers_cleanup.append(unicode_identifiers)')
shell.run_cell('del ip')
app.start()
else:
ast_transformers = []
source_transformers = []
ns = {}
if not args.no_wrap_division:
ast_transformers.append(IntegerDivisionWrapper())
if args.auto_symbols:
ast_transformers.append(AutomaticSymbols(ns))
if args.unicode_identifiers:
source_transformers.append(unicode_identifiers)
class DiofantConsole(code.InteractiveConsole):
"""An interactive console with readline support."""
def __init__(self, ast_transformers=[], source_transformers=[],
**kwargs):
super().__init__(**kwargs)
readline.set_completer(rlcompleter.Completer(ns).complete)
readline.parse_and_bind('tab: complete')
history = os.path.expanduser('~/.python_history')
readline.read_history_file(history)
atexit.register(readline.write_history_file, history)
self.ast_transformers = ast_transformers
self.source_transformers = source_transformers
def runsource(self, source, filename='<input>', symbol='single'):
for t in self.source_transformers:
source = '\n'.join(t(source.splitlines()))
try:
tree = ast.parse(source)
except SyntaxError:
return True
for t in self.ast_transformers:
tree = t.visit(tree)
ast.fix_missing_locations(tree)
source = ast.unparse(tree)
source = source.split('\n')
source = ';'.join(source)
return super().runsource(source, filename=filename, symbol=
symbol)
c = DiofantConsole(ast_transformers=ast_transformers,
source_transformers=source_transformers, locals=ns)
for l in lines:
c.push(l)
c.interact('', '')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
"""
Python shell for Diofant.
This is just a normal Python shell (IPython shell if you have the
IPython package installed), that adds default imports and run
some initialization code.
"""
import argparse
import ast
import atexit
import code
import os
import readline
import rlcompleter
from diofant.interactive.session import (AutomaticSymbols,
IntegerDivisionWrapper,
unicode_identifiers)
__all__ = ()
# Command-line interface for ``python -m diofant``; parsed in main().
# Unrecognized options are forwarded to IPython (see parse_known_args()).
parser = argparse.ArgumentParser(description=__doc__,
                                 prog='python -m diofant')
parser.add_argument('--no-wrap-division',
                    help="Don't wrap integer divisions with Fraction",
                    action='store_true')
parser.add_argument('-a', '--auto-symbols',
                    help="Automatically create missing Symbol's",
                    action='store_true')
parser.add_argument('--no-ipython', help="Don't use IPython",
                    action='store_true')
parser.add_argument('--unicode-identifiers',
                    help='Allow any unicode identifiers',
                    action='store_true')
def main():
    """Run the Diofant interactive shell.

    Uses an IPython-based shell when IPython and traitlets are
    installed (unless ``--no-ipython`` is given); otherwise falls back
    to a plain :class:`code.InteractiveConsole` with readline support.
    Both variants execute the same initialization statements (importing
    diofant and predefining common symbols) before the first prompt.
    """
    args, ipython_args = parser.parse_known_args()

    # Statements executed in the user namespace before handing over control.
    lines = ['from diofant import *',
             'init_printing()',
             "a, b, c, d, t, x, y, z = symbols('a:d t x:z')",
             "k, m, n = symbols('k m n', integer=True)",
             "f, g, h = symbols('f g h', cls=Function)",
             'init_printing(pretty_print=True, use_unicode=True)']

    try:
        import IPython
        import traitlets
    except ImportError:
        # IPython isn't available: force the plain-console fallback.
        args.no_ipython = True

    if not args.no_ipython:
        config = traitlets.config.loader.Config()
        shell = config.InteractiveShell
        ast_transformers = shell.ast_transformers
        if not args.no_wrap_division:
            ast_transformers.append(IntegerDivisionWrapper())
        shell.confirm_exit = False
        config.TerminalIPythonApp.display_banner = False
        config.TerminalInteractiveShell.autoformatter = None

        app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)
        app.initialize(ipython_args)
        shell = app.shell

        for line in lines:
            shell.run_cell(line, silent=True)

        # These transformers must be created inside the running shell's
        # own user namespace, hence the indirection through run_cell().
        if args.auto_symbols:
            shell.run_cell('from diofant.interactive.session import AutomaticSymbols')
            shell.run_cell('ip = get_ipython()')
            shell.run_cell('ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')
            shell.run_cell('del ip')
        if args.unicode_identifiers:
            shell.run_cell('from diofant.interactive.session import unicode_identifiers')
            shell.run_cell('ip = get_ipython()')
            shell.run_cell('ip.input_transformers_cleanup.append(unicode_identifiers)')
            shell.run_cell('del ip')

        app.start()
    else:
        ast_transformers = []
        source_transformers = []
        ns = {}

        if not args.no_wrap_division:
            ast_transformers.append(IntegerDivisionWrapper())
        if args.auto_symbols:
            ast_transformers.append(AutomaticSymbols(ns))
        if args.unicode_identifiers:
            source_transformers.append(unicode_identifiers)

        class DiofantConsole(code.InteractiveConsole):
            """An interactive console with readline support."""

            def __init__(self, ast_transformers=None,
                         source_transformers=None, **kwargs):
                super().__init__(**kwargs)
                readline.set_completer(rlcompleter.Completer(ns).complete)
                readline.parse_and_bind('tab: complete')
                history = os.path.expanduser('~/.python_history')
                try:
                    readline.read_history_file(history)
                except OSError:
                    # No history file yet (e.g. first run) -- start fresh
                    # instead of crashing with FileNotFoundError.
                    pass
                atexit.register(readline.write_history_file, history)
                # No mutable default arguments: use None sentinels so each
                # instance gets its own (or the caller-supplied) lists.
                self.ast_transformers = ([] if ast_transformers is None
                                         else ast_transformers)
                self.source_transformers = ([] if source_transformers is None
                                            else source_transformers)

            def runsource(self, source, filename='<input>', symbol='single'):
                # Source-level transformers run on the raw text first.
                for t in self.source_transformers:
                    source = '\n'.join(t(source.splitlines()))
                try:
                    tree = ast.parse(source)
                except SyntaxError:
                    # True tells the console to keep prompting (the input
                    # may simply be incomplete).
                    return True
                for t in self.ast_transformers:
                    tree = t.visit(tree)
                ast.fix_missing_locations(tree)
                source = ast.unparse(tree)
                # Collapse to one line so the base class compiles it in
                # 'single' mode.
                source = ';'.join(source.split('\n'))
                return super().runsource(source, filename=filename,
                                         symbol=symbol)

        c = DiofantConsole(ast_transformers=ast_transformers,
                           source_transformers=source_transformers, locals=ns)

        for line in lines:
            c.push(line)

        c.interact('', '')
# Entry point when invoked as ``python -m diofant``.
if __name__ == '__main__':  # pragma: no branch
    main()
|
flexible
|
{
"blob_id": "80e395715d3ae216beb17e7caed1d8d03c5c56de",
"index": 9943,
"step-1": "<mask token>\n\n\ndef main():\n args, ipython_args = parser.parse_known_args()\n lines = ['from diofant import *', 'init_printing()',\n \"a, b, c, d, t, x, y, z = symbols('a:d t x:z')\",\n \"k, m, n = symbols('k m n', integer=True)\",\n \"f, g, h = symbols('f g h', cls=Function)\",\n 'init_printing(pretty_print=True, use_unicode=True)']\n try:\n import IPython\n import traitlets\n except ImportError:\n args.no_ipython = True\n if not args.no_ipython:\n config = traitlets.config.loader.Config()\n shell = config.InteractiveShell\n ast_transformers = shell.ast_transformers\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n shell.confirm_exit = False\n config.TerminalIPythonApp.display_banner = False\n config.TerminalInteractiveShell.autoformatter = None\n app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)\n app.initialize(ipython_args)\n shell = app.shell\n for l in lines:\n shell.run_cell(l, silent=True)\n if args.auto_symbols:\n shell.run_cell(\n 'from diofant.interactive.session import AutomaticSymbols')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')\n shell.run_cell('del ip')\n if args.unicode_identifiers:\n shell.run_cell(\n 'from diofant.interactive.session import unicode_identifiers')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.input_transformers_cleanup.append(unicode_identifiers)')\n shell.run_cell('del ip')\n app.start()\n else:\n ast_transformers = []\n source_transformers = []\n ns = {}\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n if args.auto_symbols:\n ast_transformers.append(AutomaticSymbols(ns))\n if args.unicode_identifiers:\n source_transformers.append(unicode_identifiers)\n\n\n class DiofantConsole(code.InteractiveConsole):\n \"\"\"An interactive console with readline support.\"\"\"\n\n def __init__(self, ast_transformers=[], source_transformers=[],\n 
**kwargs):\n super().__init__(**kwargs)\n readline.set_completer(rlcompleter.Completer(ns).complete)\n readline.parse_and_bind('tab: complete')\n history = os.path.expanduser('~/.python_history')\n readline.read_history_file(history)\n atexit.register(readline.write_history_file, history)\n self.ast_transformers = ast_transformers\n self.source_transformers = source_transformers\n\n def runsource(self, source, filename='<input>', symbol='single'):\n for t in self.source_transformers:\n source = '\\n'.join(t(source.splitlines()))\n try:\n tree = ast.parse(source)\n except SyntaxError:\n return True\n for t in self.ast_transformers:\n tree = t.visit(tree)\n ast.fix_missing_locations(tree)\n source = ast.unparse(tree)\n source = source.split('\\n')\n source = ';'.join(source)\n return super().runsource(source, filename=filename, symbol=\n symbol)\n c = DiofantConsole(ast_transformers=ast_transformers,\n source_transformers=source_transformers, locals=ns)\n for l in lines:\n c.push(l)\n c.interact('', '')\n\n\n<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('--no-wrap-division', help=\n \"Don't wrap integer divisions with Fraction\", action='store_true')\nparser.add_argument('-a', '--auto-symbols', help=\n \"Automatically create missing Symbol's\", action='store_true')\nparser.add_argument('--no-ipython', help=\"Don't use IPython\", action=\n 'store_true')\nparser.add_argument('--unicode-identifiers', help=\n 'Allow any unicode identifiers', action='store_true')\n\n\ndef main():\n args, ipython_args = parser.parse_known_args()\n lines = ['from diofant import *', 'init_printing()',\n \"a, b, c, d, t, x, y, z = symbols('a:d t x:z')\",\n \"k, m, n = symbols('k m n', integer=True)\",\n \"f, g, h = symbols('f g h', cls=Function)\",\n 'init_printing(pretty_print=True, use_unicode=True)']\n try:\n import IPython\n import traitlets\n except ImportError:\n args.no_ipython = True\n if not args.no_ipython:\n config = traitlets.config.loader.Config()\n shell = config.InteractiveShell\n ast_transformers = shell.ast_transformers\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n shell.confirm_exit = False\n config.TerminalIPythonApp.display_banner = False\n config.TerminalInteractiveShell.autoformatter = None\n app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)\n app.initialize(ipython_args)\n shell = app.shell\n for l in lines:\n shell.run_cell(l, silent=True)\n if args.auto_symbols:\n shell.run_cell(\n 'from diofant.interactive.session import AutomaticSymbols')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')\n shell.run_cell('del ip')\n if args.unicode_identifiers:\n shell.run_cell(\n 'from diofant.interactive.session import unicode_identifiers')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.input_transformers_cleanup.append(unicode_identifiers)')\n shell.run_cell('del ip')\n app.start()\n else:\n ast_transformers = []\n source_transformers = 
[]\n ns = {}\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n if args.auto_symbols:\n ast_transformers.append(AutomaticSymbols(ns))\n if args.unicode_identifiers:\n source_transformers.append(unicode_identifiers)\n\n\n class DiofantConsole(code.InteractiveConsole):\n \"\"\"An interactive console with readline support.\"\"\"\n\n def __init__(self, ast_transformers=[], source_transformers=[],\n **kwargs):\n super().__init__(**kwargs)\n readline.set_completer(rlcompleter.Completer(ns).complete)\n readline.parse_and_bind('tab: complete')\n history = os.path.expanduser('~/.python_history')\n readline.read_history_file(history)\n atexit.register(readline.write_history_file, history)\n self.ast_transformers = ast_transformers\n self.source_transformers = source_transformers\n\n def runsource(self, source, filename='<input>', symbol='single'):\n for t in self.source_transformers:\n source = '\\n'.join(t(source.splitlines()))\n try:\n tree = ast.parse(source)\n except SyntaxError:\n return True\n for t in self.ast_transformers:\n tree = t.visit(tree)\n ast.fix_missing_locations(tree)\n source = ast.unparse(tree)\n source = source.split('\\n')\n source = ';'.join(source)\n return super().runsource(source, filename=filename, symbol=\n symbol)\n c = DiofantConsole(ast_transformers=ast_transformers,\n source_transformers=source_transformers, locals=ns)\n for l in lines:\n c.push(l)\n c.interact('', '')\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\n__all__ = ()\nparser = argparse.ArgumentParser(description=__doc__, prog='python -m diofant')\nparser.add_argument('--no-wrap-division', help=\n \"Don't wrap integer divisions with Fraction\", action='store_true')\nparser.add_argument('-a', '--auto-symbols', help=\n \"Automatically create missing Symbol's\", action='store_true')\nparser.add_argument('--no-ipython', help=\"Don't use IPython\", action=\n 'store_true')\nparser.add_argument('--unicode-identifiers', help=\n 'Allow any unicode identifiers', action='store_true')\n\n\ndef main():\n args, ipython_args = parser.parse_known_args()\n lines = ['from diofant import *', 'init_printing()',\n \"a, b, c, d, t, x, y, z = symbols('a:d t x:z')\",\n \"k, m, n = symbols('k m n', integer=True)\",\n \"f, g, h = symbols('f g h', cls=Function)\",\n 'init_printing(pretty_print=True, use_unicode=True)']\n try:\n import IPython\n import traitlets\n except ImportError:\n args.no_ipython = True\n if not args.no_ipython:\n config = traitlets.config.loader.Config()\n shell = config.InteractiveShell\n ast_transformers = shell.ast_transformers\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n shell.confirm_exit = False\n config.TerminalIPythonApp.display_banner = False\n config.TerminalInteractiveShell.autoformatter = None\n app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)\n app.initialize(ipython_args)\n shell = app.shell\n for l in lines:\n shell.run_cell(l, silent=True)\n if args.auto_symbols:\n shell.run_cell(\n 'from diofant.interactive.session import AutomaticSymbols')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')\n shell.run_cell('del ip')\n if args.unicode_identifiers:\n shell.run_cell(\n 'from diofant.interactive.session import unicode_identifiers')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.input_transformers_cleanup.append(unicode_identifiers)')\n 
shell.run_cell('del ip')\n app.start()\n else:\n ast_transformers = []\n source_transformers = []\n ns = {}\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n if args.auto_symbols:\n ast_transformers.append(AutomaticSymbols(ns))\n if args.unicode_identifiers:\n source_transformers.append(unicode_identifiers)\n\n\n class DiofantConsole(code.InteractiveConsole):\n \"\"\"An interactive console with readline support.\"\"\"\n\n def __init__(self, ast_transformers=[], source_transformers=[],\n **kwargs):\n super().__init__(**kwargs)\n readline.set_completer(rlcompleter.Completer(ns).complete)\n readline.parse_and_bind('tab: complete')\n history = os.path.expanduser('~/.python_history')\n readline.read_history_file(history)\n atexit.register(readline.write_history_file, history)\n self.ast_transformers = ast_transformers\n self.source_transformers = source_transformers\n\n def runsource(self, source, filename='<input>', symbol='single'):\n for t in self.source_transformers:\n source = '\\n'.join(t(source.splitlines()))\n try:\n tree = ast.parse(source)\n except SyntaxError:\n return True\n for t in self.ast_transformers:\n tree = t.visit(tree)\n ast.fix_missing_locations(tree)\n source = ast.unparse(tree)\n source = source.split('\\n')\n source = ';'.join(source)\n return super().runsource(source, filename=filename, symbol=\n symbol)\n c = DiofantConsole(ast_transformers=ast_transformers,\n source_transformers=source_transformers, locals=ns)\n for l in lines:\n c.push(l)\n c.interact('', '')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport argparse\nimport ast\nimport atexit\nimport code\nimport os\nimport readline\nimport rlcompleter\nfrom diofant.interactive.session import AutomaticSymbols, IntegerDivisionWrapper, unicode_identifiers\n__all__ = ()\nparser = argparse.ArgumentParser(description=__doc__, prog='python -m diofant')\nparser.add_argument('--no-wrap-division', help=\n \"Don't wrap integer divisions with Fraction\", action='store_true')\nparser.add_argument('-a', '--auto-symbols', help=\n \"Automatically create missing Symbol's\", action='store_true')\nparser.add_argument('--no-ipython', help=\"Don't use IPython\", action=\n 'store_true')\nparser.add_argument('--unicode-identifiers', help=\n 'Allow any unicode identifiers', action='store_true')\n\n\ndef main():\n args, ipython_args = parser.parse_known_args()\n lines = ['from diofant import *', 'init_printing()',\n \"a, b, c, d, t, x, y, z = symbols('a:d t x:z')\",\n \"k, m, n = symbols('k m n', integer=True)\",\n \"f, g, h = symbols('f g h', cls=Function)\",\n 'init_printing(pretty_print=True, use_unicode=True)']\n try:\n import IPython\n import traitlets\n except ImportError:\n args.no_ipython = True\n if not args.no_ipython:\n config = traitlets.config.loader.Config()\n shell = config.InteractiveShell\n ast_transformers = shell.ast_transformers\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n shell.confirm_exit = False\n config.TerminalIPythonApp.display_banner = False\n config.TerminalInteractiveShell.autoformatter = None\n app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)\n app.initialize(ipython_args)\n shell = app.shell\n for l in lines:\n shell.run_cell(l, silent=True)\n if args.auto_symbols:\n shell.run_cell(\n 'from diofant.interactive.session import AutomaticSymbols')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')\n shell.run_cell('del ip')\n if 
args.unicode_identifiers:\n shell.run_cell(\n 'from diofant.interactive.session import unicode_identifiers')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.input_transformers_cleanup.append(unicode_identifiers)')\n shell.run_cell('del ip')\n app.start()\n else:\n ast_transformers = []\n source_transformers = []\n ns = {}\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n if args.auto_symbols:\n ast_transformers.append(AutomaticSymbols(ns))\n if args.unicode_identifiers:\n source_transformers.append(unicode_identifiers)\n\n\n class DiofantConsole(code.InteractiveConsole):\n \"\"\"An interactive console with readline support.\"\"\"\n\n def __init__(self, ast_transformers=[], source_transformers=[],\n **kwargs):\n super().__init__(**kwargs)\n readline.set_completer(rlcompleter.Completer(ns).complete)\n readline.parse_and_bind('tab: complete')\n history = os.path.expanduser('~/.python_history')\n readline.read_history_file(history)\n atexit.register(readline.write_history_file, history)\n self.ast_transformers = ast_transformers\n self.source_transformers = source_transformers\n\n def runsource(self, source, filename='<input>', symbol='single'):\n for t in self.source_transformers:\n source = '\\n'.join(t(source.splitlines()))\n try:\n tree = ast.parse(source)\n except SyntaxError:\n return True\n for t in self.ast_transformers:\n tree = t.visit(tree)\n ast.fix_missing_locations(tree)\n source = ast.unparse(tree)\n source = source.split('\\n')\n source = ';'.join(source)\n return super().runsource(source, filename=filename, symbol=\n symbol)\n c = DiofantConsole(ast_transformers=ast_transformers,\n source_transformers=source_transformers, locals=ns)\n for l in lines:\n c.push(l)\n c.interact('', '')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nPython shell for Diofant.\n\nThis is just a normal Python shell (IPython shell if you have the\nIPython package installed), that adds default imports and run\nsome initialization code.\n\"\"\"\n\nimport argparse\nimport ast\nimport atexit\nimport code\nimport os\nimport readline\nimport rlcompleter\n\nfrom diofant.interactive.session import (AutomaticSymbols,\n IntegerDivisionWrapper,\n unicode_identifiers)\n\n\n__all__ = ()\n\n\nparser = argparse.ArgumentParser(description=__doc__,\n prog='python -m diofant')\nparser.add_argument('--no-wrap-division',\n help=\"Don't wrap integer divisions with Fraction\",\n action='store_true')\nparser.add_argument('-a', '--auto-symbols',\n help=\"Automatically create missing Symbol's\",\n action='store_true')\nparser.add_argument('--no-ipython', help=\"Don't use IPython\",\n action='store_true')\nparser.add_argument('--unicode-identifiers',\n help='Allow any unicode identifiers',\n action='store_true')\n\n\ndef main():\n args, ipython_args = parser.parse_known_args()\n\n lines = ['from diofant import *',\n 'init_printing()',\n \"a, b, c, d, t, x, y, z = symbols('a:d t x:z')\",\n \"k, m, n = symbols('k m n', integer=True)\",\n \"f, g, h = symbols('f g h', cls=Function)\",\n 'init_printing(pretty_print=True, use_unicode=True)']\n\n try:\n import IPython\n import traitlets\n except ImportError:\n args.no_ipython = True\n\n if not args.no_ipython:\n config = traitlets.config.loader.Config()\n shell = config.InteractiveShell\n ast_transformers = shell.ast_transformers\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n shell.confirm_exit = False\n config.TerminalIPythonApp.display_banner = False\n config.TerminalInteractiveShell.autoformatter = None\n\n app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)\n app.initialize(ipython_args)\n shell = app.shell\n for l in lines:\n shell.run_cell(l, silent=True)\n if args.auto_symbols:\n shell.run_cell('from 
diofant.interactive.session import AutomaticSymbols')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell('ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')\n shell.run_cell('del ip')\n if args.unicode_identifiers:\n shell.run_cell('from diofant.interactive.session import unicode_identifiers')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell('ip.input_transformers_cleanup.append(unicode_identifiers)')\n shell.run_cell('del ip')\n app.start()\n else:\n ast_transformers = []\n source_transformers = []\n ns = {}\n\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n if args.auto_symbols:\n ast_transformers.append(AutomaticSymbols(ns))\n if args.unicode_identifiers:\n source_transformers.append(unicode_identifiers)\n\n class DiofantConsole(code.InteractiveConsole):\n \"\"\"An interactive console with readline support.\"\"\"\n\n def __init__(self, ast_transformers=[],\n source_transformers=[], **kwargs):\n super().__init__(**kwargs)\n\n readline.set_completer(rlcompleter.Completer(ns).complete)\n readline.parse_and_bind('tab: complete')\n\n history = os.path.expanduser('~/.python_history')\n readline.read_history_file(history)\n atexit.register(readline.write_history_file, history)\n self.ast_transformers = ast_transformers\n self.source_transformers = source_transformers\n\n def runsource(self, source, filename='<input>', symbol='single'):\n for t in self.source_transformers:\n source = '\\n'.join(t(source.splitlines()))\n\n try:\n tree = ast.parse(source)\n except SyntaxError:\n return True\n\n for t in self.ast_transformers:\n tree = t.visit(tree)\n ast.fix_missing_locations(tree)\n\n source = ast.unparse(tree)\n source = source.split('\\n')\n source = ';'.join(source)\n return super().runsource(source, filename=filename, symbol=symbol)\n\n c = DiofantConsole(ast_transformers=ast_transformers,\n source_transformers=source_transformers, locals=ns)\n\n for l in lines:\n c.push(l)\n c.interact('', '')\n\n\nif __name__ 
== '__main__': # pragma: no branch\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@handler_define
class HelloWorld(BaseHandler):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@handler_define
class HelloWorld(BaseHandler):
@api_define('HelloWorld', '/', [], description='HelloWorld')
def get(self):
self.write({'status': 'HelloWorld'})
<|reserved_special_token_1|>
__author__ = 'ldd'
<|reserved_special_token_0|>
@handler_define
class HelloWorld(BaseHandler):
@api_define('HelloWorld', '/', [], description='HelloWorld')
def get(self):
self.write({'status': 'HelloWorld'})
<|reserved_special_token_1|>
__author__ = 'ldd'
from view.api_doc import handler_define, api_define, Param
from view.base import BaseHandler, CachedPlusHandler
@handler_define
class HelloWorld(BaseHandler):
@api_define('HelloWorld', '/', [], description='HelloWorld')
def get(self):
self.write({'status': 'HelloWorld'})
<|reserved_special_token_1|>
__author__ = 'ldd'
# -*- coding: utf-8 -*-
from view.api_doc import handler_define, api_define, Param
from view.base import BaseHandler,CachedPlusHandler
@handler_define
class HelloWorld(BaseHandler):
@api_define("HelloWorld", r'/', [
], description="HelloWorld")
def get(self):
self.write({'status':"HelloWorld"})
|
flexible
|
{
"blob_id": "3c738a07d71338ab838e4f1d683e631252d50a30",
"index": 4085,
"step-1": "<mask token>\n\n\n@handler_define\nclass HelloWorld(BaseHandler):\n <mask token>\n",
"step-2": "<mask token>\n\n\n@handler_define\nclass HelloWorld(BaseHandler):\n\n @api_define('HelloWorld', '/', [], description='HelloWorld')\n def get(self):\n self.write({'status': 'HelloWorld'})\n",
"step-3": "__author__ = 'ldd'\n<mask token>\n\n\n@handler_define\nclass HelloWorld(BaseHandler):\n\n @api_define('HelloWorld', '/', [], description='HelloWorld')\n def get(self):\n self.write({'status': 'HelloWorld'})\n",
"step-4": "__author__ = 'ldd'\nfrom view.api_doc import handler_define, api_define, Param\nfrom view.base import BaseHandler, CachedPlusHandler\n\n\n@handler_define\nclass HelloWorld(BaseHandler):\n\n @api_define('HelloWorld', '/', [], description='HelloWorld')\n def get(self):\n self.write({'status': 'HelloWorld'})\n",
"step-5": "__author__ = 'ldd'\n# -*- coding: utf-8 -*-\n\nfrom view.api_doc import handler_define, api_define, Param\nfrom view.base import BaseHandler,CachedPlusHandler\n\n@handler_define\nclass HelloWorld(BaseHandler):\n @api_define(\"HelloWorld\", r'/', [\n ], description=\"HelloWorld\")\n def get(self):\n self.write({'status':\"HelloWorld\"})",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import csv
from functools import reduce
class Csvread:
def __init__(self, fpath):
self._path=fpath
with open (fpath) as file:
read_f=csv.reader(file)
print(read_f) #<_csv.reader object at 0x000002A53144DF40>
self._sheet = list(read_f)[1:] #utworzenie listy
def get_sheet(self):
return self._sheet
class Csvcalc:
def __init__(self, cont):
self._cont=cont
def row_count(self):
return len(self._cont)
def get_row (self, row_no):
return self._cont[row_no]
def col_count (self):
return len(self._cont[1])
def get_colum (self,no_col):
return list (x[no_col] for x in self._cont)
def sum_col (self,col_no):
return reduce(lambda x, y: x+y, self.get_colum(col_no))
def mul_col(self, col_no):
return sum(lambda x,y: x*y, self.get_colum(col_no))
csv1= Csvread('./data.csv')
print(csv1) #<__main__.Csvread object at 0x000002A5312B4040>
|
normal
|
{
"blob_id": "67793c8851e7107c6566da4e0ca5d5ffcf6341ad",
"index": 8867,
"step-1": "<mask token>\n\n\nclass Csvcalc:\n\n def __init__(self, cont):\n self._cont = cont\n\n def row_count(self):\n return len(self._cont)\n\n def get_row(self, row_no):\n return self._cont[row_no]\n\n def col_count(self):\n return len(self._cont[1])\n\n def get_colum(self, no_col):\n return list(x[no_col] for x in self._cont)\n <mask token>\n\n def mul_col(self, col_no):\n return sum(lambda x, y: x * y, self.get_colum(col_no))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Csvread:\n <mask token>\n\n def get_sheet(self):\n return self._sheet\n\n\nclass Csvcalc:\n\n def __init__(self, cont):\n self._cont = cont\n\n def row_count(self):\n return len(self._cont)\n\n def get_row(self, row_no):\n return self._cont[row_no]\n\n def col_count(self):\n return len(self._cont[1])\n\n def get_colum(self, no_col):\n return list(x[no_col] for x in self._cont)\n\n def sum_col(self, col_no):\n return reduce(lambda x, y: x + y, self.get_colum(col_no))\n\n def mul_col(self, col_no):\n return sum(lambda x, y: x * y, self.get_colum(col_no))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Csvread:\n\n def __init__(self, fpath):\n self._path = fpath\n with open(fpath) as file:\n read_f = csv.reader(file)\n print(read_f)\n self._sheet = list(read_f)[1:]\n\n def get_sheet(self):\n return self._sheet\n\n\nclass Csvcalc:\n\n def __init__(self, cont):\n self._cont = cont\n\n def row_count(self):\n return len(self._cont)\n\n def get_row(self, row_no):\n return self._cont[row_no]\n\n def col_count(self):\n return len(self._cont[1])\n\n def get_colum(self, no_col):\n return list(x[no_col] for x in self._cont)\n\n def sum_col(self, col_no):\n return reduce(lambda x, y: x + y, self.get_colum(col_no))\n\n def mul_col(self, col_no):\n return sum(lambda x, y: x * y, self.get_colum(col_no))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Csvread:\n\n def __init__(self, fpath):\n self._path = fpath\n with open(fpath) as file:\n read_f = csv.reader(file)\n print(read_f)\n self._sheet = list(read_f)[1:]\n\n def get_sheet(self):\n return self._sheet\n\n\nclass Csvcalc:\n\n def __init__(self, cont):\n self._cont = cont\n\n def row_count(self):\n return len(self._cont)\n\n def get_row(self, row_no):\n return self._cont[row_no]\n\n def col_count(self):\n return len(self._cont[1])\n\n def get_colum(self, no_col):\n return list(x[no_col] for x in self._cont)\n\n def sum_col(self, col_no):\n return reduce(lambda x, y: x + y, self.get_colum(col_no))\n\n def mul_col(self, col_no):\n return sum(lambda x, y: x * y, self.get_colum(col_no))\n\n\ncsv1 = Csvread('./data.csv')\nprint(csv1)\n",
"step-5": "import csv\nfrom functools import reduce\n\nclass Csvread:\n\n def __init__(self, fpath):\n self._path=fpath\n with open (fpath) as file:\n read_f=csv.reader(file)\n print(read_f) #<_csv.reader object at 0x000002A53144DF40>\n\n self._sheet = list(read_f)[1:] #utworzenie listy\n\n\n def get_sheet(self):\n return self._sheet\n\nclass Csvcalc:\n def __init__(self, cont):\n self._cont=cont\n def row_count(self):\n return len(self._cont)\n def get_row (self, row_no):\n return self._cont[row_no]\n def col_count (self):\n return len(self._cont[1])\n def get_colum (self,no_col):\n return list (x[no_col] for x in self._cont)\n def sum_col (self,col_no):\n return reduce(lambda x, y: x+y, self.get_colum(col_no))\n\n def mul_col(self, col_no):\n return sum(lambda x,y: x*y, self.get_colum(col_no))\n\n\n\n\n\ncsv1= Csvread('./data.csv')\nprint(csv1) #<__main__.Csvread object at 0x000002A5312B4040>\n\n\n\n\n",
"step-ids": [
7,
10,
11,
13,
15
]
}
|
[
7,
10,
11,
13,
15
] |
# help from https://stackoverflow.com/questions/19007383/compare-two-different-files-line-by-line-in-python
with open('Book1.txt', 'r') as file1:
with open('20k.txt', 'r') as file2:
same = set(file1).intersection(file2)
same.discard('\n')
with open('notin20kforBook1.txt', 'w') as file_out:
for line in same:
file_out.write(line)
with open('Book2.txt', 'r') as file3:
with open('20k.txt', 'r') as file2:
same = set(file3).intersection(file2)
same.discard('\n')
with open('notin20kforBook2.txt', 'w') as file_out:
for line in same:
file_out.write(line)
with open('Book3.txt', 'r') as file4:
with open('20k.txt', 'r') as file2:
same = set(file4).intersection(file2)
same.discard('\n')
with open('notin20kforBook3.txt', 'w') as file_out:
for line in same:
file_out.write(line)
|
normal
|
{
"blob_id": "21a41356fcedb36223498db0fe783e4a9e8e1ba6",
"index": 210,
"step-1": "<mask token>\n",
"step-2": "with open('Book1.txt', 'r') as file1:\n with open('20k.txt', 'r') as file2:\n same = set(file1).intersection(file2)\nsame.discard('\\n')\nwith open('notin20kforBook1.txt', 'w') as file_out:\n for line in same:\n file_out.write(line)\nwith open('Book2.txt', 'r') as file3:\n with open('20k.txt', 'r') as file2:\n same = set(file3).intersection(file2)\nsame.discard('\\n')\nwith open('notin20kforBook2.txt', 'w') as file_out:\n for line in same:\n file_out.write(line)\nwith open('Book3.txt', 'r') as file4:\n with open('20k.txt', 'r') as file2:\n same = set(file4).intersection(file2)\nsame.discard('\\n')\nwith open('notin20kforBook3.txt', 'w') as file_out:\n for line in same:\n file_out.write(line)\n",
"step-3": "# help from https://stackoverflow.com/questions/19007383/compare-two-different-files-line-by-line-in-python\n\nwith open('Book1.txt', 'r') as file1:\n with open('20k.txt', 'r') as file2:\n same = set(file1).intersection(file2)\n\nsame.discard('\\n')\n\nwith open('notin20kforBook1.txt', 'w') as file_out:\n for line in same:\n file_out.write(line)\n\nwith open('Book2.txt', 'r') as file3:\n with open('20k.txt', 'r') as file2:\n same = set(file3).intersection(file2)\n\nsame.discard('\\n')\n\nwith open('notin20kforBook2.txt', 'w') as file_out:\n for line in same:\n file_out.write(line)\n\nwith open('Book3.txt', 'r') as file4:\n with open('20k.txt', 'r') as file2:\n same = set(file4).intersection(file2)\n\nsame.discard('\\n')\n\nwith open('notin20kforBook3.txt', 'w') as file_out:\n for line in same:\n file_out.write(line)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def MostTeams(OffAndDef):
most = []
count = 0
for playerid, playerdata in OffAndDef.items():
if playerdata['name'] != '':
if len(playerdata['Teams']) > count:
count = len(playerdata['Teams'])
most = [[playerdata['name'], len(playerdata['Teams'])]]
elif len(playerdata['Teams']) == count:
most.append([playerdata['name'], len(playerdata['Teams'])])
return most
def MostTeamsOneYear(OffAndDef):
teams = {}
maximum = {}
count = 0
for playerid, playerdata in OffAndDef.items():
if playerdata['name'] != '':
for years in playerdata:
if (years != 'Drops' and years != 'NegRushYards' and years !=
'NegRush' and years != 'Teams' and years !=
'PassForLoss' and years != 'name'):
try:
if len(playerdata[years]) > count:
if len(playerdata[years]) not in teams.keys():
teams.clear()
teams[len(playerdata[years])] = {}
teams[len(playerdata[years])][playerdata['name']
] = years
count = len(playerdata[years])
elif len(playerdata[years]) == count:
teams[len(playerdata[years])].append(playerdata
['name'], years)
except:
pass
return teams
<|reserved_special_token_0|>
def MostPassForLoss(OffAndDef):
PassForLoss = []
att = 0
for player in OffAndDef:
if OffAndDef[player]['PassForLoss'] > att:
att = OffAndDef[player]['PassForLoss']
PassForLoss = [[OffAndDef[player]['name'], OffAndDef[player][
'PassForLoss']]]
elif OffAndDef[player]['PassForLoss'] == att:
PassForLoss.append([OffAndDef[player]['name'], OffAndDef[player
]['PassForLoss']])
return PassForLoss
<|reserved_special_token_0|>
def TeamPenaltyYards(penalties):
pens = []
num = 0
for teamname, teamdata in penalties.items():
if teamdata['PenaltyYards'] > num:
num = teamdata['PenaltyYards']
pens = [[teamname, teamdata['PenaltyYards']]]
elif teamdata['PenaltyYards'] == num:
pens.append([teamname, teamdata['PenaltyYards']])
return pens
<|reserved_special_token_0|>
def AverageNumberOfPlays():
games = 0
plays = 0
for filename in os.listdir(os.path.dirname(os.path.abspath(__file__)) +
'/stats'):
with open(os.path.dirname(os.path.abspath(__file__)) + '/stats/' +
filename, 'r') as json_file:
try:
data = json.load(json_file)
except:
pass
else:
for gameid, gamedata in data.items():
if gameid != 'nextupdate':
games += 1
for driveid, drivedata in gamedata['drives'].items():
if driveid != 'crntdrv':
plays += drivedata['numplays']
avgplays = plays / games
return avgplays
def LongestFG(fg):
fgs = []
length = 0
for playerid, playerdata in fg.items():
if playerdata['Long'] > length:
length = playerdata['Long']
fgs = [[playerdata['Name'], playerdata['Long']]]
elif playerdata['Long'] == length:
fgs.append([playerdata['Name'], playerdata['Long']])
return fgs
def MostFG(fg):
fgs = []
count = 0
for playerid, playerdata in fg.items():
if playerdata['FG'] > count:
count = playerdata['FG']
fgs = [[playerdata['Name'], playerdata['FG']]]
elif playerdata['FG'] == count:
fgs.append([playerdata['Name'], playerdata['FG']])
return fgs
def MostMFG(fg):
fgs = []
count = 0
for playerid, playerdata in fg.items():
if playerdata['MFG'] > count:
count = playerdata['MFG']
fgs = [[playerdata['Name'], playerdata['MFG']]]
elif playerdata['MFG'] == count:
fgs.append([playerdata['Name'], playerdata['MFG']])
return fgs
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def MostTeams(OffAndDef):
most = []
count = 0
for playerid, playerdata in OffAndDef.items():
if playerdata['name'] != '':
if len(playerdata['Teams']) > count:
count = len(playerdata['Teams'])
most = [[playerdata['name'], len(playerdata['Teams'])]]
elif len(playerdata['Teams']) == count:
most.append([playerdata['name'], len(playerdata['Teams'])])
return most
def MostTeamsOneYear(OffAndDef):
teams = {}
maximum = {}
count = 0
for playerid, playerdata in OffAndDef.items():
if playerdata['name'] != '':
for years in playerdata:
if (years != 'Drops' and years != 'NegRushYards' and years !=
'NegRush' and years != 'Teams' and years !=
'PassForLoss' and years != 'name'):
try:
if len(playerdata[years]) > count:
if len(playerdata[years]) not in teams.keys():
teams.clear()
teams[len(playerdata[years])] = {}
teams[len(playerdata[years])][playerdata['name']
] = years
count = len(playerdata[years])
elif len(playerdata[years]) == count:
teams[len(playerdata[years])].append(playerdata
['name'], years)
except:
pass
return teams
<|reserved_special_token_0|>
def MostPassForLoss(OffAndDef):
PassForLoss = []
att = 0
for player in OffAndDef:
if OffAndDef[player]['PassForLoss'] > att:
att = OffAndDef[player]['PassForLoss']
PassForLoss = [[OffAndDef[player]['name'], OffAndDef[player][
'PassForLoss']]]
elif OffAndDef[player]['PassForLoss'] == att:
PassForLoss.append([OffAndDef[player]['name'], OffAndDef[player
]['PassForLoss']])
return PassForLoss
<|reserved_special_token_0|>
def TeamPenaltyYards(penalties):
pens = []
num = 0
for teamname, teamdata in penalties.items():
if teamdata['PenaltyYards'] > num:
num = teamdata['PenaltyYards']
pens = [[teamname, teamdata['PenaltyYards']]]
elif teamdata['PenaltyYards'] == num:
pens.append([teamname, teamdata['PenaltyYards']])
return pens
def PenaltyWins(penalties):
x = MostPenalties(penalties)
mostPenalized = []
for temp in x:
mostPenalized.append(team[0])
least = penalties[mostPenalized[0]]['Penalties']
mostandleast = [[mostPenalized[0], penalties[mostPenalized[0]]['Wins'],
penalties[mostPenalized[0]]['Losses']]]
leastTeam = []
for teamname, teamdata in penalties.items():
if teamdata['Penalties'] < least:
least = teamdata['Penalties']
leastTeam = [[teamname, teamdata['Wins'], teamdata['Losses']]]
elif teamdata['Penalties'] == least:
leastTeam.append([teamname, teamdata['Wins'], teamdata['Losses']])
mostandleast.append(leastTeam[0])
return mostandleast
def AverageNumberOfPlays():
games = 0
plays = 0
for filename in os.listdir(os.path.dirname(os.path.abspath(__file__)) +
'/stats'):
with open(os.path.dirname(os.path.abspath(__file__)) + '/stats/' +
filename, 'r') as json_file:
try:
data = json.load(json_file)
except:
pass
else:
for gameid, gamedata in data.items():
if gameid != 'nextupdate':
games += 1
for driveid, drivedata in gamedata['drives'].items():
if driveid != 'crntdrv':
plays += drivedata['numplays']
avgplays = plays / games
return avgplays
def LongestFG(fg):
fgs = []
length = 0
for playerid, playerdata in fg.items():
if playerdata['Long'] > length:
length = playerdata['Long']
fgs = [[playerdata['Name'], playerdata['Long']]]
elif playerdata['Long'] == length:
fgs.append([playerdata['Name'], playerdata['Long']])
return fgs
def MostFG(fg):
fgs = []
count = 0
for playerid, playerdata in fg.items():
if playerdata['FG'] > count:
count = playerdata['FG']
fgs = [[playerdata['Name'], playerdata['FG']]]
elif playerdata['FG'] == count:
fgs.append([playerdata['Name'], playerdata['FG']])
return fgs
def MostMFG(fg):
fgs = []
count = 0
for playerid, playerdata in fg.items():
if playerdata['MFG'] > count:
count = playerdata['MFG']
fgs = [[playerdata['Name'], playerdata['MFG']]]
elif playerdata['MFG'] == count:
fgs.append([playerdata['Name'], playerdata['MFG']])
return fgs
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def MostTeams(OffAndDef):
most = []
count = 0
for playerid, playerdata in OffAndDef.items():
if playerdata['name'] != '':
if len(playerdata['Teams']) > count:
count = len(playerdata['Teams'])
most = [[playerdata['name'], len(playerdata['Teams'])]]
elif len(playerdata['Teams']) == count:
most.append([playerdata['name'], len(playerdata['Teams'])])
return most
def MostTeamsOneYear(OffAndDef):
teams = {}
maximum = {}
count = 0
for playerid, playerdata in OffAndDef.items():
if playerdata['name'] != '':
for years in playerdata:
if (years != 'Drops' and years != 'NegRushYards' and years !=
'NegRush' and years != 'Teams' and years !=
'PassForLoss' and years != 'name'):
try:
if len(playerdata[years]) > count:
if len(playerdata[years]) not in teams.keys():
teams.clear()
teams[len(playerdata[years])] = {}
teams[len(playerdata[years])][playerdata['name']
] = years
count = len(playerdata[years])
elif len(playerdata[years]) == count:
teams[len(playerdata[years])].append(playerdata
['name'], years)
except:
pass
return teams
def NegativeRushingYards(OffAndDef):
NegRushYds = []
yds = 0
for playerid, playerdata in OffAndDef.items():
if playerdata['NegRushYards'] < yds:
yds = playerdata['NegRushYards']
NegRushYds = [[playerdata['name'], playerdata['NegRushYards']]]
elif playerdata['NegRushYards'] == yds:
NegRushYds.append([playerdata['name'], playerdata['NegRushYards']])
return NegRushYds
<|reserved_special_token_0|>
def MostPassForLoss(OffAndDef):
PassForLoss = []
att = 0
for player in OffAndDef:
if OffAndDef[player]['PassForLoss'] > att:
att = OffAndDef[player]['PassForLoss']
PassForLoss = [[OffAndDef[player]['name'], OffAndDef[player][
'PassForLoss']]]
elif OffAndDef[player]['PassForLoss'] == att:
PassForLoss.append([OffAndDef[player]['name'], OffAndDef[player
]['PassForLoss']])
return PassForLoss
<|reserved_special_token_0|>
def TeamPenaltyYards(penalties):
pens = []
num = 0
for teamname, teamdata in penalties.items():
if teamdata['PenaltyYards'] > num:
num = teamdata['PenaltyYards']
pens = [[teamname, teamdata['PenaltyYards']]]
elif teamdata['PenaltyYards'] == num:
pens.append([teamname, teamdata['PenaltyYards']])
return pens
def PenaltyWins(penalties):
x = MostPenalties(penalties)
mostPenalized = []
for temp in x:
mostPenalized.append(team[0])
least = penalties[mostPenalized[0]]['Penalties']
mostandleast = [[mostPenalized[0], penalties[mostPenalized[0]]['Wins'],
penalties[mostPenalized[0]]['Losses']]]
leastTeam = []
for teamname, teamdata in penalties.items():
if teamdata['Penalties'] < least:
least = teamdata['Penalties']
leastTeam = [[teamname, teamdata['Wins'], teamdata['Losses']]]
elif teamdata['Penalties'] == least:
leastTeam.append([teamname, teamdata['Wins'], teamdata['Losses']])
mostandleast.append(leastTeam[0])
return mostandleast
def AverageNumberOfPlays():
games = 0
plays = 0
for filename in os.listdir(os.path.dirname(os.path.abspath(__file__)) +
'/stats'):
with open(os.path.dirname(os.path.abspath(__file__)) + '/stats/' +
filename, 'r') as json_file:
try:
data = json.load(json_file)
except:
pass
else:
for gameid, gamedata in data.items():
if gameid != 'nextupdate':
games += 1
for driveid, drivedata in gamedata['drives'].items():
if driveid != 'crntdrv':
plays += drivedata['numplays']
avgplays = plays / games
return avgplays
def LongestFG(fg):
fgs = []
length = 0
for playerid, playerdata in fg.items():
if playerdata['Long'] > length:
length = playerdata['Long']
fgs = [[playerdata['Name'], playerdata['Long']]]
elif playerdata['Long'] == length:
fgs.append([playerdata['Name'], playerdata['Long']])
return fgs
def MostFG(fg):
fgs = []
count = 0
for playerid, playerdata in fg.items():
if playerdata['FG'] > count:
count = playerdata['FG']
fgs = [[playerdata['Name'], playerdata['FG']]]
elif playerdata['FG'] == count:
fgs.append([playerdata['Name'], playerdata['FG']])
return fgs
def MostMFG(fg):
fgs = []
count = 0
for playerid, playerdata in fg.items():
if playerdata['MFG'] > count:
count = playerdata['MFG']
fgs = [[playerdata['Name'], playerdata['MFG']]]
elif playerdata['MFG'] == count:
fgs.append([playerdata['Name'], playerdata['MFG']])
return fgs
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def MostTeams(OffAndDef):
most = []
count = 0
for playerid, playerdata in OffAndDef.items():
if playerdata['name'] != '':
if len(playerdata['Teams']) > count:
count = len(playerdata['Teams'])
most = [[playerdata['name'], len(playerdata['Teams'])]]
elif len(playerdata['Teams']) == count:
most.append([playerdata['name'], len(playerdata['Teams'])])
return most
def MostTeamsOneYear(OffAndDef):
teams = {}
maximum = {}
count = 0
for playerid, playerdata in OffAndDef.items():
if playerdata['name'] != '':
for years in playerdata:
if (years != 'Drops' and years != 'NegRushYards' and years !=
'NegRush' and years != 'Teams' and years !=
'PassForLoss' and years != 'name'):
try:
if len(playerdata[years]) > count:
if len(playerdata[years]) not in teams.keys():
teams.clear()
teams[len(playerdata[years])] = {}
teams[len(playerdata[years])][playerdata['name']
] = years
count = len(playerdata[years])
elif len(playerdata[years]) == count:
teams[len(playerdata[years])].append(playerdata
['name'], years)
except:
pass
return teams
def NegativeRushingYards(OffAndDef):
NegRushYds = []
yds = 0
for playerid, playerdata in OffAndDef.items():
if playerdata['NegRushYards'] < yds:
yds = playerdata['NegRushYards']
NegRushYds = [[playerdata['name'], playerdata['NegRushYards']]]
elif playerdata['NegRushYards'] == yds:
NegRushYds.append([playerdata['name'], playerdata['NegRushYards']])
return NegRushYds
def NegativeRushes(OffAndDef):
rushes = []
att = 0
for player in OffAndDef:
if OffAndDef[player]['NegRush'] > att:
att = OffAndDef[player]['NegRush']
rushes = [[OffAndDef[player]['name'], OffAndDef[player]['NegRush']]
]
elif OffAndDef[player]['NegRush'] == att:
rushes.append([OffAndDef[player]['name'], OffAndDef[player][
'NegRush']])
return rushes
def MostPassForLoss(OffAndDef):
    """Return [[name, count], ...] for the player(s) with the most passes for a loss."""
    top = []
    high = 0
    for stats in OffAndDef.values():
        n = stats['PassForLoss']
        if n > high:
            high = n
            top = [[stats['name'], n]]
        elif n == high:
            top.append([stats['name'], n])
    return top
def MostPenalties(penalties):
    """Return [[team, penalty_count], ...] for the most-penalized team(s)."""
    worst = []
    high = 0
    for name, stats in penalties.items():
        n_pens = stats['Penalties']
        if n_pens > high:
            high = n_pens
            worst = [[name, n_pens]]
        elif n_pens == high:
            worst.append([name, n_pens])
    return worst
def TeamPenaltyYards(penalties):
    """Return [[team, yards], ...] for the team(s) with the most penalty yards."""
    leaders = []
    high = 0
    for name, stats in penalties.items():
        yards = stats['PenaltyYards']
        if yards > high:
            high = yards
            leaders = [[name, yards]]
        elif yards == high:
            leaders.append([name, yards])
    return leaders
def PenaltyWins(penalties):
    """Contrast the most- and least-penalized teams' win/loss records.

    Returns [[most_team, wins, losses], [least_team, wins, losses]].
    For ties, the first team encountered (dict order) is reported.
    """
    # BUG FIX: the original did `for temp in x: mostPenalized.append(team[0])`,
    # appending the stale module-level `team` variable (leaked from a script
    # loop) instead of its own loop variable — so the "most penalized" entry
    # could be an arbitrary team. Compute the true maximum directly.
    most = max(penalties, key=lambda name: penalties[name]['Penalties'])
    result = [[most, penalties[most]['Wins'], penalties[most]['Losses']]]
    fewest = penalties[most]['Penalties']
    least_teams = []
    for teamname, teamdata in penalties.items():
        if teamdata['Penalties'] < fewest:
            fewest = teamdata['Penalties']
            least_teams = [[teamname, teamdata['Wins'], teamdata['Losses']]]
        elif teamdata['Penalties'] == fewest:
            least_teams.append([teamname, teamdata['Wins'], teamdata['Losses']])
    result.append(least_teams[0])
    return result
def AverageNumberOfPlays():
    """Average number of plays per game across every file in ./stats.

    Files that fail to parse as JSON are skipped. The 'nextupdate' key is
    scraper metadata (not a game) and each drive dict's 'crntdrv' entry is a
    marker (not a drive); both are ignored.
    """
    stats_dir = os.path.dirname(os.path.abspath(__file__)) + '/stats'
    games = 0
    plays = 0
    for filename in os.listdir(stats_dir):
        with open(stats_dir + '/' + filename, 'r') as json_file:
            try:
                data = json.load(json_file)
            except:
                pass  # unparseable file: skip it, matching original behavior
            else:
                for gameid, gamedata in data.items():
                    if gameid != 'nextupdate':
                        games += 1
                        for driveid, drivedata in gamedata['drives'].items():
                            if driveid != 'crntdrv':
                                plays += drivedata['numplays']
    return plays / games
def LongestFG(fg):
    """Return [[name, yards], ...] for the kicker(s) with the longest field goal."""
    record = 0
    kickers = []
    for stats in fg.values():
        longest = stats['Long']
        if longest > record:
            record = longest
            kickers = [[stats['Name'], longest]]
        elif longest == record:
            kickers.append([stats['Name'], longest])
    return kickers
def MostFG(fg):
    """Return [[name, made], ...] for the kicker(s) with the most made field goals."""
    leaders = []
    high = 0
    for stats in fg.values():
        made = stats['FG']
        if made > high:
            high = made
            leaders = [[stats['Name'], made]]
        elif made == high:
            leaders.append([stats['Name'], made])
    return leaders
def MostMFG(fg):
    """Return [[name, missed], ...] for the kicker(s) with the most missed field goals."""
    leaders = []
    high = 0
    for stats in fg.values():
        missed = stats['MFG']
        if missed > high:
            high = missed
            leaders = [[stats['Name'], missed]]
        elif missed == high:
            leaders.append([stats['Name'], missed])
    return leaders
def MostDrops(OffAndDef):
    """Return [[name, drops], ...] for the player(s) with the most dropped passes."""
    leaders = []
    high = 0
    for stats in OffAndDef.values():
        drops = stats['Drops']
        if drops > high:
            high = drops
            leaders = [[stats['name'], drops]]
        elif drops == high:
            leaders.append([stats['name'], drops])
    return leaders
# ---------------------------------------------------------------------------
# Report driver: loads the pre-aggregated JSON stat files that live next to
# this script and prints the answer to each assignment question.
# Fixes: file handles are now closed via context managers (the original used
# open()/close() pairs with no protection against json.load raising); the
# Python-2 leftover bare `print` statement (a no-op in Python 3) and the dead
# variables `count`/`temp` were removed.
# ---------------------------------------------------------------------------
path = os.path.dirname(os.path.abspath(__file__))  # directory of this script

with open(path + '/OffAndDef.json', 'r') as f:
    OffAndDef = json.load(f)   # per-player offense/defense aggregates
with open(path + '/Penalties.json', 'r') as f:
    penalties = json.load(f)   # per-team penalties, yards, W/L record
with open(path + '/FG.json', 'r') as f:
    fg = json.load(f)          # per-kicker field-goal stats

# Shared section separator (byte-identical to the literal printed before).
SEP = '=================================================================================='

print('\n')
print('Name: Zac Conley')
print('Assignment: A03 - Nfl Stats')
print('Date: 2/10/19')
print(SEP)

print('Question 1: Find the player(s) that played for the most teams.')
playerlist = MostTeams(OffAndDef)
for p in playerlist:
    print(str(p[0]) + ': ' + str(p[1]) + ' teams\n')
print(SEP)

print('Question 2: Find the player(s) that played for multiple teams in one year.')
ans = MostTeamsOneYear(OffAndDef)
for numteams in ans.items():
    # numteams is (team_count, {player_name: year}); player is (name, year).
    for player in numteams[1].items():
        print(player[1] + ': ' + player[0] + ' ' + str(numteams[0]) + ' teams.')
print(SEP)

print('Question 3: Find the player(s) that had the most yards rushed for a loss.')
for player in NegativeRushingYards(OffAndDef):
    print(player[0] + ': ' + str(player[1]) + ' rushing yards.\n')
print(SEP)

print('Question 4: Find the player(s) that had the most rushes for a loss.')
for player in NegativeRushes(OffAndDef):
    print(player[0] + ': ' + str(player[1]) + ' negative rushes.\n')
print(SEP)

print('Question 5: Find the player(s) with the most number of passes for a loss.')
for player in MostPassForLoss(OffAndDef):
    print(player[0] + ': ' + str(player[1]) + ' negative passes.\n')
print(SEP)

print('Question 6: Find the team with the most penalties.')
ans = MostPenalties(penalties)
# NOTE: keep the loop variable named `team` — the historical PenaltyWins
# implementation read this module-level name.
for team in ans:
    print(str(team[0]) + ' had ' + str(team[1]) + ' penalties.\n')
print(SEP)

print('Question 7: Find the team with the most yards in penalties.')
ans = TeamPenaltyYards(penalties)
for team in ans:
    print(team[0] + ': ' + str(int(team[1])) + ' penalty yards.\n')
print(SEP)

print('Question 8: Find the correlation between most penalized teams and games won / lost.')
ans = PenaltyWins(penalties)
print('Most Penalties: ' + ans[0][0] + ': ' + str(ans[0][1]) + '-' + str(ans[0][2]))
print('Least Penalties: ' + ans[1][0] + ' : ' + str(ans[1][1]) + '-' + str(ans[1][2]) + '\n')
print(SEP)

print('Question 9: Average number of plays in a game. (This may take up to a minute.)')
ans = AverageNumberOfPlays()
print('On average, there are ' + str(ans) + ' plays each game. \n')
print(SEP)

print('Question 10: Longest field goal.')
for player in LongestFG(fg):
    print(player[0] + ': ' + str(player[1]) + ' yards.\n')
print(SEP)

print('Question 11: Most field goals.')
for player in MostFG(fg):
    print(player[0] + ': ' + str(player[1]) + ' FGs.\n')
print(SEP)

print('Question 12: Most missed field goals.')
for player in MostMFG(fg):
    print(player[0] + ': ' + str(player[1]) + ' missed FGs.\n')
print(SEP)

print('Question 13: Most dropped passes.')
for player in MostDrops(OffAndDef):
    print(player[0] + ': ' + str(player[1]) + ' drops.')
<|reserved_special_token_1|>
import json
import os
import sys
"""
Course: cmps 4883
Assignemt: A03
Date: 2/10/19
Github username: acdczlc
Repo url: https://github.com/acdczlc/4883-SWTools-Conley
Name: Zac Conley
Description:
Calculates all stats for questions about stats
"""
##############################################################
# MostTeams(dict of off and def players)
# gets player who played for most teams
#
# Params:
# dict of players
# Returns:
# player with most teams
def MostTeams(OffAndDef):
most=[]
count=0 # set comparison
for playerid, playerdata in OffAndDef.items():
if(playerdata['name']!=''): #only get real players
if(len(playerdata['Teams'])>count):
count=len(playerdata['Teams']) #get count
most=[[playerdata['name'],len(playerdata['Teams'])]] # replace with player
elif(len(playerdata['Teams'])==count):
most.append([playerdata['name'],len(playerdata['Teams'])]) # add multiple teams
return most
##############################################################
# MostTeamsOneYear(dict of off and def players)
# gets player who played for most teams in one year
#
# Params:
# dict of players
# Returns:
# player with most teams
def MostTeamsOneYear(OffAndDef):
    """Find the player(s) who appeared on the most teams within one season.

    Returns {max_count: {player_name: year}} — a single entry keyed by the
    highest per-season team count, mapping each tied player to that season.
    """
    # Per-player keys that are aggregate stats, not season-year entries.
    NON_YEAR_KEYS = ('Drops', 'NegRushYards', 'NegRush', 'Teams',
                     'PassForLoss', 'name')
    teams = {}
    count = 0
    for playerid, playerdata in OffAndDef.items():
        if playerdata['name'] == '':
            continue  # placeholder record, not a real player
        for year, season in playerdata.items():
            if year in NON_YEAR_KEYS:
                continue
            try:
                n = len(season)
            except TypeError:
                continue  # non-sized values are not season records
            if n > count:
                # New maximum: discard previous leaders.
                count = n
                teams = {n: {playerdata['name']: year}}
            elif n == count and count:
                # BUG FIX: the original called dict.append(name, year) here,
                # which raised AttributeError and was silently swallowed by a
                # bare except — tied players were dropped. Record them instead.
                teams[n][playerdata['name']] = year
    return teams
##############################################################
# NegativeRushingYards(dict of off and def players)
# gets player with most negative rushing yards
#
# Params:
# dict of players
# Returns:
# player with most negative rushing yards
def NegativeRushingYards(OffAndDef):
NegRushYds=[]
yds=0
for playerid, playerdata in OffAndDef.items():
if(playerdata['NegRushYards']<yds):
yds=playerdata['NegRushYards']
NegRushYds=[[playerdata['name'],playerdata['NegRushYards']]]
elif(playerdata['NegRushYards']==yds):
NegRushYds.append([playerdata['name'],playerdata['NegRushYards']])
return NegRushYds
##############################################################
# NegativeRushes(dict of off and def players)
# gets player with most negative rushes
#
# Params:
# dict of players
# Returns:
# player with most negative rushes
def NegativeRushes(OffAndDef):
rushes=[]
att=0 #attempts
for player in OffAndDef:
if(OffAndDef[player]['NegRush']>att):
att=OffAndDef[player]['NegRush']
rushes=[[OffAndDef[player]['name'],OffAndDef[player]['NegRush']]]
elif(OffAndDef[player]['NegRush']==att):
rushes.append([OffAndDef[player]['name'],OffAndDef[player]['NegRush']])
return rushes
##############################################################
# MostPassForLoss(dict of off and def players)
# gets player with most negative rushes
#
# Params:
# dict of players
# Returns:
# player with most negative rushes
def MostPassForLoss(OffAndDef):
PassForLoss=[]
att=0 #attempts
for player in OffAndDef:
if(OffAndDef[player]['PassForLoss']>att):
att=OffAndDef[player]['PassForLoss']
PassForLoss=[[OffAndDef[player]['name'],OffAndDef[player]['PassForLoss']]]
elif(OffAndDef[player]['PassForLoss']==att):
PassForLoss.append([OffAndDef[player]['name'],OffAndDef[player]['PassForLoss']])
return PassForLoss
##############################################################
# MostPenalties(dict of team penalties)
# gets team with most penalties
#
# Params:
# dict of teams
# Returns:
# player with most negative rushes
def MostPenalties(penalties):
pens=[]
num=0
for teamname,teamdata in penalties.items():
if(teamdata['Penalties']>num):
num=teamdata['Penalties']
pens=[[teamname,teamdata['Penalties']]]
elif (teamdata['Penalties']==num):
pens.append([teamname,teamdata['Penalties']])
return pens
##############################################################
# TeamPenaltyYards(dict of team penalties)
# gets team with most penaltiy yards
#
# Params:
# dict of teams
# Returns:
# team with most penalty yards
def TeamPenaltyYards(penalties):
pens=[]
num=0
for teamname,teamdata in penalties.items():
if(teamdata['PenaltyYards']>num):
num=teamdata['PenaltyYards']
pens=[[teamname,teamdata['PenaltyYards']]]
elif (teamdata['PenaltyYards']==num):
pens.append([teamname,teamdata['PenaltyYards']])
return pens
##############################################################
# PenaltyWins(most penalized team,dict of team penalties)
# shows correlation between penalty and record
#
# Params:
# dict of teams, most penalized team
# Returns:
# team with most penaltys and least
def PenaltyWins(penalties):
    """Contrast the most- and least-penalized teams' win/loss records.

    Returns [[most_team, wins, losses], [least_team, wins, losses]].
    For ties, the first team encountered (dict order) is reported.
    """
    # BUG FIX: the original did `for temp in x: mostPenalized.append(team[0])`,
    # appending the stale module-level `team` variable (leaked from a script
    # loop) instead of its own loop variable — so the "most penalized" entry
    # could be an arbitrary team. Compute the true maximum directly.
    most = max(penalties, key=lambda name: penalties[name]['Penalties'])
    result = [[most, penalties[most]['Wins'], penalties[most]['Losses']]]
    fewest = penalties[most]['Penalties']
    least_teams = []
    for teamname, teamdata in penalties.items():
        if teamdata['Penalties'] < fewest:
            fewest = teamdata['Penalties']
            least_teams = [[teamname, teamdata['Wins'], teamdata['Losses']]]
        elif teamdata['Penalties'] == fewest:
            least_teams.append([teamname, teamdata['Wins'], teamdata['Losses']])
    result.append(least_teams[0])
    return result
##############################################################
# AverageNumberOfPlays()
# shows average number of plays
#
# Params:
# none
# Returns:
# avg number of plays
def AverageNumberOfPlays():
games=0
plays=0
for filename in os.listdir(os.path.dirname(os.path.abspath(__file__))+'/stats'): # sets path to all stats
with open(os.path.dirname(os.path.abspath(__file__))+"/stats/"+filename,"r") as json_file:
try: #gets all stats and stores each game in a dict
data=json.load(json_file)
except:
pass
else:
for gameid, gamedata in data.items():
if(gameid!="nextupdate"):
games+=1 #increment number of games
for driveid, drivedata in gamedata['drives'].items():
if(driveid!="crntdrv"):
plays+=drivedata['numplays'] #increment number of plays
avgplays=plays/games
return avgplays
##############################################################
# LongestFG(dict of fgs)
# longest field goal
#
# Params:
# dict of fgs
# Returns:
# longest field goal and kicker
def LongestFG(fg):
fgs=[]
length=0 #longest fg
for playerid,playerdata in fg.items():
if(playerdata['Long']>length):
length=playerdata['Long']
fgs=[[playerdata['Name'],playerdata['Long']]]
elif (playerdata['Long']==length):
fgs.append([playerdata['Name'],playerdata['Long']])
return fgs
##############################################################
# MostFG(dict of fgs)
# most made field goals
#
# Params:
# dict of fgs
# Returns:
# most made field goals and kicker
def MostFG(fg):
fgs=[]
count=0 #sets counter to 0
for playerid,playerdata in fg.items():
if(playerdata['FG']>count): #if largest number of fg so far
count=playerdata['FG']
fgs=[[playerdata['Name'],playerdata['FG']]]
elif (playerdata['FG']==count): #if same number of fg
fgs.append([playerdata['Name'],playerdata['FG']])
return fgs
##############################################################
# MostMFG(dict of fgs)
# most missed field goals
#
# Params:
# dict of fgs
# Returns:
# most missed field goals and kicker
def MostMFG(fg):
fgs=[]
count=0 #set counter to 0
for playerid,playerdata in fg.items():
if(playerdata['MFG']>count): #if most misses so far
count=playerdata['MFG']
fgs=[[playerdata['Name'],playerdata['MFG']]]
elif (playerdata['MFG']==count): #if same as most misses
fgs.append([playerdata['Name'],playerdata['MFG']])
return fgs
##############################################################
# MostDrops(dict of players)
# most drops
#
# Params:
# dict of players
# Returns:
# most drops
def MostDrops(OffAndDef):
drops=[]
count=0 #set drops to 0
for player in OffAndDef:
if(OffAndDef[player]['Drops']>count):
count=OffAndDef[player]['Drops']
drops=[[OffAndDef[player]['name'],OffAndDef[player]['Drops']]]
elif(OffAndDef[player]['Drops']==count):
drops.append([OffAndDef[player]['name'],OffAndDef[player]['Drops']])
return drops
path= os.path.dirname(os.path.abspath(__file__)) #set path to current location
f=open(path+'/OffAndDef.json','r') #open separated files
OffAndDef=json.load(f)
f.close()
f=open(path+'/Penalties.json','r')
penalties=json.load(f)
f.close()
f=open(path+'/FG.json','r')
fg=json.load(f)
f.close()
print("\n")
print("Name: Zac Conley")
print("Assignment: A03 - Nfl Stats")
print("Date: 2/10/19")
print("==================================================================================")
print("Question 1: Find the player(s) that played for the most teams.")
playerlist=MostTeams(OffAndDef)
for p in playerlist:
print(str(p[0]) + ": "+ str(p[1]) +" teams\n")
print("==================================================================================")
print("Question 2: Find the player(s) that played for multiple teams in one year.")
ans=MostTeamsOneYear(OffAndDef)
count=0
for numteams in ans.items():
for player in numteams[1].items():
print(player[1]+": " +player[0]+" "+str(numteams[0])+" teams." )
print
print("==================================================================================")
print("Question 3: Find the player(s) that had the most yards rushed for a loss.")
ans=NegativeRushingYards(OffAndDef)
for player in ans:
print(player[0]+": "+str(player[1])+" rushing yards.\n")
print("==================================================================================")
print("Question 4: Find the player(s) that had the most rushes for a loss.")
ans=NegativeRushes(OffAndDef)
for player in ans:
print(player[0]+": "+str(player[1])+" negative rushes.\n")
print("==================================================================================")
print("Question 5: Find the player(s) with the most number of passes for a loss.")
ans=MostPassForLoss(OffAndDef)
for player in ans:
print(player[0]+": "+str(player[1])+" negative passes.\n")
temp=[]
print("==================================================================================")
print("Question 6: Find the team with the most penalties.")
ans=MostPenalties(penalties)
for team in ans:
print(str(team[0])+" had "+str(team[1])+" penalties.\n")
print("==================================================================================")
print("Question 7: Find the team with the most yards in penalties.")
ans=TeamPenaltyYards(penalties)
for team in ans:
print(team[0]+": "+str(int(team[1]))+" penalty yards.\n")
print("==================================================================================")
print("Question 8: Find the correlation between most penalized teams and games won / lost.")
ans=PenaltyWins(penalties)
print("Most Penalties: "+ans[0][0]+": "+str(ans[0][1]) +"-" +str(ans[0][2]))
print("Least Penalties: "+ans[1][0]+" : "+str(ans[1][1])+"-" +str(ans[1][2])+"\n")
print("==================================================================================")
print("Question 9: Average number of plays in a game. (This may take up to a minute.)")
ans=AverageNumberOfPlays()
print("On average, there are " +str(ans) +" plays each game. \n")
print("==================================================================================")
print("Question 10: Longest field goal.")
ans=LongestFG(fg)
for player in ans:
print(player[0]+": "+str(player[1])+" yards.\n")
print("==================================================================================")
print("Question 11: Most field goals.")
ans=MostFG(fg)
for player in ans:
print(player[0]+": "+str(player[1])+" FGs.\n")
print("==================================================================================")
print("Question 12: Most missed field goals.")
ans=MostMFG(fg)
for player in ans:
print(player[0]+": "+str(player[1])+" missed FGs.\n")
print("==================================================================================")
print("Question 13: Most dropped passes.")
ans=MostDrops(OffAndDef)
for player in ans:
print(player[0]+": "+str(player[1])+" drops.")
|
flexible
|
{
"blob_id": "2a4f57cd0fc1c50cba06c285849432c6f71f28e2",
"index": 2642,
"step-1": "<mask token>\n\n\ndef MostTeams(OffAndDef):\n most = []\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n if len(playerdata['Teams']) > count:\n count = len(playerdata['Teams'])\n most = [[playerdata['name'], len(playerdata['Teams'])]]\n elif len(playerdata['Teams']) == count:\n most.append([playerdata['name'], len(playerdata['Teams'])])\n return most\n\n\ndef MostTeamsOneYear(OffAndDef):\n teams = {}\n maximum = {}\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n for years in playerdata:\n if (years != 'Drops' and years != 'NegRushYards' and years !=\n 'NegRush' and years != 'Teams' and years !=\n 'PassForLoss' and years != 'name'):\n try:\n if len(playerdata[years]) > count:\n if len(playerdata[years]) not in teams.keys():\n teams.clear()\n teams[len(playerdata[years])] = {}\n teams[len(playerdata[years])][playerdata['name']\n ] = years\n count = len(playerdata[years])\n elif len(playerdata[years]) == count:\n teams[len(playerdata[years])].append(playerdata\n ['name'], years)\n except:\n pass\n return teams\n\n\n<mask token>\n\n\ndef MostPassForLoss(OffAndDef):\n PassForLoss = []\n att = 0\n for player in OffAndDef:\n if OffAndDef[player]['PassForLoss'] > att:\n att = OffAndDef[player]['PassForLoss']\n PassForLoss = [[OffAndDef[player]['name'], OffAndDef[player][\n 'PassForLoss']]]\n elif OffAndDef[player]['PassForLoss'] == att:\n PassForLoss.append([OffAndDef[player]['name'], OffAndDef[player\n ]['PassForLoss']])\n return PassForLoss\n\n\n<mask token>\n\n\ndef TeamPenaltyYards(penalties):\n pens = []\n num = 0\n for teamname, teamdata in penalties.items():\n if teamdata['PenaltyYards'] > num:\n num = teamdata['PenaltyYards']\n pens = [[teamname, teamdata['PenaltyYards']]]\n elif teamdata['PenaltyYards'] == num:\n pens.append([teamname, teamdata['PenaltyYards']])\n return pens\n\n\n<mask token>\n\n\ndef AverageNumberOfPlays():\n games = 0\n plays = 0\n for filename 
in os.listdir(os.path.dirname(os.path.abspath(__file__)) +\n '/stats'):\n with open(os.path.dirname(os.path.abspath(__file__)) + '/stats/' +\n filename, 'r') as json_file:\n try:\n data = json.load(json_file)\n except:\n pass\n else:\n for gameid, gamedata in data.items():\n if gameid != 'nextupdate':\n games += 1\n for driveid, drivedata in gamedata['drives'].items():\n if driveid != 'crntdrv':\n plays += drivedata['numplays']\n avgplays = plays / games\n return avgplays\n\n\ndef LongestFG(fg):\n fgs = []\n length = 0\n for playerid, playerdata in fg.items():\n if playerdata['Long'] > length:\n length = playerdata['Long']\n fgs = [[playerdata['Name'], playerdata['Long']]]\n elif playerdata['Long'] == length:\n fgs.append([playerdata['Name'], playerdata['Long']])\n return fgs\n\n\ndef MostFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['FG'] > count:\n count = playerdata['FG']\n fgs = [[playerdata['Name'], playerdata['FG']]]\n elif playerdata['FG'] == count:\n fgs.append([playerdata['Name'], playerdata['FG']])\n return fgs\n\n\ndef MostMFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['MFG'] > count:\n count = playerdata['MFG']\n fgs = [[playerdata['Name'], playerdata['MFG']]]\n elif playerdata['MFG'] == count:\n fgs.append([playerdata['Name'], playerdata['MFG']])\n return fgs\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef MostTeams(OffAndDef):\n most = []\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n if len(playerdata['Teams']) > count:\n count = len(playerdata['Teams'])\n most = [[playerdata['name'], len(playerdata['Teams'])]]\n elif len(playerdata['Teams']) == count:\n most.append([playerdata['name'], len(playerdata['Teams'])])\n return most\n\n\ndef MostTeamsOneYear(OffAndDef):\n teams = {}\n maximum = {}\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n for years in playerdata:\n if (years != 'Drops' and years != 'NegRushYards' and years !=\n 'NegRush' and years != 'Teams' and years !=\n 'PassForLoss' and years != 'name'):\n try:\n if len(playerdata[years]) > count:\n if len(playerdata[years]) not in teams.keys():\n teams.clear()\n teams[len(playerdata[years])] = {}\n teams[len(playerdata[years])][playerdata['name']\n ] = years\n count = len(playerdata[years])\n elif len(playerdata[years]) == count:\n teams[len(playerdata[years])].append(playerdata\n ['name'], years)\n except:\n pass\n return teams\n\n\n<mask token>\n\n\ndef MostPassForLoss(OffAndDef):\n PassForLoss = []\n att = 0\n for player in OffAndDef:\n if OffAndDef[player]['PassForLoss'] > att:\n att = OffAndDef[player]['PassForLoss']\n PassForLoss = [[OffAndDef[player]['name'], OffAndDef[player][\n 'PassForLoss']]]\n elif OffAndDef[player]['PassForLoss'] == att:\n PassForLoss.append([OffAndDef[player]['name'], OffAndDef[player\n ]['PassForLoss']])\n return PassForLoss\n\n\n<mask token>\n\n\ndef TeamPenaltyYards(penalties):\n pens = []\n num = 0\n for teamname, teamdata in penalties.items():\n if teamdata['PenaltyYards'] > num:\n num = teamdata['PenaltyYards']\n pens = [[teamname, teamdata['PenaltyYards']]]\n elif teamdata['PenaltyYards'] == num:\n pens.append([teamname, teamdata['PenaltyYards']])\n return pens\n\n\ndef PenaltyWins(penalties):\n x = MostPenalties(penalties)\n mostPenalized = []\n 
for temp in x:\n mostPenalized.append(team[0])\n least = penalties[mostPenalized[0]]['Penalties']\n mostandleast = [[mostPenalized[0], penalties[mostPenalized[0]]['Wins'],\n penalties[mostPenalized[0]]['Losses']]]\n leastTeam = []\n for teamname, teamdata in penalties.items():\n if teamdata['Penalties'] < least:\n least = teamdata['Penalties']\n leastTeam = [[teamname, teamdata['Wins'], teamdata['Losses']]]\n elif teamdata['Penalties'] == least:\n leastTeam.append([teamname, teamdata['Wins'], teamdata['Losses']])\n mostandleast.append(leastTeam[0])\n return mostandleast\n\n\ndef AverageNumberOfPlays():\n games = 0\n plays = 0\n for filename in os.listdir(os.path.dirname(os.path.abspath(__file__)) +\n '/stats'):\n with open(os.path.dirname(os.path.abspath(__file__)) + '/stats/' +\n filename, 'r') as json_file:\n try:\n data = json.load(json_file)\n except:\n pass\n else:\n for gameid, gamedata in data.items():\n if gameid != 'nextupdate':\n games += 1\n for driveid, drivedata in gamedata['drives'].items():\n if driveid != 'crntdrv':\n plays += drivedata['numplays']\n avgplays = plays / games\n return avgplays\n\n\ndef LongestFG(fg):\n fgs = []\n length = 0\n for playerid, playerdata in fg.items():\n if playerdata['Long'] > length:\n length = playerdata['Long']\n fgs = [[playerdata['Name'], playerdata['Long']]]\n elif playerdata['Long'] == length:\n fgs.append([playerdata['Name'], playerdata['Long']])\n return fgs\n\n\ndef MostFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['FG'] > count:\n count = playerdata['FG']\n fgs = [[playerdata['Name'], playerdata['FG']]]\n elif playerdata['FG'] == count:\n fgs.append([playerdata['Name'], playerdata['FG']])\n return fgs\n\n\ndef MostMFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['MFG'] > count:\n count = playerdata['MFG']\n fgs = [[playerdata['Name'], playerdata['MFG']]]\n elif playerdata['MFG'] == count:\n fgs.append([playerdata['Name'], 
playerdata['MFG']])\n return fgs\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef MostTeams(OffAndDef):\n most = []\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n if len(playerdata['Teams']) > count:\n count = len(playerdata['Teams'])\n most = [[playerdata['name'], len(playerdata['Teams'])]]\n elif len(playerdata['Teams']) == count:\n most.append([playerdata['name'], len(playerdata['Teams'])])\n return most\n\n\ndef MostTeamsOneYear(OffAndDef):\n teams = {}\n maximum = {}\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n for years in playerdata:\n if (years != 'Drops' and years != 'NegRushYards' and years !=\n 'NegRush' and years != 'Teams' and years !=\n 'PassForLoss' and years != 'name'):\n try:\n if len(playerdata[years]) > count:\n if len(playerdata[years]) not in teams.keys():\n teams.clear()\n teams[len(playerdata[years])] = {}\n teams[len(playerdata[years])][playerdata['name']\n ] = years\n count = len(playerdata[years])\n elif len(playerdata[years]) == count:\n teams[len(playerdata[years])].append(playerdata\n ['name'], years)\n except:\n pass\n return teams\n\n\ndef NegativeRushingYards(OffAndDef):\n NegRushYds = []\n yds = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['NegRushYards'] < yds:\n yds = playerdata['NegRushYards']\n NegRushYds = [[playerdata['name'], playerdata['NegRushYards']]]\n elif playerdata['NegRushYards'] == yds:\n NegRushYds.append([playerdata['name'], playerdata['NegRushYards']])\n return NegRushYds\n\n\n<mask token>\n\n\ndef MostPassForLoss(OffAndDef):\n PassForLoss = []\n att = 0\n for player in OffAndDef:\n if OffAndDef[player]['PassForLoss'] > att:\n att = OffAndDef[player]['PassForLoss']\n PassForLoss = [[OffAndDef[player]['name'], OffAndDef[player][\n 'PassForLoss']]]\n elif OffAndDef[player]['PassForLoss'] == att:\n PassForLoss.append([OffAndDef[player]['name'], OffAndDef[player\n ]['PassForLoss']])\n return PassForLoss\n\n\n<mask token>\n\n\ndef 
TeamPenaltyYards(penalties):\n pens = []\n num = 0\n for teamname, teamdata in penalties.items():\n if teamdata['PenaltyYards'] > num:\n num = teamdata['PenaltyYards']\n pens = [[teamname, teamdata['PenaltyYards']]]\n elif teamdata['PenaltyYards'] == num:\n pens.append([teamname, teamdata['PenaltyYards']])\n return pens\n\n\ndef PenaltyWins(penalties):\n x = MostPenalties(penalties)\n mostPenalized = []\n for temp in x:\n mostPenalized.append(team[0])\n least = penalties[mostPenalized[0]]['Penalties']\n mostandleast = [[mostPenalized[0], penalties[mostPenalized[0]]['Wins'],\n penalties[mostPenalized[0]]['Losses']]]\n leastTeam = []\n for teamname, teamdata in penalties.items():\n if teamdata['Penalties'] < least:\n least = teamdata['Penalties']\n leastTeam = [[teamname, teamdata['Wins'], teamdata['Losses']]]\n elif teamdata['Penalties'] == least:\n leastTeam.append([teamname, teamdata['Wins'], teamdata['Losses']])\n mostandleast.append(leastTeam[0])\n return mostandleast\n\n\ndef AverageNumberOfPlays():\n games = 0\n plays = 0\n for filename in os.listdir(os.path.dirname(os.path.abspath(__file__)) +\n '/stats'):\n with open(os.path.dirname(os.path.abspath(__file__)) + '/stats/' +\n filename, 'r') as json_file:\n try:\n data = json.load(json_file)\n except:\n pass\n else:\n for gameid, gamedata in data.items():\n if gameid != 'nextupdate':\n games += 1\n for driveid, drivedata in gamedata['drives'].items():\n if driveid != 'crntdrv':\n plays += drivedata['numplays']\n avgplays = plays / games\n return avgplays\n\n\ndef LongestFG(fg):\n fgs = []\n length = 0\n for playerid, playerdata in fg.items():\n if playerdata['Long'] > length:\n length = playerdata['Long']\n fgs = [[playerdata['Name'], playerdata['Long']]]\n elif playerdata['Long'] == length:\n fgs.append([playerdata['Name'], playerdata['Long']])\n return fgs\n\n\ndef MostFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['FG'] > count:\n count = playerdata['FG']\n fgs = 
[[playerdata['Name'], playerdata['FG']]]\n elif playerdata['FG'] == count:\n fgs.append([playerdata['Name'], playerdata['FG']])\n return fgs\n\n\ndef MostMFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['MFG'] > count:\n count = playerdata['MFG']\n fgs = [[playerdata['Name'], playerdata['MFG']]]\n elif playerdata['MFG'] == count:\n fgs.append([playerdata['Name'], playerdata['MFG']])\n return fgs\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef MostTeams(OffAndDef):\n most = []\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n if len(playerdata['Teams']) > count:\n count = len(playerdata['Teams'])\n most = [[playerdata['name'], len(playerdata['Teams'])]]\n elif len(playerdata['Teams']) == count:\n most.append([playerdata['name'], len(playerdata['Teams'])])\n return most\n\n\ndef MostTeamsOneYear(OffAndDef):\n teams = {}\n maximum = {}\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n for years in playerdata:\n if (years != 'Drops' and years != 'NegRushYards' and years !=\n 'NegRush' and years != 'Teams' and years !=\n 'PassForLoss' and years != 'name'):\n try:\n if len(playerdata[years]) > count:\n if len(playerdata[years]) not in teams.keys():\n teams.clear()\n teams[len(playerdata[years])] = {}\n teams[len(playerdata[years])][playerdata['name']\n ] = years\n count = len(playerdata[years])\n elif len(playerdata[years]) == count:\n teams[len(playerdata[years])].append(playerdata\n ['name'], years)\n except:\n pass\n return teams\n\n\ndef NegativeRushingYards(OffAndDef):\n NegRushYds = []\n yds = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['NegRushYards'] < yds:\n yds = playerdata['NegRushYards']\n NegRushYds = [[playerdata['name'], playerdata['NegRushYards']]]\n elif playerdata['NegRushYards'] == yds:\n NegRushYds.append([playerdata['name'], playerdata['NegRushYards']])\n return NegRushYds\n\n\ndef NegativeRushes(OffAndDef):\n rushes = []\n att = 0\n for player in OffAndDef:\n if OffAndDef[player]['NegRush'] > att:\n att = OffAndDef[player]['NegRush']\n rushes = [[OffAndDef[player]['name'], OffAndDef[player]['NegRush']]\n ]\n elif OffAndDef[player]['NegRush'] == att:\n rushes.append([OffAndDef[player]['name'], OffAndDef[player][\n 'NegRush']])\n return rushes\n\n\ndef MostPassForLoss(OffAndDef):\n PassForLoss = []\n att = 0\n for player in OffAndDef:\n if 
OffAndDef[player]['PassForLoss'] > att:\n att = OffAndDef[player]['PassForLoss']\n PassForLoss = [[OffAndDef[player]['name'], OffAndDef[player][\n 'PassForLoss']]]\n elif OffAndDef[player]['PassForLoss'] == att:\n PassForLoss.append([OffAndDef[player]['name'], OffAndDef[player\n ]['PassForLoss']])\n return PassForLoss\n\n\ndef MostPenalties(penalties):\n pens = []\n num = 0\n for teamname, teamdata in penalties.items():\n if teamdata['Penalties'] > num:\n num = teamdata['Penalties']\n pens = [[teamname, teamdata['Penalties']]]\n elif teamdata['Penalties'] == num:\n pens.append([teamname, teamdata['Penalties']])\n return pens\n\n\ndef TeamPenaltyYards(penalties):\n pens = []\n num = 0\n for teamname, teamdata in penalties.items():\n if teamdata['PenaltyYards'] > num:\n num = teamdata['PenaltyYards']\n pens = [[teamname, teamdata['PenaltyYards']]]\n elif teamdata['PenaltyYards'] == num:\n pens.append([teamname, teamdata['PenaltyYards']])\n return pens\n\n\ndef PenaltyWins(penalties):\n x = MostPenalties(penalties)\n mostPenalized = []\n for temp in x:\n mostPenalized.append(team[0])\n least = penalties[mostPenalized[0]]['Penalties']\n mostandleast = [[mostPenalized[0], penalties[mostPenalized[0]]['Wins'],\n penalties[mostPenalized[0]]['Losses']]]\n leastTeam = []\n for teamname, teamdata in penalties.items():\n if teamdata['Penalties'] < least:\n least = teamdata['Penalties']\n leastTeam = [[teamname, teamdata['Wins'], teamdata['Losses']]]\n elif teamdata['Penalties'] == least:\n leastTeam.append([teamname, teamdata['Wins'], teamdata['Losses']])\n mostandleast.append(leastTeam[0])\n return mostandleast\n\n\ndef AverageNumberOfPlays():\n games = 0\n plays = 0\n for filename in os.listdir(os.path.dirname(os.path.abspath(__file__)) +\n '/stats'):\n with open(os.path.dirname(os.path.abspath(__file__)) + '/stats/' +\n filename, 'r') as json_file:\n try:\n data = json.load(json_file)\n except:\n pass\n else:\n for gameid, gamedata in data.items():\n if gameid != 
'nextupdate':\n games += 1\n for driveid, drivedata in gamedata['drives'].items():\n if driveid != 'crntdrv':\n plays += drivedata['numplays']\n avgplays = plays / games\n return avgplays\n\n\ndef LongestFG(fg):\n fgs = []\n length = 0\n for playerid, playerdata in fg.items():\n if playerdata['Long'] > length:\n length = playerdata['Long']\n fgs = [[playerdata['Name'], playerdata['Long']]]\n elif playerdata['Long'] == length:\n fgs.append([playerdata['Name'], playerdata['Long']])\n return fgs\n\n\ndef MostFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['FG'] > count:\n count = playerdata['FG']\n fgs = [[playerdata['Name'], playerdata['FG']]]\n elif playerdata['FG'] == count:\n fgs.append([playerdata['Name'], playerdata['FG']])\n return fgs\n\n\ndef MostMFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['MFG'] > count:\n count = playerdata['MFG']\n fgs = [[playerdata['Name'], playerdata['MFG']]]\n elif playerdata['MFG'] == count:\n fgs.append([playerdata['Name'], playerdata['MFG']])\n return fgs\n\n\ndef MostDrops(OffAndDef):\n drops = []\n count = 0\n for player in OffAndDef:\n if OffAndDef[player]['Drops'] > count:\n count = OffAndDef[player]['Drops']\n drops = [[OffAndDef[player]['name'], OffAndDef[player]['Drops']]]\n elif OffAndDef[player]['Drops'] == count:\n drops.append([OffAndDef[player]['name'], OffAndDef[player][\n 'Drops']])\n return drops\n\n\npath = os.path.dirname(os.path.abspath(__file__))\nf = open(path + '/OffAndDef.json', 'r')\nOffAndDef = json.load(f)\nf.close()\nf = open(path + '/Penalties.json', 'r')\npenalties = json.load(f)\nf.close()\nf = open(path + '/FG.json', 'r')\nfg = json.load(f)\nf.close()\nprint('\\n')\nprint('Name: Zac Conley')\nprint('Assignment: A03 - Nfl Stats')\nprint('Date: 2/10/19')\nprint(\n '=================================================================================='\n )\nprint('Question 1: Find the player(s) that played for the most 
teams.')\nplayerlist = MostTeams(OffAndDef)\nfor p in playerlist:\n print(str(p[0]) + ': ' + str(p[1]) + ' teams\\n')\nprint(\n '=================================================================================='\n )\nprint(\n 'Question 2: Find the player(s) that played for multiple teams in one year.'\n )\nans = MostTeamsOneYear(OffAndDef)\ncount = 0\nfor numteams in ans.items():\n for player in numteams[1].items():\n print(player[1] + ': ' + player[0] + ' ' + str(numteams[0]) + ' teams.'\n )\nprint\nprint(\n '=================================================================================='\n )\nprint(\n 'Question 3: Find the player(s) that had the most yards rushed for a loss.'\n )\nans = NegativeRushingYards(OffAndDef)\nfor player in ans:\n print(player[0] + ': ' + str(player[1]) + ' rushing yards.\\n')\nprint(\n '=================================================================================='\n )\nprint('Question 4: Find the player(s) that had the most rushes for a loss.')\nans = NegativeRushes(OffAndDef)\nfor player in ans:\n print(player[0] + ': ' + str(player[1]) + ' negative rushes.\\n')\nprint(\n '=================================================================================='\n )\nprint(\n 'Question 5: Find the player(s) with the most number of passes for a loss.'\n )\nans = MostPassForLoss(OffAndDef)\nfor player in ans:\n print(player[0] + ': ' + str(player[1]) + ' negative passes.\\n')\ntemp = []\nprint(\n '=================================================================================='\n )\nprint('Question 6: Find the team with the most penalties.')\nans = MostPenalties(penalties)\nfor team in ans:\n print(str(team[0]) + ' had ' + str(team[1]) + ' penalties.\\n')\nprint(\n '=================================================================================='\n )\nprint('Question 7: Find the team with the most yards in penalties.')\nans = TeamPenaltyYards(penalties)\nfor team in ans:\n print(team[0] + ': ' + str(int(team[1])) + ' penalty 
yards.\\n')\nprint(\n '=================================================================================='\n )\nprint(\n 'Question 8: Find the correlation between most penalized teams and games won / lost.'\n )\nans = PenaltyWins(penalties)\nprint('Most Penalties: ' + ans[0][0] + ': ' + str(ans[0][1]) + '-' + str(\n ans[0][2]))\nprint('Least Penalties: ' + ans[1][0] + ' : ' + str(ans[1][1]) + '-' + str(\n ans[1][2]) + '\\n')\nprint(\n '=================================================================================='\n )\nprint(\n 'Question 9: Average number of plays in a game. (This may take up to a minute.)'\n )\nans = AverageNumberOfPlays()\nprint('On average, there are ' + str(ans) + ' plays each game. \\n')\nprint(\n '=================================================================================='\n )\nprint('Question 10: Longest field goal.')\nans = LongestFG(fg)\nfor player in ans:\n print(player[0] + ': ' + str(player[1]) + ' yards.\\n')\nprint(\n '=================================================================================='\n )\nprint('Question 11: Most field goals.')\nans = MostFG(fg)\nfor player in ans:\n print(player[0] + ': ' + str(player[1]) + ' FGs.\\n')\nprint(\n '=================================================================================='\n )\nprint('Question 12: Most missed field goals.')\nans = MostMFG(fg)\nfor player in ans:\n print(player[0] + ': ' + str(player[1]) + ' missed FGs.\\n')\nprint(\n '=================================================================================='\n )\nprint('Question 13: Most dropped passes.')\nans = MostDrops(OffAndDef)\nfor player in ans:\n print(player[0] + ': ' + str(player[1]) + ' drops.')\n",
"step-5": "import json\nimport os\nimport sys\n\"\"\"\nCourse: cmps 4883\nAssignemt: A03\nDate: 2/10/19\nGithub username: acdczlc\nRepo url: https://github.com/acdczlc/4883-SWTools-Conley\nName: Zac Conley\nDescription: \n Calculates all stats for questions about stats\n\n\"\"\"\n##############################################################\n# MostTeams(dict of off and def players)\n# gets player who played for most teams\n# \n# Params: \n# dict of players\n# Returns: \n# player with most teams\ndef MostTeams(OffAndDef):\n most=[]\n count=0 # set comparison\n for playerid, playerdata in OffAndDef.items():\n if(playerdata['name']!=''): #only get real players\n if(len(playerdata['Teams'])>count):\n count=len(playerdata['Teams']) #get count\n most=[[playerdata['name'],len(playerdata['Teams'])]] # replace with player\n elif(len(playerdata['Teams'])==count):\n most.append([playerdata['name'],len(playerdata['Teams'])]) # add multiple teams\n return most\n\n##############################################################\n# MostTeamsOneYear(dict of off and def players)\n# gets player who played for most teams in one year\n# \n# Params: \n# dict of players\n# Returns: \n# player with most teams\ndef MostTeamsOneYear(OffAndDef):\n teams={}\n maximum={}\n count=0\n for playerid, playerdata in OffAndDef.items():\n if(playerdata['name']!=''):\n for years in playerdata: #avoids all keys except years \n if(years!='Drops' and years!='NegRushYards' and years!='NegRush' and years!='Teams' and years!='PassForLoss' and years!=\"name\"):\n try: #try block to avoid nonplayers\n if(len(playerdata[years])>count): # if player has most teams so far\n if((len(playerdata[years]) not in teams.keys())): \n teams.clear() # delete all previous players\n teams[len(playerdata[years])]={}\n teams[len(playerdata[years])][playerdata['name']]=years\n count=len(playerdata[years])\n elif(len(playerdata[years])==count): #multiple players have the same number of teams\n 
teams[len(playerdata[years])].append(playerdata['name'],years)\n except:\n pass\n\n return teams\n##############################################################\n# NegativeRushingYards(dict of off and def players)\n# gets player with most negative rushing yards\n# \n# Params: \n# dict of players\n# Returns: \n# player with most negative rushing yards\ndef NegativeRushingYards(OffAndDef):\n NegRushYds=[]\n yds=0\n for playerid, playerdata in OffAndDef.items():\n if(playerdata['NegRushYards']<yds):\n yds=playerdata['NegRushYards']\n NegRushYds=[[playerdata['name'],playerdata['NegRushYards']]]\n elif(playerdata['NegRushYards']==yds):\n NegRushYds.append([playerdata['name'],playerdata['NegRushYards']])\n return NegRushYds\n##############################################################\n# NegativeRushes(dict of off and def players)\n# gets player with most negative rushes\n# \n# Params: \n# dict of players\n# Returns: \n# player with most negative rushes\ndef NegativeRushes(OffAndDef):\n rushes=[]\n att=0 #attempts\n for player in OffAndDef:\n if(OffAndDef[player]['NegRush']>att):\n att=OffAndDef[player]['NegRush']\n rushes=[[OffAndDef[player]['name'],OffAndDef[player]['NegRush']]]\n elif(OffAndDef[player]['NegRush']==att):\n rushes.append([OffAndDef[player]['name'],OffAndDef[player]['NegRush']])\n return rushes \n##############################################################\n# MostPassForLoss(dict of off and def players)\n# gets player with most negative rushes\n# \n# Params: \n# dict of players\n# Returns: \n# player with most negative rushes\ndef MostPassForLoss(OffAndDef):\n PassForLoss=[]\n att=0 #attempts\n for player in OffAndDef:\n if(OffAndDef[player]['PassForLoss']>att):\n att=OffAndDef[player]['PassForLoss']\n PassForLoss=[[OffAndDef[player]['name'],OffAndDef[player]['PassForLoss']]]\n elif(OffAndDef[player]['PassForLoss']==att):\n PassForLoss.append([OffAndDef[player]['name'],OffAndDef[player]['PassForLoss']])\n return PassForLoss 
\n\n##############################################################\n# MostPenalties(dict of team penalties)\n# gets team with most penalties\n# \n# Params: \n# dict of teams\n# Returns: \n# player with most negative rushes\ndef MostPenalties(penalties):\n pens=[]\n num=0\n for teamname,teamdata in penalties.items():\n if(teamdata['Penalties']>num):\n num=teamdata['Penalties']\n pens=[[teamname,teamdata['Penalties']]]\n elif (teamdata['Penalties']==num):\n pens.append([teamname,teamdata['Penalties']])\n return pens\n \n##############################################################\n# TeamPenaltyYards(dict of team penalties)\n# gets team with most penaltiy yards\n# \n# Params: \n# dict of teams\n# Returns: \n# team with most penalty yards\ndef TeamPenaltyYards(penalties):\n pens=[]\n num=0\n for teamname,teamdata in penalties.items():\n if(teamdata['PenaltyYards']>num):\n num=teamdata['PenaltyYards']\n pens=[[teamname,teamdata['PenaltyYards']]]\n elif (teamdata['PenaltyYards']==num):\n pens.append([teamname,teamdata['PenaltyYards']])\n return pens\n##############################################################\n# PenaltyWins(most penalized team,dict of team penalties)\n# shows correlation between penalty and record\n# \n# Params: \n# dict of teams, most penalized team\n# Returns: \n# team with most penaltys and least\ndef PenaltyWins(penalties):\n x=MostPenalties(penalties) #calls function to get most penalized team\n mostPenalized=[]\n for temp in x:\n mostPenalized.append(team[0])\n least=penalties[mostPenalized[0]]['Penalties']\n mostandleast=[[mostPenalized[0],penalties[mostPenalized[0]]['Wins'],penalties[mostPenalized[0]]['Losses']]] # sets most penalized record\n leastTeam=[]\n for teamname, teamdata in penalties.items():\n if(teamdata['Penalties']<least):\n least=teamdata['Penalties']\n leastTeam=[[teamname,teamdata['Wins'],teamdata['Losses']]]\n elif (teamdata['Penalties']==least):\n leastTeam.append([teamname,teamdata['Wins'],teamdata['Losses']])\n 
mostandleast.append(leastTeam[0]) #adds team and record to list at end\n return mostandleast\n\n##############################################################\n# AverageNumberOfPlays()\n# shows average number of plays\n# \n# Params: \n# none\n# Returns: \n# avg number of plays\ndef AverageNumberOfPlays():\n games=0\n plays=0\n for filename in os.listdir(os.path.dirname(os.path.abspath(__file__))+'/stats'): # sets path to all stats\n with open(os.path.dirname(os.path.abspath(__file__))+\"/stats/\"+filename,\"r\") as json_file:\n try: #gets all stats and stores each game in a dict\n data=json.load(json_file)\n except:\n pass\n else:\n for gameid, gamedata in data.items(): \n if(gameid!=\"nextupdate\"):\n games+=1 #increment number of games\n for driveid, drivedata in gamedata['drives'].items():\n if(driveid!=\"crntdrv\"):\n plays+=drivedata['numplays'] #increment number of plays\n avgplays=plays/games\n return avgplays\n##############################################################\n# LongestFG(dict of fgs)\n# longest field goal\n# \n# Params: \n# dict of fgs\n# Returns: \n# longest field goal and kicker\ndef LongestFG(fg):\n fgs=[]\n length=0 #longest fg\n for playerid,playerdata in fg.items():\n if(playerdata['Long']>length):\n length=playerdata['Long']\n fgs=[[playerdata['Name'],playerdata['Long']]]\n elif (playerdata['Long']==length):\n fgs.append([playerdata['Name'],playerdata['Long']])\n return fgs\n##############################################################\n# MostFG(dict of fgs)\n# most made field goals\n# \n# Params: \n# dict of fgs\n# Returns: \n# most made field goals and kicker\ndef MostFG(fg):\n fgs=[]\n count=0 #sets counter to 0\n for playerid,playerdata in fg.items():\n if(playerdata['FG']>count): #if largest number of fg so far\n count=playerdata['FG']\n fgs=[[playerdata['Name'],playerdata['FG']]]\n elif (playerdata['FG']==count): #if same number of fg\n fgs.append([playerdata['Name'],playerdata['FG']])\n return 
fgs\n##############################################################\n# MostMFG(dict of fgs)\n# most missed field goals\n# \n# Params: \n# dict of fgs\n# Returns: \n# most missed field goals and kicker\ndef MostMFG(fg):\n fgs=[]\n count=0 #set counter to 0\n for playerid,playerdata in fg.items():\n if(playerdata['MFG']>count): #if most misses so far\n count=playerdata['MFG']\n fgs=[[playerdata['Name'],playerdata['MFG']]]\n elif (playerdata['MFG']==count): #if same as most misses\n fgs.append([playerdata['Name'],playerdata['MFG']])\n return fgs\n##############################################################\n# MostDrops(dict of players)\n# most drops\n# \n# Params: \n# dict of players\n# Returns: \n# most drops\ndef MostDrops(OffAndDef):\n drops=[] \n count=0 #set drops to 0\n for player in OffAndDef:\n if(OffAndDef[player]['Drops']>count):\n count=OffAndDef[player]['Drops']\n drops=[[OffAndDef[player]['name'],OffAndDef[player]['Drops']]]\n elif(OffAndDef[player]['Drops']==count):\n drops.append([OffAndDef[player]['name'],OffAndDef[player]['Drops']])\n return drops\n\npath= os.path.dirname(os.path.abspath(__file__)) #set path to current location\nf=open(path+'/OffAndDef.json','r') #open separated files\nOffAndDef=json.load(f)\nf.close()\nf=open(path+'/Penalties.json','r') \npenalties=json.load(f)\nf.close()\nf=open(path+'/FG.json','r')\nfg=json.load(f)\nf.close()\nprint(\"\\n\")\nprint(\"Name: Zac Conley\")\nprint(\"Assignment: A03 - Nfl Stats\")\nprint(\"Date: 2/10/19\")\nprint(\"==================================================================================\")\nprint(\"Question 1: Find the player(s) that played for the most teams.\")\nplayerlist=MostTeams(OffAndDef)\nfor p in playerlist:\n print(str(p[0]) + \": \"+ str(p[1]) +\" teams\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 2: Find the player(s) that played for multiple teams in one 
year.\")\nans=MostTeamsOneYear(OffAndDef)\ncount=0\nfor numteams in ans.items():\n for player in numteams[1].items():\n print(player[1]+\": \" +player[0]+\" \"+str(numteams[0])+\" teams.\" )\nprint\nprint(\"==================================================================================\")\nprint(\"Question 3: Find the player(s) that had the most yards rushed for a loss.\")\nans=NegativeRushingYards(OffAndDef)\nfor player in ans:\n print(player[0]+\": \"+str(player[1])+\" rushing yards.\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 4: Find the player(s) that had the most rushes for a loss.\")\nans=NegativeRushes(OffAndDef)\nfor player in ans:\n print(player[0]+\": \"+str(player[1])+\" negative rushes.\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 5: Find the player(s) with the most number of passes for a loss.\")\nans=MostPassForLoss(OffAndDef)\nfor player in ans:\n print(player[0]+\": \"+str(player[1])+\" negative passes.\\n\")\ntemp=[]\nprint(\"==================================================================================\")\nprint(\"Question 6: Find the team with the most penalties.\")\nans=MostPenalties(penalties)\nfor team in ans:\n print(str(team[0])+\" had \"+str(team[1])+\" penalties.\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 7: Find the team with the most yards in penalties.\")\nans=TeamPenaltyYards(penalties)\nfor team in ans:\n print(team[0]+\": \"+str(int(team[1]))+\" penalty yards.\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 8: Find the correlation between most penalized teams and games won / lost.\")\nans=PenaltyWins(penalties)\nprint(\"Most Penalties: \"+ans[0][0]+\": \"+str(ans[0][1]) +\"-\" +str(ans[0][2]))\nprint(\"Least Penalties: \"+ans[1][0]+\" : 
\"+str(ans[1][1])+\"-\" +str(ans[1][2])+\"\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 9: Average number of plays in a game. (This may take up to a minute.)\")\nans=AverageNumberOfPlays()\nprint(\"On average, there are \" +str(ans) +\" plays each game. \\n\")\nprint(\"==================================================================================\")\nprint(\"Question 10: Longest field goal.\")\nans=LongestFG(fg)\nfor player in ans:\n print(player[0]+\": \"+str(player[1])+\" yards.\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 11: Most field goals.\")\nans=MostFG(fg)\nfor player in ans:\n print(player[0]+\": \"+str(player[1])+\" FGs.\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 12: Most missed field goals.\")\nans=MostMFG(fg)\nfor player in ans:\n print(player[0]+\": \"+str(player[1])+\" missed FGs.\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 13: Most dropped passes.\")\nans=MostDrops(OffAndDef)\nfor player in ans:\n print(player[0]+\": \"+str(player[1])+\" drops.\")",
"step-ids": [
8,
9,
10,
15,
17
]
}
|
[
8,
9,
10,
15,
17
] |
import xmlrpclib
import socket
import time
import math
import re
from roundup.exceptions import Reject
REVPAT = re.compile(r'(r[0-9]+\b|rev(ision)? [0-9]+\b)')
def extract_classinfo(db, klass, nodeid, newvalues):
if None == nodeid:
node = newvalues
content = newvalues['content']
else:
node = db.getnode(klass.classname, nodeid)
content = klass.get(nodeid, 'content')
if node.has_key('creation') or node.has_key('date'):
nodets = node.get('creation', node.get('date')).timestamp()
else:
nodets = time.time()
if node.has_key('author') or node.has_key('creator'):
authorid = node.get('author', node.get('creator'))
else:
authorid = db.getuid()
authorage = nodets - db.getnode('user', authorid)['creation'].timestamp()
tokens = ["klass:%s" % klass.classname,
"author:%s" % authorid,
"authorage:%d" % int(math.log(authorage)),
"hasrev:%s" % (REVPAT.search(content) is not None)]
return (content, tokens)
def check_spambayes(db, content, tokens):
try:
spambayes_uri = db.config.detectors['SPAMBAYES_URI']
except KeyError, e:
return (False, str(e))
try:
server = xmlrpclib.ServerProxy(spambayes_uri, verbose=False)
except IOError, e:
return (False, str(e))
try:
prob = server.score({'content':content}, tokens, {})
return (True, prob)
except (socket.error, xmlrpclib.Error), e:
return (False, str(e))
def check_spam(db, klass, nodeid, newvalues):
"""Auditor to score a website submission."""
if newvalues.has_key('spambayes_score'):
if not db.security.hasPermission('SB: May Classify', db.getuid()):
raise ValueError, "You don't have permission to spamclassify messages"
# Don't do anything if we're explicitly setting the score
return
if not newvalues.has_key('content'):
# No need to invoke spambayes if the content of the message
# is unchanged.
return
(content, tokens) = extract_classinfo(db, klass, nodeid, newvalues)
(success, other) = check_spambayes(db, content, tokens)
if success:
newvalues['spambayes_score'] = other
newvalues['spambayes_misclassified'] = False
else:
newvalues['spambayes_score'] = -1
newvalues['spambayes_misclassified'] = True
def init(database):
"""Initialize auditor."""
database.msg.audit('create', check_spam)
database.msg.audit('set', check_spam)
database.file.audit('create', check_spam)
database.file.audit('set', check_spam)
|
normal
|
{
"blob_id": "3ec0c20fb2dfed9930885885288cc5d47f4f5ee5",
"index": 6196,
"step-1": "\nimport xmlrpclib\nimport socket\nimport time\nimport math\nimport re\n\nfrom roundup.exceptions import Reject\n\nREVPAT = re.compile(r'(r[0-9]+\\b|rev(ision)? [0-9]+\\b)')\n\ndef extract_classinfo(db, klass, nodeid, newvalues):\n if None == nodeid:\n node = newvalues\n content = newvalues['content']\n else:\n node = db.getnode(klass.classname, nodeid)\n content = klass.get(nodeid, 'content')\n\n if node.has_key('creation') or node.has_key('date'):\n nodets = node.get('creation', node.get('date')).timestamp()\n else:\n nodets = time.time()\n\n if node.has_key('author') or node.has_key('creator'):\n authorid = node.get('author', node.get('creator'))\n else:\n authorid = db.getuid()\n\n authorage = nodets - db.getnode('user', authorid)['creation'].timestamp()\n\n tokens = [\"klass:%s\" % klass.classname,\n \"author:%s\" % authorid,\n \"authorage:%d\" % int(math.log(authorage)),\n \"hasrev:%s\" % (REVPAT.search(content) is not None)]\n\n\n return (content, tokens)\n\ndef check_spambayes(db, content, tokens):\n try:\n spambayes_uri = db.config.detectors['SPAMBAYES_URI']\n except KeyError, e:\n return (False, str(e))\n\n try:\n server = xmlrpclib.ServerProxy(spambayes_uri, verbose=False)\n except IOError, e:\n return (False, str(e))\n\n\n try:\n prob = server.score({'content':content}, tokens, {})\n return (True, prob)\n except (socket.error, xmlrpclib.Error), e:\n return (False, str(e))\n\n\ndef check_spam(db, klass, nodeid, newvalues):\n \"\"\"Auditor to score a website submission.\"\"\"\n\n\n if newvalues.has_key('spambayes_score'):\n if not db.security.hasPermission('SB: May Classify', db.getuid()):\n raise ValueError, \"You don't have permission to spamclassify messages\"\n # Don't do anything if we're explicitly setting the score\n return\n\n if not newvalues.has_key('content'):\n # No need to invoke spambayes if the content of the message\n # is unchanged.\n return\n\n (content, tokens) = extract_classinfo(db, klass, nodeid, newvalues)\n (success, 
other) = check_spambayes(db, content, tokens)\n if success:\n newvalues['spambayes_score'] = other\n newvalues['spambayes_misclassified'] = False\n else:\n newvalues['spambayes_score'] = -1\n newvalues['spambayes_misclassified'] = True\n\ndef init(database):\n \"\"\"Initialize auditor.\"\"\"\n database.msg.audit('create', check_spam)\n database.msg.audit('set', check_spam)\n database.file.audit('create', check_spam)\n database.file.audit('set', check_spam)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# coding=utf-8
"""
@Author: Freshield
@Contact: yangyufresh@163.com
@File: a1_test_call.py
@Time: 2021-01-20 17:40
@Last_update: 2021-01-20 17:40
@Desc: None
@==============================================@
@ _____ _ _ _ _ @
@ | __|___ ___ ___| |_|_|___| |_| | @
@ | __| _| -_|_ -| | | -_| | . | @
@ |__| |_| |___|___|_|_|_|___|_|___| @
@ Freshield @
@==============================================@
"""
import requests
import json
url = 'https://www.baidu.com'
url = 'http://www.baidu.com/s?wd=python'
r = requests.get(url)
print(r.url)
print(r.text)
url = 'http://www.baidu.com/s'
params = {'wd': 'python'}
r = requests.get(url, params=params)
print(r.text)
print(r.url)
data = {'key1': 'value1', 'key2': 'value2'}
data = json.dumps(data)
r = requests.post('https://www.baidu.com', data=data)
print(r.text)
print(r)
|
normal
|
{
"blob_id": "325770130473153d092d3058587e9666625e12d0",
"index": 5670,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(r.url)\nprint(r.text)\n<mask token>\nprint(r.text)\nprint(r.url)\n<mask token>\nprint(r.text)\nprint(r)\n",
"step-3": "<mask token>\nurl = 'https://www.baidu.com'\nurl = 'http://www.baidu.com/s?wd=python'\nr = requests.get(url)\nprint(r.url)\nprint(r.text)\nurl = 'http://www.baidu.com/s'\nparams = {'wd': 'python'}\nr = requests.get(url, params=params)\nprint(r.text)\nprint(r.url)\ndata = {'key1': 'value1', 'key2': 'value2'}\ndata = json.dumps(data)\nr = requests.post('https://www.baidu.com', data=data)\nprint(r.text)\nprint(r)\n",
"step-4": "<mask token>\nimport requests\nimport json\nurl = 'https://www.baidu.com'\nurl = 'http://www.baidu.com/s?wd=python'\nr = requests.get(url)\nprint(r.url)\nprint(r.text)\nurl = 'http://www.baidu.com/s'\nparams = {'wd': 'python'}\nr = requests.get(url, params=params)\nprint(r.text)\nprint(r.url)\ndata = {'key1': 'value1', 'key2': 'value2'}\ndata = json.dumps(data)\nr = requests.post('https://www.baidu.com', data=data)\nprint(r.text)\nprint(r)\n",
"step-5": "# coding=utf-8\n\"\"\"\n@Author: Freshield\n@Contact: yangyufresh@163.com\n@File: a1_test_call.py\n@Time: 2021-01-20 17:40\n@Last_update: 2021-01-20 17:40\n@Desc: None\n@==============================================@\n@ _____ _ _ _ _ @\n@ | __|___ ___ ___| |_|_|___| |_| | @\n@ | __| _| -_|_ -| | | -_| | . | @\n@ |__| |_| |___|___|_|_|_|___|_|___| @\n@ Freshield @\n@==============================================@\n\"\"\"\nimport requests\nimport json\n\nurl = 'https://www.baidu.com'\nurl = 'http://www.baidu.com/s?wd=python'\n\nr = requests.get(url)\n\nprint(r.url)\nprint(r.text)\n\nurl = 'http://www.baidu.com/s'\nparams = {'wd': 'python'}\nr = requests.get(url, params=params)\nprint(r.text)\nprint(r.url)\n\ndata = {'key1': 'value1', 'key2': 'value2'}\ndata = json.dumps(data)\nr = requests.post('https://www.baidu.com', data=data)\nprint(r.text)\nprint(r)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import csv
import re

# Running totals accumulated across every sentence in the paragraph.
totWords = 0                 # total number of words
wordLen = 0                  # total number of letters (punctuation stripped)
totSentWithPunctuation = 0   # total characters per sentence, punctuation kept

sourceFile = os.path.join('Resources', 'paragraph_2.txt')

# Sentences are separated by blank lines in the source file.
with open(sourceFile, 'r') as paragraph:
    paragraph = paragraph.read().split("\n\n")

for sentence in paragraph:
    # Drop punctuation before splitting the sentence into words.
    sentNoPunctuation = re.sub(r'[^\w\s]', '', sentence)
    words = sentNoPunctuation.split(" ")
    for word in words:
        wordLen += len(word)
    totWords += len(words)
    totSentWithPunctuation += len(sentence)

# Fix: these averages were recomputed on every loop iteration even though
# only the values from the final pass were ever used; compute them once
# from the finished totals instead (identical results, less wasted work).
avgSentLen_Words = round(totWords / len(paragraph), 2)   # avg words/sentence
avgLetterCount = round(wordLen / totWords, 2)            # avg letters/word
avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)

print(f"\n\nParagraph Analysis of '{sourceFile}' file")
print(f"---------------------------------------------------------")
print(f"      Approximate Word Count: {totWords} ")
print(f"      Approximate Sentence Count: {len(paragraph)} ")
print(f"      Average Letter Count: {avgLetterCount} ")
print(f"      Average Sentence Length (words): {avgSentLen_Words} ")
print(f"      Average Sentence Length (chars): {avgSentLen_chars} ")
normal
|
{
"blob_id": "3cd7abf9659fe1db0ef3aa58df8dd7fd959e10a6",
"index": 386,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split('\\n\\n')\nfor sentence in paragraph:\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub('[^\\\\w\\\\s]', '', sentence)\n words = sentNoPunctuation.split(' ')\n for word in words:\n wordLen = wordLen + len(word)\n totWords = totWords + len(words)\n avgSentLen_Words = round(totWords / len(paragraph), 2)\n avgLetterCount = round(wordLen / totWords, 2)\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)\nprint(f\"\"\"\n\nParagraph Analysis of '{sourceFile}' file\"\"\")\nprint(f'---------------------------------------------------------')\nprint(f' Approximate Word Count: {totWords} ')\nprint(f' Approximate Sentence Count: {len(paragraph)} ')\nprint(f' Average Letter Count: {avgLetterCount} ')\nprint(f' Average Sentence Length (words): {avgSentLen_Words} ')\nprint(f' Average Sentence Length (chars): {avgSentLen_chars} ')\n",
"step-3": "<mask token>\ntotWords = 0\nwordLen = 0\ntotSentWithPunctuation = 0\nsourceFile = os.path.join('Resources', 'paragraph_2.txt')\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split('\\n\\n')\nfor sentence in paragraph:\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub('[^\\\\w\\\\s]', '', sentence)\n words = sentNoPunctuation.split(' ')\n for word in words:\n wordLen = wordLen + len(word)\n totWords = totWords + len(words)\n avgSentLen_Words = round(totWords / len(paragraph), 2)\n avgLetterCount = round(wordLen / totWords, 2)\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)\nprint(f\"\"\"\n\nParagraph Analysis of '{sourceFile}' file\"\"\")\nprint(f'---------------------------------------------------------')\nprint(f' Approximate Word Count: {totWords} ')\nprint(f' Approximate Sentence Count: {len(paragraph)} ')\nprint(f' Average Letter Count: {avgLetterCount} ')\nprint(f' Average Sentence Length (words): {avgSentLen_Words} ')\nprint(f' Average Sentence Length (chars): {avgSentLen_chars} ')\n",
"step-4": "import os\nimport csv\nimport re\ntotWords = 0\nwordLen = 0\ntotSentWithPunctuation = 0\nsourceFile = os.path.join('Resources', 'paragraph_2.txt')\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split('\\n\\n')\nfor sentence in paragraph:\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub('[^\\\\w\\\\s]', '', sentence)\n words = sentNoPunctuation.split(' ')\n for word in words:\n wordLen = wordLen + len(word)\n totWords = totWords + len(words)\n avgSentLen_Words = round(totWords / len(paragraph), 2)\n avgLetterCount = round(wordLen / totWords, 2)\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)\nprint(f\"\"\"\n\nParagraph Analysis of '{sourceFile}' file\"\"\")\nprint(f'---------------------------------------------------------')\nprint(f' Approximate Word Count: {totWords} ')\nprint(f' Approximate Sentence Count: {len(paragraph)} ')\nprint(f' Average Letter Count: {avgLetterCount} ')\nprint(f' Average Sentence Length (words): {avgSentLen_Words} ')\nprint(f' Average Sentence Length (chars): {avgSentLen_chars} ')\n",
"step-5": "import os\nimport csv\nimport re\n\ntotWords = 0\nwordLen = 0\ntotSentWithPunctuation = 0\n\nsourceFile = os.path.join('Resources', 'paragraph_2.txt')\n\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split(\"\\n\\n\")\n\n\nfor sentence in paragraph:\n # Remove punctuation from sentences\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub(r'[^\\w\\s]','',sentence)\n\n #Split sentence with no punctuation by words using spaces\n words = sentNoPunctuation.split(\" \")\n for word in words:\n wordLen = wordLen + len(word)\n\n # Compute totals for output message \n totWords = totWords + len(words) # Total words for all sentences\n avgSentLen_Words = round(totWords / len(paragraph),2) # Average words for all sentences\n avgLetterCount = round(wordLen/totWords,2) # Average letter by word for all sentences\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph),2)\n\n #Validate output by printing a test line\n # print(f\"words: {len(words)} S w Punct. len: {len(sentWithPunctuation)} Sentence: {sentWithPunctuation}\")\n\nprint(f\"\\n\\nParagraph Analysis of '{sourceFile}' file\")\nprint(f\"---------------------------------------------------------\")\nprint(f\" Approximate Word Count: {totWords} \")\nprint(f\" Approximate Sentence Count: {len(paragraph)} \")\nprint(f\" Average Letter Count: {avgLetterCount} \")\nprint(f\" Average Sentence Length (words): {avgSentLen_Words} \")\nprint(f\" Average Sentence Length (chars): {avgSentLen_chars} \")\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@outputSchema('word:chararray')
def reverse(word):
"""
Return the reverse text of the provided word
"""
return word[::-1]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@outputSchema('word:chararray')
def reverse(word):
"""
Return the reverse text of the provided word
"""
return word[::-1]
@outputSchema('length:int')
def num_chars(word):
"""
Return the length of the provided word
"""
return len(word)
<|reserved_special_token_1|>
from pig_util import outputSchema
@outputSchema('word:chararray')
def reverse(word):
    """Return *word* with its characters in reverse order."""
    return ''.join(reversed(word))
@outputSchema('length:int')
def num_chars(word):
    """Count the characters in *word* and return that count."""
    length = len(word)
    return length
|
flexible
|
{
"blob_id": "94560d8f6528a222e771ca6aa60349d9682e8f4b",
"index": 6558,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@outputSchema('word:chararray')\ndef reverse(word):\n \"\"\"\n Return the reverse text of the provided word\n \"\"\"\n return word[::-1]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@outputSchema('word:chararray')\ndef reverse(word):\n \"\"\"\n Return the reverse text of the provided word\n \"\"\"\n return word[::-1]\n\n\n@outputSchema('length:int')\ndef num_chars(word):\n \"\"\"\n Return the length of the provided word\n \"\"\"\n return len(word)\n",
"step-4": "from pig_util import outputSchema\n\n\n@outputSchema('word:chararray')\ndef reverse(word):\n \"\"\"\n Return the reverse text of the provided word\n \"\"\"\n return word[::-1]\n\n\n@outputSchema('length:int')\ndef num_chars(word):\n \"\"\"\n Return the length of the provided word\n \"\"\"\n return len(word)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def list(request):
techniques = Technique.objects.annotate(num_images=Count('images')
).order_by('-num_images')
return render_to_response('technique/list.html', {'techniques':
techniques}, RequestContext(request))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def list(request):
techniques = Technique.objects.annotate(num_images=Count('images')
).order_by('-num_images')
return render_to_response('technique/list.html', {'techniques':
techniques}, RequestContext(request))
def view(request, pk):
t = Technique.objects.get(pk=pk)
related = filter(lambda x: x, [t2 for t2 in t.starting_at.all()] + [t2 for
t2 in t.ending_at.all()] + [t2 for t2 in t.children.all()] + [t.
start, t.end])
return render_to_response('technique/view.html', {'t': t, 'related':
related}, RequestContext(request))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def list(request):
techniques = Technique.objects.annotate(num_images=Count('images')
).order_by('-num_images')
return render_to_response('technique/list.html', {'techniques':
techniques}, RequestContext(request))
def view(request, pk):
t = Technique.objects.get(pk=pk)
related = filter(lambda x: x, [t2 for t2 in t.starting_at.all()] + [t2 for
t2 in t.ending_at.all()] + [t2 for t2 in t.children.all()] + [t.
start, t.end])
return render_to_response('technique/view.html', {'t': t, 'related':
related}, RequestContext(request))
@login_required
def create(request, pk=None):
if pk:
t = Technique.objects.get(pk=pk)
else:
t = Technique(created_by=request.user)
if request.method == 'POST':
f = TechniqueForm(request.POST, instance=t)
image_formset = TechniqueImageFormset(request.POST, request.FILES,
instance=t)
if f.is_valid():
t = f.save(commit=False)
image_formset = TechniqueImageFormset(request.POST, request.
FILES, instance=t)
if image_formset.is_valid():
t.save()
for i in image_formset.save(commit=False):
i.created_by = request.user
i.technique = t
i.save()
return redirect(reverse('technique.views.view', args=(t.pk,)))
else:
f = TechniqueForm(instance=t)
image_formset = TechniqueImageFormset(instance=t)
return render_to_response('technique/create.html', {'f': f,
'image_formset': image_formset}, RequestContext(request))
<|reserved_special_token_1|>
import pdb
from django.db.models import Count
from django.shortcuts import render_to_response, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.template import RequestContext
from models import *
from forms import *
from django.http import HttpResponse
def list(request):
    """Render all techniques, the most-illustrated ones first."""
    # NOTE: shadows the builtin ``list`` -- the name is kept because it is
    # referenced from the URL configuration.
    ranked = Technique.objects.annotate(num_images=Count('images')).order_by(
        '-num_images')
    context = {'techniques': ranked}
    return render_to_response('technique/list.html', context,
        RequestContext(request))
def view(request, pk):
    """Render one technique together with every related technique."""
    t = Technique.objects.get(pk=pk)
    # Collect, in order: techniques starting here, ending here, children,
    # then the start/end endpoints themselves; drop falsy entries (e.g. a
    # missing start or end).
    candidates = []
    candidates.extend(t.starting_at.all())
    candidates.extend(t.ending_at.all())
    candidates.extend(t.children.all())
    candidates.append(t.start)
    candidates.append(t.end)
    related = [item for item in candidates if item]
    return render_to_response('technique/view.html', {'t': t, 'related':
        related}, RequestContext(request))
@login_required
def create(request, pk=None):
    """Create a new technique or edit an existing one.

    With ``pk`` the matching Technique is edited; without it a fresh
    Technique owned by the current user is started.  Handles the main
    TechniqueForm together with an inline formset of technique images.
    """
    if pk:
        t = Technique.objects.get(pk=pk)
    else:
        t = Technique(created_by=request.user)
    if request.method == 'POST':
        f = TechniqueForm(request.POST, instance=t)
        # Bound up-front so the formset can be re-rendered with errors when
        # the main form turns out to be invalid.
        image_formset = TechniqueImageFormset(request.POST, request.FILES,
            instance=t)
        if f.is_valid():
            t = f.save(commit=False)
            # Re-bind the formset against the (still unsaved) instance the
            # form produced, so images attach to the right object.
            image_formset = TechniqueImageFormset(request.POST, request.
                FILES, instance=t)
            if image_formset.is_valid():
                t.save()
                for i in image_formset.save(commit=False):
                    i.created_by = request.user
                    i.technique = t
                    i.save()
                # NOTE(review): ``reverse`` does not appear in this module's
                # visible imports -- presumably re-exported by a star import;
                # confirm it resolves at runtime.
                return redirect(reverse('technique.views.view', args=(t.pk,)))
    else:
        f = TechniqueForm(instance=t)
        image_formset = TechniqueImageFormset(instance=t)
    # Reached on GET, or on POST when either form failed validation.
    return render_to_response('technique/create.html', {'f': f,
        'image_formset': image_formset}, RequestContext(request))
|
flexible
|
{
"blob_id": "565e994576a57f8bbdcb201f2439bd7e595fa53e",
"index": 9679,
"step-1": "<mask token>\n\n\ndef list(request):\n techniques = Technique.objects.annotate(num_images=Count('images')\n ).order_by('-num_images')\n return render_to_response('technique/list.html', {'techniques':\n techniques}, RequestContext(request))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef list(request):\n techniques = Technique.objects.annotate(num_images=Count('images')\n ).order_by('-num_images')\n return render_to_response('technique/list.html', {'techniques':\n techniques}, RequestContext(request))\n\n\ndef view(request, pk):\n t = Technique.objects.get(pk=pk)\n related = filter(lambda x: x, [t2 for t2 in t.starting_at.all()] + [t2 for\n t2 in t.ending_at.all()] + [t2 for t2 in t.children.all()] + [t.\n start, t.end])\n return render_to_response('technique/view.html', {'t': t, 'related':\n related}, RequestContext(request))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef list(request):\n techniques = Technique.objects.annotate(num_images=Count('images')\n ).order_by('-num_images')\n return render_to_response('technique/list.html', {'techniques':\n techniques}, RequestContext(request))\n\n\ndef view(request, pk):\n t = Technique.objects.get(pk=pk)\n related = filter(lambda x: x, [t2 for t2 in t.starting_at.all()] + [t2 for\n t2 in t.ending_at.all()] + [t2 for t2 in t.children.all()] + [t.\n start, t.end])\n return render_to_response('technique/view.html', {'t': t, 'related':\n related}, RequestContext(request))\n\n\n@login_required\ndef create(request, pk=None):\n if pk:\n t = Technique.objects.get(pk=pk)\n else:\n t = Technique(created_by=request.user)\n if request.method == 'POST':\n f = TechniqueForm(request.POST, instance=t)\n image_formset = TechniqueImageFormset(request.POST, request.FILES,\n instance=t)\n if f.is_valid():\n t = f.save(commit=False)\n image_formset = TechniqueImageFormset(request.POST, request.\n FILES, instance=t)\n if image_formset.is_valid():\n t.save()\n for i in image_formset.save(commit=False):\n i.created_by = request.user\n i.technique = t\n i.save()\n return redirect(reverse('technique.views.view', args=(t.pk,)))\n else:\n f = TechniqueForm(instance=t)\n image_formset = TechniqueImageFormset(instance=t)\n return render_to_response('technique/create.html', {'f': f,\n 'image_formset': image_formset}, RequestContext(request))\n",
"step-4": "import pdb\nfrom django.db.models import Count\nfrom django.shortcuts import render_to_response, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.template import RequestContext\nfrom models import *\nfrom forms import *\nfrom django.http import HttpResponse\n\n\ndef list(request):\n techniques = Technique.objects.annotate(num_images=Count('images')\n ).order_by('-num_images')\n return render_to_response('technique/list.html', {'techniques':\n techniques}, RequestContext(request))\n\n\ndef view(request, pk):\n t = Technique.objects.get(pk=pk)\n related = filter(lambda x: x, [t2 for t2 in t.starting_at.all()] + [t2 for\n t2 in t.ending_at.all()] + [t2 for t2 in t.children.all()] + [t.\n start, t.end])\n return render_to_response('technique/view.html', {'t': t, 'related':\n related}, RequestContext(request))\n\n\n@login_required\ndef create(request, pk=None):\n if pk:\n t = Technique.objects.get(pk=pk)\n else:\n t = Technique(created_by=request.user)\n if request.method == 'POST':\n f = TechniqueForm(request.POST, instance=t)\n image_formset = TechniqueImageFormset(request.POST, request.FILES,\n instance=t)\n if f.is_valid():\n t = f.save(commit=False)\n image_formset = TechniqueImageFormset(request.POST, request.\n FILES, instance=t)\n if image_formset.is_valid():\n t.save()\n for i in image_formset.save(commit=False):\n i.created_by = request.user\n i.technique = t\n i.save()\n return redirect(reverse('technique.views.view', args=(t.pk,)))\n else:\n f = TechniqueForm(instance=t)\n image_formset = TechniqueImageFormset(instance=t)\n return render_to_response('technique/create.html', {'f': f,\n 'image_formset': image_formset}, RequestContext(request))\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_rep_name(string):
return string[-1:]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_rep_name(string):
return string[-1:]
<|reserved_special_token_0|>
for name in column_names:
if get_rep_name(name) not in biological_rep:
biological_rep.append(name[-1:])
for i in range(0, len(biological_rep), 1):
mean_replicates['mean_replicate_' + biological_rep[i]] = [0] * len(df)
mean_replicates['nb_itteration_' + biological_rep[i]] = [0] * len(df)
for k in range(0, len(df), 1):
for i in range(0, len(column_names), 1):
for j in biological_rep:
if j in get_rep_name(column_names[i]):
mean_replicates['mean_replicate_' + j][k] += df.loc[k,
column_names[i]]
mean_replicates['nb_itteration_' + j][k] += 1
<|reserved_special_token_0|>
for i in range(0, len(biological_rep), 1):
dico3['mean_replicate_' + biological_rep[i]] = [0] * len(df)
<|reserved_special_token_0|>
for i in range(0, len(biological_rep), 1):
list_mean_replicates.append('mean_replicate_' + biological_rep[i])
for key in list_mean_replicates:
for key2 in mean_replicates:
if key != key2 and get_rep_name(key) == get_rep_name(key2):
print(key, key2)
dico2[key] = list(zip(mean_replicates[key], mean_replicates[key2]))
for key in dico2:
for i in range(0, len(df), 1):
cal = round(dico2[key][i][0] / dico2[key][i][1])
dico3[key][i] = cal
<|reserved_special_token_0|>
for mean in list_mean_replicates:
final_df[mean] = 0
for i in range(0, len(final_df), 1):
for key in list_mean_replicates:
final_df.loc[i, key] = dico3[key][i]
final_df.to_excel(
'/home/kaouther/Documents/Internship/pre_process/output_files/brain_matrix.xlsx'
, index=False, header=True)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
path = (
'/home/kaouther/Documents/Internship/pre_process/input_files/clean/TabulaMuris_Senis_Brain.xlsx'
)
df = pd.read_excel(path)
def get_rep_name(string):
return string[-1:]
column_names = df.columns
column_names = column_names.delete([0])
biological_rep = []
mean_replicates = dict()
for name in column_names:
if get_rep_name(name) not in biological_rep:
biological_rep.append(name[-1:])
for i in range(0, len(biological_rep), 1):
mean_replicates['mean_replicate_' + biological_rep[i]] = [0] * len(df)
mean_replicates['nb_itteration_' + biological_rep[i]] = [0] * len(df)
for k in range(0, len(df), 1):
for i in range(0, len(column_names), 1):
for j in biological_rep:
if j in get_rep_name(column_names[i]):
mean_replicates['mean_replicate_' + j][k] += df.loc[k,
column_names[i]]
mean_replicates['nb_itteration_' + j][k] += 1
dico2 = dict()
dico3 = dict()
for i in range(0, len(biological_rep), 1):
dico3['mean_replicate_' + biological_rep[i]] = [0] * len(df)
list_mean_replicates = []
for i in range(0, len(biological_rep), 1):
list_mean_replicates.append('mean_replicate_' + biological_rep[i])
for key in list_mean_replicates:
for key2 in mean_replicates:
if key != key2 and get_rep_name(key) == get_rep_name(key2):
print(key, key2)
dico2[key] = list(zip(mean_replicates[key], mean_replicates[key2]))
for key in dico2:
for i in range(0, len(df), 1):
cal = round(dico2[key][i][0] / dico2[key][i][1])
dico3[key][i] = cal
final_df = df.copy()
for mean in list_mean_replicates:
final_df[mean] = 0
for i in range(0, len(final_df), 1):
for key in list_mean_replicates:
final_df.loc[i, key] = dico3[key][i]
final_df.to_excel(
'/home/kaouther/Documents/Internship/pre_process/output_files/brain_matrix.xlsx'
, index=False, header=True)
duplicateRowsDF = final_df[final_df.iloc[:, 0].duplicated()]
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Average biological replicates in a TabulaMuris Senis expression matrix.

Reads an Excel matrix (genes x sample columns), groups the sample columns
by the replicate tag encoded as the LAST character of each column name,
computes a rounded per-gene mean for every replicate group, appends those
means as new columns, and writes the result to a new Excel file.

Created on Wed Apr 14 09:53:10 2021

@author: kaouther
"""

# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""
import pandas as pd
# Previously-used input files, kept for reference:
#path = '/home/kaouther/Documents/Internship/pre_process/input_files/heart_forKaouther.xlsx'
#path = '/home/kaouther/Documents/Internship/pre_process/input_files/tissues_9m_forKaouther3.xlsx'

path = '/home/kaouther/Documents/Internship/pre_process/input_files/clean/TabulaMuris_Senis_Brain.xlsx'
#path=input('enter the complete path to your input file')

#path = input('Paste the absolute path to the file') #enter the path to the heart_forKaouther.xlsx
#df = pd.read_csv(path, delimiter = "\t")
df = pd.read_excel(path)
# Extract the replicate tag: the last character of a column name.
def get_rep_name(string: str) -> str:
    return (string[-1:])

# Sample/replicate columns only -- the first column holds the gene name.
column_names = df.columns
column_names = column_names.delete([0]) #remove the gene-name column

# Collect the distinct replicate tags seen across the sample columns.
biological_rep=[]
mean_replicates= dict()
for name in column_names:
    if get_rep_name(name) not in biological_rep:
        #print(get_rep_name(name))
        biological_rep.append(name[-1:])

# Per replicate tag, track for every gene (row) the running sum of values
# ('mean_replicate_<tag>') and how many columns contributed
# ('nb_itteration_<tag>' -- key spelling kept: it is a runtime dict key).
for i in range (0,len(biological_rep),1):
    mean_replicates['mean_replicate_'+biological_rep[i]] = [0]*len(df)
    mean_replicates['nb_itteration_'+biological_rep[i]] = [0]*len(df)
for k in range (0,len(df),1):

    for i in range (0, len(column_names),1):
        for j in biological_rep:
            # A column belongs to tag j when its last character matches
            # (j is a single character, so ``in`` acts as equality here).
            if j in get_rep_name(column_names[i]):
                mean_replicates['mean_replicate_'+j][k]+= df.loc[k,column_names[i]]
                mean_replicates['nb_itteration_'+j][k]+=1


dico2 = dict() #per mean column: list of (sum, count) tuples, one per gene
dico3 = dict() #per mean column: the rounded mean per gene

for i in range (0,len(biological_rep),1):
    dico3['mean_replicate_'+biological_rep[i]] = [0]*len(df)

# Names of the new mean columns, in replicate-tag discovery order.
list_mean_replicates =[]
for i in range (0,len(biological_rep),1):
    list_mean_replicates.append('mean_replicate_'+biological_rep[i])
# Pair each per-gene sum with its per-gene count for the same replicate tag
# (the only other key sharing a tag's last character is its count key).
for key in list_mean_replicates:
    for key2 in mean_replicates:
        if key != key2 and get_rep_name(key) == get_rep_name(key2):
            print( key,key2)

            dico2[key]= list(zip((mean_replicates[key]),mean_replicates[key2]))
# Rounded mean per gene per replicate group (round() with no ndigits
# returns an int, using round-half-to-even).
for key in dico2:
    for i in range(0,len(df),1):
        cal = round(dico2[key][i][0]/ dico2[key][i][1])
        dico3[key][i]= cal
# Append the mean columns to a copy of the original matrix.
final_df = df.copy()
for mean in list_mean_replicates:
    final_df[mean] = 0

for i in range(0,len(final_df),1):
    for key in list_mean_replicates:
        final_df.loc[i,key] = dico3[key][i]
# Export the augmented matrix as Excel.
final_df.to_excel ('/home/kaouther/Documents/Internship/pre_process/output_files/brain_matrix.xlsx', index = False, header=True)
#final_df.to_csv('/home/kaouther/Documents/Internship/pre_process/output_files/'+'tissues_mean.csv', index = False, header=True)
#final_df.to_excel('/home/kaouther/Documents/Internship/pre_process/output_files/'+'tissues_matrix.xlsx', index = False, header=True)
#file_name= input('file name')
#final_df.to_excel(file_name+'.xlsx', index = False, header=True)

# Rows whose first column (gene name) occurs more than once.
# NOTE(review): computed but never used or exported -- confirm intent.
duplicateRowsDF = final_df[final_df.iloc[:,0].duplicated()]
|
flexible
|
{
"blob_id": "a3588a521a87765d215fd2048407e5e54fb87e94",
"index": 4276,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_rep_name(string):\n return string[-1:]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_rep_name(string):\n return string[-1:]\n\n\n<mask token>\nfor name in column_names:\n if get_rep_name(name) not in biological_rep:\n biological_rep.append(name[-1:])\nfor i in range(0, len(biological_rep), 1):\n mean_replicates['mean_replicate_' + biological_rep[i]] = [0] * len(df)\n mean_replicates['nb_itteration_' + biological_rep[i]] = [0] * len(df)\nfor k in range(0, len(df), 1):\n for i in range(0, len(column_names), 1):\n for j in biological_rep:\n if j in get_rep_name(column_names[i]):\n mean_replicates['mean_replicate_' + j][k] += df.loc[k,\n column_names[i]]\n mean_replicates['nb_itteration_' + j][k] += 1\n<mask token>\nfor i in range(0, len(biological_rep), 1):\n dico3['mean_replicate_' + biological_rep[i]] = [0] * len(df)\n<mask token>\nfor i in range(0, len(biological_rep), 1):\n list_mean_replicates.append('mean_replicate_' + biological_rep[i])\nfor key in list_mean_replicates:\n for key2 in mean_replicates:\n if key != key2 and get_rep_name(key) == get_rep_name(key2):\n print(key, key2)\n dico2[key] = list(zip(mean_replicates[key], mean_replicates[key2]))\nfor key in dico2:\n for i in range(0, len(df), 1):\n cal = round(dico2[key][i][0] / dico2[key][i][1])\n dico3[key][i] = cal\n<mask token>\nfor mean in list_mean_replicates:\n final_df[mean] = 0\nfor i in range(0, len(final_df), 1):\n for key in list_mean_replicates:\n final_df.loc[i, key] = dico3[key][i]\nfinal_df.to_excel(\n '/home/kaouther/Documents/Internship/pre_process/output_files/brain_matrix.xlsx'\n , index=False, header=True)\n<mask token>\n",
"step-4": "<mask token>\npath = (\n '/home/kaouther/Documents/Internship/pre_process/input_files/clean/TabulaMuris_Senis_Brain.xlsx'\n )\ndf = pd.read_excel(path)\n\n\ndef get_rep_name(string):\n return string[-1:]\n\n\ncolumn_names = df.columns\ncolumn_names = column_names.delete([0])\nbiological_rep = []\nmean_replicates = dict()\nfor name in column_names:\n if get_rep_name(name) not in biological_rep:\n biological_rep.append(name[-1:])\nfor i in range(0, len(biological_rep), 1):\n mean_replicates['mean_replicate_' + biological_rep[i]] = [0] * len(df)\n mean_replicates['nb_itteration_' + biological_rep[i]] = [0] * len(df)\nfor k in range(0, len(df), 1):\n for i in range(0, len(column_names), 1):\n for j in biological_rep:\n if j in get_rep_name(column_names[i]):\n mean_replicates['mean_replicate_' + j][k] += df.loc[k,\n column_names[i]]\n mean_replicates['nb_itteration_' + j][k] += 1\ndico2 = dict()\ndico3 = dict()\nfor i in range(0, len(biological_rep), 1):\n dico3['mean_replicate_' + biological_rep[i]] = [0] * len(df)\nlist_mean_replicates = []\nfor i in range(0, len(biological_rep), 1):\n list_mean_replicates.append('mean_replicate_' + biological_rep[i])\nfor key in list_mean_replicates:\n for key2 in mean_replicates:\n if key != key2 and get_rep_name(key) == get_rep_name(key2):\n print(key, key2)\n dico2[key] = list(zip(mean_replicates[key], mean_replicates[key2]))\nfor key in dico2:\n for i in range(0, len(df), 1):\n cal = round(dico2[key][i][0] / dico2[key][i][1])\n dico3[key][i] = cal\nfinal_df = df.copy()\nfor mean in list_mean_replicates:\n final_df[mean] = 0\nfor i in range(0, len(final_df), 1):\n for key in list_mean_replicates:\n final_df.loc[i, key] = dico3[key][i]\nfinal_df.to_excel(\n '/home/kaouther/Documents/Internship/pre_process/output_files/brain_matrix.xlsx'\n , index=False, header=True)\nduplicateRowsDF = final_df[final_df.iloc[:, 0].duplicated()]\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 14 09:53:10 2021\n\n@author: kaouther\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport pandas as pd\n#path = '/home/kaouther/Documents/Internship/pre_process/input_files/heart_forKaouther.xlsx'\n#path = '/home/kaouther/Documents/Internship/pre_process/input_files/tissues_9m_forKaouther3.xlsx'\n\npath = '/home/kaouther/Documents/Internship/pre_process/input_files/clean/TabulaMuris_Senis_Brain.xlsx'\n#path=input('enter the complete path to your input file')\n\n#path = input('Paste the absolute path to the file') #enter the path to the heart_forKaouther.xlsx\n#df = pd.read_csv(path, delimiter = \"\\t\")\ndf = pd.read_excel(path)\n#function de extract the last caracterfrom a string\ndef get_rep_name(string):\n return (string[-1:])\n\n#get columns names (samples & biological replicates)\ncolumn_names = df.columns\ncolumn_names = column_names.delete([0]) #remove gene\n\n#get only biological replicates \nbiological_rep=[]\nmean_replicates= dict()\nfor name in column_names:\n if get_rep_name(name) not in biological_rep:\n #print(get_rep_name(name))\n biological_rep.append(name[-1:])\n \n#dictionnary to store the sum of values of a type of biological rep and nb of iteration\nfor i in range (0,len(biological_rep),1): \n mean_replicates['mean_replicate_'+biological_rep[i]] = [0]*len(df)\n mean_replicates['nb_itteration_'+biological_rep[i]] = [0]*len(df)\nfor k in range (0,len(df),1):\n \n for i in range (0, len(column_names),1):\n for j in biological_rep:\n if j in get_rep_name(column_names[i]):\n mean_replicates['mean_replicate_'+j][k]+= df.loc[k,column_names[i]]\n mean_replicates['nb_itteration_'+j][k]+=1\n\n\ndico2 = dict() #store tuples sum and iteration on each line\ndico3 = dict() #store the mean calculation \n\nfor i in range (0,len(biological_rep),1):\n dico3['mean_replicate_'+biological_rep[i]] = [0]*len(df)\n\n#get list 
of mean replicates\nlist_mean_replicates =[]\nfor i in range (0,len(biological_rep),1):\n list_mean_replicates.append('mean_replicate_'+biological_rep[i])\n#dico to store as a tuple the sum and iteration for each mean rep\nfor key in list_mean_replicates:\n for key2 in mean_replicates:\n if key != key2 and get_rep_name(key) == get_rep_name(key2):\n print( key,key2)\n \n dico2[key]= list(zip((mean_replicates[key]),mean_replicates[key2]))\n#dico to calculate the average per gene per mean replicate \nfor key in dico2:\n for i in range(0,len(df),1): \n cal = round(dico2[key][i][0]/ dico2[key][i][1])\n dico3[key][i]= cal\n#store results in new df in new columns\nfinal_df = df.copy()\nfor mean in list_mean_replicates:\n final_df[mean] = 0\n \nfor i in range(0,len(final_df),1):\n for key in list_mean_replicates:\n final_df.loc[i,key] = dico3[key][i]\n#export as excel the df \nfinal_df.to_excel ('/home/kaouther/Documents/Internship/pre_process/output_files/brain_matrix.xlsx', index = False, header=True)\n#final_df.to_csv('/home/kaouther/Documents/Internship/pre_process/output_files/'+'tissues_mean.csv', index = False, header=True)\n#final_df.to_excel('/home/kaouther/Documents/Internship/pre_process/output_files/'+'tissues_matrix.xlsx', index = False, header=True)\n#file_name= input('file name')\n#final_df.to_excel(file_name+'.xlsx', index = False, header=True)\n\nduplicateRowsDF = final_df[final_df.iloc[:,0].duplicated()]\n",
"step-ids": [
0,
1,
2,
3,
5
]
}
|
[
0,
1,
2,
3,
5
] |
#this apps is open

#Let's start with introduction

# Simple interactive introduction dialog (Python 2: `print` statements and
# `raw_input`).  Asks the user to introduce themselves, then offers a small
# two-option follow-up menu.
print "Hi, I am x0x. Could we introduce ourselves? (yes/no)"
answer = raw_input()
if answer.lower() == 'yes':
    print "Okay, what is your name?"
    name = raw_input()
    print "Hi", name
    print "Nice to meet you."
    print "What are you going to do?"
    print '1. Say "good bye"'
    print '2. Say "Thank you"'
    answer = raw_input()
    if answer == '1':
        print 'Well, good bye', name
    elif answer == '2':
        print 'Sakalangkong', name
    else:
        # Neither menu option was chosen.
        print 'You choose wrong answer, I am terminated.'
        print 'bye'
elif answer.lower() == 'no':
    print "thank you"
else:
    # Any answer other than yes/no ends the conversation.
    print "your answer is wrong"
    print "Please come back later. Thank you!"
    print "yoyoi oke"
|
normal
|
{
"blob_id": "a28c62a18d793fb285353902d01801c720bcb454",
"index": 1653,
"step-1": "#this apps is open\n\n#Let's start with introduction\n\nprint \"Hi, I am x0x. Could we introduce ourselves? (yes/no)\"\nanswer = raw_input()\nif answer.lower() == 'yes':\n print \"Okay, what is your name?\"\n name = raw_input()\n print \"Hi\", name\n print \"Nice to meet you.\"\n print \"What are you going to do?\"\n print '1. Say \"good bye\"'\n print '2. Say \"Thank you\"'\n answer = raw_input()\n if answer == '1':\n print 'Well, good bye', name\n elif answer == '2':\n print 'Sakalangkong', name\n else:\n print 'You choose wrong answer, I am terminated.'\n print 'bye'\nelif answer.lower() == 'no':\n print \"thank you\"\nelse:\n print \"your answer is wrong\"\n print \"Please come back later. Thank you!\"\n print \"yoyoi oke\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
# from applications.models import ApplicationReview
# from profiles.models import Restaurant, Program, Courier
# Enum for Admin
BASIC_ADMIN = 'ADMIN'
SUPER_ADMIN = 'SUPER'
MANAGER = 'MNGR'
DEVELOPER = 'DEV'
STAFF = 'STAFF'
ADMIN_ROLE_OPTIONS = [
(BASIC_ADMIN, 'basic admin'),
(SUPER_ADMIN, 'super admin'),
(MANAGER, 'manager'),
(DEVELOPER, 'developer'),
(STAFF, 'stuff'),
]
PROGRAM = "PR"
RESTAURANT = "RE"
USER_TYPE_OPTIONS = [
(PROGRAM, 'Program'),
(RESTAURANT, 'Restaurant'),
]
PHONE = "PH"
EMAIL = "EM"
PREFERRED_CONTACT = [
(PHONE, 'Phone'),
(EMAIL, 'Email'),
]
ADMIN = "ADM"
BASIC_USER = "BSC"
USER_TYPES = [
(ADMIN, 'Admin'),
(BASIC_USER, 'Basic User'),
]
class UserClassManager(BaseUserManager):
"""Manager for User class"""
# method for creatig admins, but not super admins
def create_staffuser(self, last_name, first_name, email, password, role, phone_number=''):
new_account = self.create_user(phone_number=phone_number, last_name=last_name, first_name=first_name,
email=email, password=password)
new_account.staff = True
admin_object = AdminUser.objects.create(role=role)
new_account.admin_object = admin_object
new_account.user_type = ADMIN
admin_object.save(using=self._db)
new_account.save(using=self._db)
return new_account
def create_basic_user(self, type, last_name, first_name, email, password, phone_number=''):
new_account = self.create_user(phone_number=phone_number, last_name=last_name, first_name=first_name,
email=email, password=password)
user_object = BasicUser.objects.create(type=type)
new_account.user_object = user_object
new_account.user_type = BASIC_USER
user_object.save(using=self._db)
new_account.save(using=self._db)
return new_account
# method for creating restaurants, schools, etc.
def create_user(self, last_name, first_name, email, password, phone_number=''):
new_account = self.model(email=self.normalize_email(email),)
new_account.set_password(password)
new_account.last_name = last_name
new_account.first_name = first_name
new_account.phone_number = phone_number
new_account.save(using=self._db)
return new_account
# method for creating superadmins
def create_superuser(self, last_name, first_name, email, password, phone_number=''):
new_account = self.create_user(phone_number=phone_number, last_name=last_name, first_name=first_name,
email=email, password=password)
new_account.staff = True
new_account.admin = True
admin_object = AdminUser.objects.create(role=SUPER_ADMIN)
new_account.admin_object = admin_object
new_account.user_type = ADMIN
admin_object.save(using=self._db)
new_account.save(using=self._db)
return new_account
# add any required fields here other than email and password
REQUIRED_FIELDS = []
USERNAME_FIELD = 'email'
class UserClass(AbstractBaseUser):
"""Class for general user - can be basic user or admin"""
phone_number = models.CharField(verbose_name='phone number', max_length=255, unique=False, default='')
active = models.BooleanField(default=True)
is_active = models.BooleanField(default=True)
email = models.EmailField(verbose_name='email', max_length=255, unique=True, )
last_name = models.CharField(verbose_name='last name', max_length=255, unique=False, )
first_name = models.CharField(verbose_name='first name', max_length=255, unique=False, )
objects = UserClassManager()
staff = models.BooleanField(default=False)
admin = models.BooleanField(default=False)
image = models.CharField(verbose_name='user image', max_length=255, unique=False, default='defaultIcon.png')
USERNAME_FIELD = "email"
REQUIRED_FIELDS = ['first_name', 'last_name']
user_type = models.CharField(
max_length=20,
choices=USER_TYPES,
default=BASIC_USER,
)
user_object = models.ForeignKey('profiles.BasicUser', on_delete=models.DO_NOTHING, null=True, related_name='basic_user_parent')
admin_object = models.ForeignKey('profiles.AdminUser', on_delete=models.DO_NOTHING, null=True, related_name='admin_user_parent')
def has_module_perms(self, app_label):
return True
@property
def is_admin(self):
return self.admin
def get_full_name(self):
return self.first_name + ' ' + self.last_name
def get_short_name(self):
return self.first_name
@property
def is_staff(self):
return self.staff
def __str__(self):
return self.email
class AdminUser(models.Model):
"""Model for admin user data"""
role = models.CharField(
max_length=20,
choices=ADMIN_ROLE_OPTIONS,
default=STAFF,
)
class BasicUser(models.Model):
"""Model for basic user data"""
type = models.CharField(
max_length=20,
choices=USER_TYPE_OPTIONS,
default=RESTAURANT,
)
preferred_contact = models.CharField(
max_length=20,
choices=PREFERRED_CONTACT,
default=EMAIL,
)
position = models.CharField(verbose_name='position/title', max_length=255, unique=False, null=True)
restaurant = models.ForeignKey('profiles.Restaurant', on_delete=models.CASCADE, null=True)
program = models.ForeignKey('profiles.Program', on_delete=models.CASCADE, null=True)
courier = models.ForeignKey('profiles.Courier', on_delete=models.CASCADE, null=True)
class Schedule(models.Model):
monday_start = models.TimeField(auto_now=False, null=True, blank=True)
monday_end = models.TimeField(auto_now=False, null=True, blank=True)
tuesday_start = models.TimeField(auto_now=False, null=True, blank=True)
tuesday_end = models.TimeField(auto_now=False, null=True, blank=True)
wednesday_start = models.TimeField(auto_now=False, null=True, blank=True)
wednesday_end = models.TimeField(auto_now=False, null=True, blank=True)
thursday_start = models.TimeField(auto_now=False, null=True, blank=True)
thursday_end = models.TimeField(auto_now=False, null=True, blank=True)
friday_start = models.TimeField(auto_now=False, null=True, blank=True)
friday_end = models.TimeField(auto_now=False, null=True, blank=True)
saturday_start = models.TimeField(auto_now=False, null=True, blank=True)
saturday_end = models.TimeField(auto_now=False, null=True, blank=True)
sunday_start = models.TimeField(auto_now=False, null=True, blank=True)
sunday_end = models.TimeField(auto_now=False, null=True, blank=True)
def getSchedule(self):
schedule = {}
if self.monday_start:
schedule['monday_start'] = self.monday_start.strftime("%-I:%M %p")
else:
schedule['monday_start'] = ''
if self.monday_end:
schedule['monday_end'] = self.monday_end.strftime("%-I:%M %p")
else:
schedule['monday_end'] = ''
if self.tuesday_start:
schedule['tuesday_start'] = self.tuesday_start.strftime("%-I:%M %p")
else:
schedule['tuesday_start'] = ''
if self.tuesday_end:
schedule['tuesday_end'] = self.tuesday_end.strftime("%-I:%M %p")
else:
schedule['tuesday_end'] = ''
if self.wednesday_start:
schedule['wednesday_start'] = self.wednesday_start.strftime("%-I:%M %p")
else:
schedule['wednesday_start'] = ''
if self.wednesday_end:
schedule['wednesday_end'] = self.wednesday_end.strftime("%-I:%M %p")
else:
schedule['wednesday_end'] = ''
if self.thursday_start:
schedule['thursday_start'] = self.thursday_start.strftime("%-I:%M %p")
else:
schedule['thursday_start'] = ''
if self.thursday_end:
schedule['thursday_end'] = self.thursday_end.strftime("%-I:%M %p")
else:
schedule['thursday_end'] = ''
if self.friday_start:
schedule['friday_start'] = self.friday_start.strftime("%-I:%M %p")
else:
schedule['friday_start'] = ''
if self.friday_end:
schedule['friday_end'] = self.friday_end.strftime("%-I:%M %p")
else:
schedule['friday_end'] = ''
if self.saturday_start:
schedule['saturday_start'] = self.saturday_start.strftime("%-I:%M %p")
else:
schedule['saturday_start'] = ''
if self.saturday_end:
schedule['saturday_end'] = self.saturday_end.strftime("%-I:%M %p")
else:
schedule['saturday_end'] = ''
if self.sunday_start:
schedule['sunday_start'] = self.sunday_start.strftime("%-I:%M %p")
else:
schedule['sunday_start'] = ''
if self.sunday_end:
schedule['sunday_end'] = self.sunday_end.strftime("%-I:%M %p")
else:
schedule['sunday_end'] = ''
return schedule
class Restaurant(models.Model):
created_at = models.DateTimeField(auto_now=True)
company_name = models.CharField(verbose_name='company name', max_length=255, unique=False, )
main_contact = models.ForeignKey('profiles.UserClass', on_delete=models.DO_NOTHING, related_name="restaurant_object", null=True)
phone_number = models.CharField(verbose_name='phone number', max_length=255, unique=False, )
schedule = models.ForeignKey('profiles.Schedule', on_delete=models.DO_NOTHING, null=True)
meals = models.IntegerField()
uber_eats = models.BooleanField(default=False)
delivery_capacity = models.BooleanField(default=False)
packaging = models.BooleanField(default=False)
health_certificate = models.CharField(verbose_name='health certificate', max_length=255, unique=False, )
address = models.CharField(verbose_name='address', max_length=255, unique=False, )
coordinates = models.CharField(verbose_name='coordinates', max_length=255, unique=False, null=True)
latitude = models.CharField(verbose_name='latitude', max_length=255, unique=False, null=True)
longitude = models.CharField(verbose_name='longitude', max_length=255, unique=False, null=True)
review = models.ForeignKey('applications.ApplicationReview', related_name='restaurants',
on_delete=models.DO_NOTHING, null=True)
class Program(models.Model):
created_at = models.DateTimeField(auto_now=True)
program_name = models.CharField(verbose_name='program name', max_length=255, unique=False, )
main_contact = models.ForeignKey('profiles.UserClass', on_delete=models.DO_NOTHING, related_name="program_object", null=True)
phone_number = models.CharField(verbose_name='phone number', max_length=255, unique=False, )
schedule = models.ForeignKey('profiles.Schedule', on_delete=models.DO_NOTHING, null=True)
meals = models.IntegerField(default=0, null=True)
address = models.CharField(verbose_name='address', max_length=255, unique=False, )
coordinates = models.CharField(verbose_name='address', max_length=255, unique=False, null=True)
latitude = models.CharField(verbose_name='latitude', max_length=255, unique=False, null=True)
longitude = models.CharField(verbose_name='longitude', max_length=255, unique=False, null=True)
review = models.ForeignKey('applications.ApplicationReview', related_name="programs",
on_delete=models.DO_NOTHING, null=True)
class Courier(models.Model):
created_at = models.DateTimeField(auto_now=True)
class Profile(models.Model):
    """Per-user profile holding an optional avatar image."""
    # One profile per BasicUser; removed when the user row is deleted (CASCADE).
    user = models.OneToOneField(BasicUser, on_delete=models.CASCADE)
    # Optional avatar image, stored under the 'avatars' media directory.
    avatar = models.ImageField(upload_to='avatars', blank=True)
    def __str__(self):
        # NOTE(review): assumes BasicUser exposes a `username` attribute —
        # confirm against the BasicUser definition earlier in this file,
        # otherwise this raises AttributeError.
        return self.user.username
|
normal
|
{
"blob_id": "8a1f024be00200218782c919b21161bf48fc817e",
"index": 7805,
"step-1": "<mask token>\n\n\nclass UserClass(AbstractBaseUser):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_full_name(self):\n return self.first_name + ' ' + self.last_name\n <mask token>\n <mask token>\n <mask token>\n\n\nclass AdminUser(models.Model):\n \"\"\"Model for admin user data\"\"\"\n role = models.CharField(max_length=20, choices=ADMIN_ROLE_OPTIONS,\n default=STAFF)\n\n\nclass BasicUser(models.Model):\n \"\"\"Model for basic user data\"\"\"\n type = models.CharField(max_length=20, choices=USER_TYPE_OPTIONS,\n default=RESTAURANT)\n preferred_contact = models.CharField(max_length=20, choices=\n PREFERRED_CONTACT, default=EMAIL)\n position = models.CharField(verbose_name='position/title', max_length=\n 255, unique=False, null=True)\n restaurant = models.ForeignKey('profiles.Restaurant', on_delete=models.\n CASCADE, null=True)\n program = models.ForeignKey('profiles.Program', on_delete=models.\n CASCADE, null=True)\n courier = models.ForeignKey('profiles.Courier', on_delete=models.\n CASCADE, null=True)\n\n\nclass Schedule(models.Model):\n monday_start = models.TimeField(auto_now=False, null=True, blank=True)\n monday_end = models.TimeField(auto_now=False, null=True, blank=True)\n tuesday_start = models.TimeField(auto_now=False, null=True, blank=True)\n tuesday_end = models.TimeField(auto_now=False, null=True, blank=True)\n wednesday_start = models.TimeField(auto_now=False, null=True, blank=True)\n wednesday_end = models.TimeField(auto_now=False, null=True, blank=True)\n thursday_start = models.TimeField(auto_now=False, null=True, blank=True)\n thursday_end = models.TimeField(auto_now=False, null=True, blank=True)\n friday_start = models.TimeField(auto_now=False, null=True, blank=True)\n friday_end = 
models.TimeField(auto_now=False, null=True, blank=True)\n saturday_start = models.TimeField(auto_now=False, null=True, blank=True)\n saturday_end = models.TimeField(auto_now=False, null=True, blank=True)\n sunday_start = models.TimeField(auto_now=False, null=True, blank=True)\n sunday_end = models.TimeField(auto_now=False, null=True, blank=True)\n\n def getSchedule(self):\n schedule = {}\n if self.monday_start:\n schedule['monday_start'] = self.monday_start.strftime('%-I:%M %p')\n else:\n schedule['monday_start'] = ''\n if self.monday_end:\n schedule['monday_end'] = self.monday_end.strftime('%-I:%M %p')\n else:\n schedule['monday_end'] = ''\n if self.tuesday_start:\n schedule['tuesday_start'] = self.tuesday_start.strftime('%-I:%M %p'\n )\n else:\n schedule['tuesday_start'] = ''\n if self.tuesday_end:\n schedule['tuesday_end'] = self.tuesday_end.strftime('%-I:%M %p')\n else:\n schedule['tuesday_end'] = ''\n if self.wednesday_start:\n schedule['wednesday_start'] = self.wednesday_start.strftime(\n '%-I:%M %p')\n else:\n schedule['wednesday_start'] = ''\n if self.wednesday_end:\n schedule['wednesday_end'] = self.wednesday_end.strftime('%-I:%M %p'\n )\n else:\n schedule['wednesday_end'] = ''\n if self.thursday_start:\n schedule['thursday_start'] = self.thursday_start.strftime(\n '%-I:%M %p')\n else:\n schedule['thursday_start'] = ''\n if self.thursday_end:\n schedule['thursday_end'] = self.thursday_end.strftime('%-I:%M %p')\n else:\n schedule['thursday_end'] = ''\n if self.friday_start:\n schedule['friday_start'] = self.friday_start.strftime('%-I:%M %p')\n else:\n schedule['friday_start'] = ''\n if self.friday_end:\n schedule['friday_end'] = self.friday_end.strftime('%-I:%M %p')\n else:\n schedule['friday_end'] = ''\n if self.saturday_start:\n schedule['saturday_start'] = self.saturday_start.strftime(\n '%-I:%M %p')\n else:\n schedule['saturday_start'] = ''\n if self.saturday_end:\n schedule['saturday_end'] = self.saturday_end.strftime('%-I:%M %p')\n else:\n 
schedule['saturday_end'] = ''\n if self.sunday_start:\n schedule['sunday_start'] = self.sunday_start.strftime('%-I:%M %p')\n else:\n schedule['sunday_start'] = ''\n if self.sunday_end:\n schedule['sunday_end'] = self.sunday_end.strftime('%-I:%M %p')\n else:\n schedule['sunday_end'] = ''\n return schedule\n\n\nclass Restaurant(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n company_name = models.CharField(verbose_name='company name', max_length\n =255, unique=False)\n main_contact = models.ForeignKey('profiles.UserClass', on_delete=models\n .DO_NOTHING, related_name='restaurant_object', null=True)\n phone_number = models.CharField(verbose_name='phone number', max_length\n =255, unique=False)\n schedule = models.ForeignKey('profiles.Schedule', on_delete=models.\n DO_NOTHING, null=True)\n meals = models.IntegerField()\n uber_eats = models.BooleanField(default=False)\n delivery_capacity = models.BooleanField(default=False)\n packaging = models.BooleanField(default=False)\n health_certificate = models.CharField(verbose_name='health certificate',\n max_length=255, unique=False)\n address = models.CharField(verbose_name='address', max_length=255,\n unique=False)\n coordinates = models.CharField(verbose_name='coordinates', max_length=\n 255, unique=False, null=True)\n latitude = models.CharField(verbose_name='latitude', max_length=255,\n unique=False, null=True)\n longitude = models.CharField(verbose_name='longitude', max_length=255,\n unique=False, null=True)\n review = models.ForeignKey('applications.ApplicationReview',\n related_name='restaurants', on_delete=models.DO_NOTHING, null=True)\n\n\nclass Program(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n program_name = models.CharField(verbose_name='program name', max_length\n =255, unique=False)\n main_contact = models.ForeignKey('profiles.UserClass', on_delete=models\n .DO_NOTHING, related_name='program_object', null=True)\n phone_number = models.CharField(verbose_name='phone 
number', max_length\n =255, unique=False)\n schedule = models.ForeignKey('profiles.Schedule', on_delete=models.\n DO_NOTHING, null=True)\n meals = models.IntegerField(default=0, null=True)\n address = models.CharField(verbose_name='address', max_length=255,\n unique=False)\n coordinates = models.CharField(verbose_name='address', max_length=255,\n unique=False, null=True)\n latitude = models.CharField(verbose_name='latitude', max_length=255,\n unique=False, null=True)\n longitude = models.CharField(verbose_name='longitude', max_length=255,\n unique=False, null=True)\n review = models.ForeignKey('applications.ApplicationReview',\n related_name='programs', on_delete=models.DO_NOTHING, null=True)\n\n\nclass Courier(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n\n\nclass Profile(models.Model):\n user = models.OneToOneField(BasicUser, on_delete=models.CASCADE)\n avatar = models.ImageField(upload_to='avatars', blank=True)\n\n def __str__(self):\n return self.user.username\n",
"step-2": "<mask token>\n\n\nclass UserClassManager(BaseUserManager):\n <mask token>\n <mask token>\n\n def create_basic_user(self, type, last_name, first_name, email,\n password, phone_number=''):\n new_account = self.create_user(phone_number=phone_number, last_name\n =last_name, first_name=first_name, email=email, password=password)\n user_object = BasicUser.objects.create(type=type)\n new_account.user_object = user_object\n new_account.user_type = BASIC_USER\n user_object.save(using=self._db)\n new_account.save(using=self._db)\n return new_account\n\n def create_user(self, last_name, first_name, email, password,\n phone_number=''):\n new_account = self.model(email=self.normalize_email(email))\n new_account.set_password(password)\n new_account.last_name = last_name\n new_account.first_name = first_name\n new_account.phone_number = phone_number\n new_account.save(using=self._db)\n return new_account\n\n def create_superuser(self, last_name, first_name, email, password,\n phone_number=''):\n new_account = self.create_user(phone_number=phone_number, last_name\n =last_name, first_name=first_name, email=email, password=password)\n new_account.staff = True\n new_account.admin = True\n admin_object = AdminUser.objects.create(role=SUPER_ADMIN)\n new_account.admin_object = admin_object\n new_account.user_type = ADMIN\n admin_object.save(using=self._db)\n new_account.save(using=self._db)\n return new_account\n <mask token>\n <mask token>\n\n\nclass UserClass(AbstractBaseUser):\n \"\"\"Class for general user - can be basic user or admin\"\"\"\n phone_number = models.CharField(verbose_name='phone number', max_length\n =255, unique=False, default='')\n active = models.BooleanField(default=True)\n is_active = models.BooleanField(default=True)\n email = models.EmailField(verbose_name='email', max_length=255, unique=True\n )\n last_name = models.CharField(verbose_name='last name', max_length=255,\n unique=False)\n first_name = models.CharField(verbose_name='first name', 
max_length=255,\n unique=False)\n objects = UserClassManager()\n staff = models.BooleanField(default=False)\n admin = models.BooleanField(default=False)\n image = models.CharField(verbose_name='user image', max_length=255,\n unique=False, default='defaultIcon.png')\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['first_name', 'last_name']\n user_type = models.CharField(max_length=20, choices=USER_TYPES, default\n =BASIC_USER)\n user_object = models.ForeignKey('profiles.BasicUser', on_delete=models.\n DO_NOTHING, null=True, related_name='basic_user_parent')\n admin_object = models.ForeignKey('profiles.AdminUser', on_delete=models\n .DO_NOTHING, null=True, related_name='admin_user_parent')\n\n def has_module_perms(self, app_label):\n return True\n\n @property\n def is_admin(self):\n return self.admin\n\n def get_full_name(self):\n return self.first_name + ' ' + self.last_name\n\n def get_short_name(self):\n return self.first_name\n\n @property\n def is_staff(self):\n return self.staff\n\n def __str__(self):\n return self.email\n\n\nclass AdminUser(models.Model):\n \"\"\"Model for admin user data\"\"\"\n role = models.CharField(max_length=20, choices=ADMIN_ROLE_OPTIONS,\n default=STAFF)\n\n\nclass BasicUser(models.Model):\n \"\"\"Model for basic user data\"\"\"\n type = models.CharField(max_length=20, choices=USER_TYPE_OPTIONS,\n default=RESTAURANT)\n preferred_contact = models.CharField(max_length=20, choices=\n PREFERRED_CONTACT, default=EMAIL)\n position = models.CharField(verbose_name='position/title', max_length=\n 255, unique=False, null=True)\n restaurant = models.ForeignKey('profiles.Restaurant', on_delete=models.\n CASCADE, null=True)\n program = models.ForeignKey('profiles.Program', on_delete=models.\n CASCADE, null=True)\n courier = models.ForeignKey('profiles.Courier', on_delete=models.\n CASCADE, null=True)\n\n\nclass Schedule(models.Model):\n monday_start = models.TimeField(auto_now=False, null=True, blank=True)\n monday_end = 
models.TimeField(auto_now=False, null=True, blank=True)\n tuesday_start = models.TimeField(auto_now=False, null=True, blank=True)\n tuesday_end = models.TimeField(auto_now=False, null=True, blank=True)\n wednesday_start = models.TimeField(auto_now=False, null=True, blank=True)\n wednesday_end = models.TimeField(auto_now=False, null=True, blank=True)\n thursday_start = models.TimeField(auto_now=False, null=True, blank=True)\n thursday_end = models.TimeField(auto_now=False, null=True, blank=True)\n friday_start = models.TimeField(auto_now=False, null=True, blank=True)\n friday_end = models.TimeField(auto_now=False, null=True, blank=True)\n saturday_start = models.TimeField(auto_now=False, null=True, blank=True)\n saturday_end = models.TimeField(auto_now=False, null=True, blank=True)\n sunday_start = models.TimeField(auto_now=False, null=True, blank=True)\n sunday_end = models.TimeField(auto_now=False, null=True, blank=True)\n\n def getSchedule(self):\n schedule = {}\n if self.monday_start:\n schedule['monday_start'] = self.monday_start.strftime('%-I:%M %p')\n else:\n schedule['monday_start'] = ''\n if self.monday_end:\n schedule['monday_end'] = self.monday_end.strftime('%-I:%M %p')\n else:\n schedule['monday_end'] = ''\n if self.tuesday_start:\n schedule['tuesday_start'] = self.tuesday_start.strftime('%-I:%M %p'\n )\n else:\n schedule['tuesday_start'] = ''\n if self.tuesday_end:\n schedule['tuesday_end'] = self.tuesday_end.strftime('%-I:%M %p')\n else:\n schedule['tuesday_end'] = ''\n if self.wednesday_start:\n schedule['wednesday_start'] = self.wednesday_start.strftime(\n '%-I:%M %p')\n else:\n schedule['wednesday_start'] = ''\n if self.wednesday_end:\n schedule['wednesday_end'] = self.wednesday_end.strftime('%-I:%M %p'\n )\n else:\n schedule['wednesday_end'] = ''\n if self.thursday_start:\n schedule['thursday_start'] = self.thursday_start.strftime(\n '%-I:%M %p')\n else:\n schedule['thursday_start'] = ''\n if self.thursday_end:\n schedule['thursday_end'] = 
self.thursday_end.strftime('%-I:%M %p')\n else:\n schedule['thursday_end'] = ''\n if self.friday_start:\n schedule['friday_start'] = self.friday_start.strftime('%-I:%M %p')\n else:\n schedule['friday_start'] = ''\n if self.friday_end:\n schedule['friday_end'] = self.friday_end.strftime('%-I:%M %p')\n else:\n schedule['friday_end'] = ''\n if self.saturday_start:\n schedule['saturday_start'] = self.saturday_start.strftime(\n '%-I:%M %p')\n else:\n schedule['saturday_start'] = ''\n if self.saturday_end:\n schedule['saturday_end'] = self.saturday_end.strftime('%-I:%M %p')\n else:\n schedule['saturday_end'] = ''\n if self.sunday_start:\n schedule['sunday_start'] = self.sunday_start.strftime('%-I:%M %p')\n else:\n schedule['sunday_start'] = ''\n if self.sunday_end:\n schedule['sunday_end'] = self.sunday_end.strftime('%-I:%M %p')\n else:\n schedule['sunday_end'] = ''\n return schedule\n\n\nclass Restaurant(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n company_name = models.CharField(verbose_name='company name', max_length\n =255, unique=False)\n main_contact = models.ForeignKey('profiles.UserClass', on_delete=models\n .DO_NOTHING, related_name='restaurant_object', null=True)\n phone_number = models.CharField(verbose_name='phone number', max_length\n =255, unique=False)\n schedule = models.ForeignKey('profiles.Schedule', on_delete=models.\n DO_NOTHING, null=True)\n meals = models.IntegerField()\n uber_eats = models.BooleanField(default=False)\n delivery_capacity = models.BooleanField(default=False)\n packaging = models.BooleanField(default=False)\n health_certificate = models.CharField(verbose_name='health certificate',\n max_length=255, unique=False)\n address = models.CharField(verbose_name='address', max_length=255,\n unique=False)\n coordinates = models.CharField(verbose_name='coordinates', max_length=\n 255, unique=False, null=True)\n latitude = models.CharField(verbose_name='latitude', max_length=255,\n unique=False, null=True)\n longitude = 
models.CharField(verbose_name='longitude', max_length=255,\n unique=False, null=True)\n review = models.ForeignKey('applications.ApplicationReview',\n related_name='restaurants', on_delete=models.DO_NOTHING, null=True)\n\n\nclass Program(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n program_name = models.CharField(verbose_name='program name', max_length\n =255, unique=False)\n main_contact = models.ForeignKey('profiles.UserClass', on_delete=models\n .DO_NOTHING, related_name='program_object', null=True)\n phone_number = models.CharField(verbose_name='phone number', max_length\n =255, unique=False)\n schedule = models.ForeignKey('profiles.Schedule', on_delete=models.\n DO_NOTHING, null=True)\n meals = models.IntegerField(default=0, null=True)\n address = models.CharField(verbose_name='address', max_length=255,\n unique=False)\n coordinates = models.CharField(verbose_name='address', max_length=255,\n unique=False, null=True)\n latitude = models.CharField(verbose_name='latitude', max_length=255,\n unique=False, null=True)\n longitude = models.CharField(verbose_name='longitude', max_length=255,\n unique=False, null=True)\n review = models.ForeignKey('applications.ApplicationReview',\n related_name='programs', on_delete=models.DO_NOTHING, null=True)\n\n\nclass Courier(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n\n\nclass Profile(models.Model):\n user = models.OneToOneField(BasicUser, on_delete=models.CASCADE)\n avatar = models.ImageField(upload_to='avatars', blank=True)\n\n def __str__(self):\n return self.user.username\n",
"step-3": "<mask token>\n\n\nclass UserClassManager(BaseUserManager):\n <mask token>\n\n def create_staffuser(self, last_name, first_name, email, password, role,\n phone_number=''):\n new_account = self.create_user(phone_number=phone_number, last_name\n =last_name, first_name=first_name, email=email, password=password)\n new_account.staff = True\n admin_object = AdminUser.objects.create(role=role)\n new_account.admin_object = admin_object\n new_account.user_type = ADMIN\n admin_object.save(using=self._db)\n new_account.save(using=self._db)\n return new_account\n\n def create_basic_user(self, type, last_name, first_name, email,\n password, phone_number=''):\n new_account = self.create_user(phone_number=phone_number, last_name\n =last_name, first_name=first_name, email=email, password=password)\n user_object = BasicUser.objects.create(type=type)\n new_account.user_object = user_object\n new_account.user_type = BASIC_USER\n user_object.save(using=self._db)\n new_account.save(using=self._db)\n return new_account\n\n def create_user(self, last_name, first_name, email, password,\n phone_number=''):\n new_account = self.model(email=self.normalize_email(email))\n new_account.set_password(password)\n new_account.last_name = last_name\n new_account.first_name = first_name\n new_account.phone_number = phone_number\n new_account.save(using=self._db)\n return new_account\n\n def create_superuser(self, last_name, first_name, email, password,\n phone_number=''):\n new_account = self.create_user(phone_number=phone_number, last_name\n =last_name, first_name=first_name, email=email, password=password)\n new_account.staff = True\n new_account.admin = True\n admin_object = AdminUser.objects.create(role=SUPER_ADMIN)\n new_account.admin_object = admin_object\n new_account.user_type = ADMIN\n admin_object.save(using=self._db)\n new_account.save(using=self._db)\n return new_account\n <mask token>\n <mask token>\n\n\nclass UserClass(AbstractBaseUser):\n \"\"\"Class for general user - can 
be basic user or admin\"\"\"\n phone_number = models.CharField(verbose_name='phone number', max_length\n =255, unique=False, default='')\n active = models.BooleanField(default=True)\n is_active = models.BooleanField(default=True)\n email = models.EmailField(verbose_name='email', max_length=255, unique=True\n )\n last_name = models.CharField(verbose_name='last name', max_length=255,\n unique=False)\n first_name = models.CharField(verbose_name='first name', max_length=255,\n unique=False)\n objects = UserClassManager()\n staff = models.BooleanField(default=False)\n admin = models.BooleanField(default=False)\n image = models.CharField(verbose_name='user image', max_length=255,\n unique=False, default='defaultIcon.png')\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['first_name', 'last_name']\n user_type = models.CharField(max_length=20, choices=USER_TYPES, default\n =BASIC_USER)\n user_object = models.ForeignKey('profiles.BasicUser', on_delete=models.\n DO_NOTHING, null=True, related_name='basic_user_parent')\n admin_object = models.ForeignKey('profiles.AdminUser', on_delete=models\n .DO_NOTHING, null=True, related_name='admin_user_parent')\n\n def has_module_perms(self, app_label):\n return True\n\n @property\n def is_admin(self):\n return self.admin\n\n def get_full_name(self):\n return self.first_name + ' ' + self.last_name\n\n def get_short_name(self):\n return self.first_name\n\n @property\n def is_staff(self):\n return self.staff\n\n def __str__(self):\n return self.email\n\n\nclass AdminUser(models.Model):\n \"\"\"Model for admin user data\"\"\"\n role = models.CharField(max_length=20, choices=ADMIN_ROLE_OPTIONS,\n default=STAFF)\n\n\nclass BasicUser(models.Model):\n \"\"\"Model for basic user data\"\"\"\n type = models.CharField(max_length=20, choices=USER_TYPE_OPTIONS,\n default=RESTAURANT)\n preferred_contact = models.CharField(max_length=20, choices=\n PREFERRED_CONTACT, default=EMAIL)\n position = models.CharField(verbose_name='position/title', 
max_length=\n 255, unique=False, null=True)\n restaurant = models.ForeignKey('profiles.Restaurant', on_delete=models.\n CASCADE, null=True)\n program = models.ForeignKey('profiles.Program', on_delete=models.\n CASCADE, null=True)\n courier = models.ForeignKey('profiles.Courier', on_delete=models.\n CASCADE, null=True)\n\n\nclass Schedule(models.Model):\n monday_start = models.TimeField(auto_now=False, null=True, blank=True)\n monday_end = models.TimeField(auto_now=False, null=True, blank=True)\n tuesday_start = models.TimeField(auto_now=False, null=True, blank=True)\n tuesday_end = models.TimeField(auto_now=False, null=True, blank=True)\n wednesday_start = models.TimeField(auto_now=False, null=True, blank=True)\n wednesday_end = models.TimeField(auto_now=False, null=True, blank=True)\n thursday_start = models.TimeField(auto_now=False, null=True, blank=True)\n thursday_end = models.TimeField(auto_now=False, null=True, blank=True)\n friday_start = models.TimeField(auto_now=False, null=True, blank=True)\n friday_end = models.TimeField(auto_now=False, null=True, blank=True)\n saturday_start = models.TimeField(auto_now=False, null=True, blank=True)\n saturday_end = models.TimeField(auto_now=False, null=True, blank=True)\n sunday_start = models.TimeField(auto_now=False, null=True, blank=True)\n sunday_end = models.TimeField(auto_now=False, null=True, blank=True)\n\n def getSchedule(self):\n schedule = {}\n if self.monday_start:\n schedule['monday_start'] = self.monday_start.strftime('%-I:%M %p')\n else:\n schedule['monday_start'] = ''\n if self.monday_end:\n schedule['monday_end'] = self.monday_end.strftime('%-I:%M %p')\n else:\n schedule['monday_end'] = ''\n if self.tuesday_start:\n schedule['tuesday_start'] = self.tuesday_start.strftime('%-I:%M %p'\n )\n else:\n schedule['tuesday_start'] = ''\n if self.tuesday_end:\n schedule['tuesday_end'] = self.tuesday_end.strftime('%-I:%M %p')\n else:\n schedule['tuesday_end'] = ''\n if self.wednesday_start:\n 
schedule['wednesday_start'] = self.wednesday_start.strftime(\n '%-I:%M %p')\n else:\n schedule['wednesday_start'] = ''\n if self.wednesday_end:\n schedule['wednesday_end'] = self.wednesday_end.strftime('%-I:%M %p'\n )\n else:\n schedule['wednesday_end'] = ''\n if self.thursday_start:\n schedule['thursday_start'] = self.thursday_start.strftime(\n '%-I:%M %p')\n else:\n schedule['thursday_start'] = ''\n if self.thursday_end:\n schedule['thursday_end'] = self.thursday_end.strftime('%-I:%M %p')\n else:\n schedule['thursday_end'] = ''\n if self.friday_start:\n schedule['friday_start'] = self.friday_start.strftime('%-I:%M %p')\n else:\n schedule['friday_start'] = ''\n if self.friday_end:\n schedule['friday_end'] = self.friday_end.strftime('%-I:%M %p')\n else:\n schedule['friday_end'] = ''\n if self.saturday_start:\n schedule['saturday_start'] = self.saturday_start.strftime(\n '%-I:%M %p')\n else:\n schedule['saturday_start'] = ''\n if self.saturday_end:\n schedule['saturday_end'] = self.saturday_end.strftime('%-I:%M %p')\n else:\n schedule['saturday_end'] = ''\n if self.sunday_start:\n schedule['sunday_start'] = self.sunday_start.strftime('%-I:%M %p')\n else:\n schedule['sunday_start'] = ''\n if self.sunday_end:\n schedule['sunday_end'] = self.sunday_end.strftime('%-I:%M %p')\n else:\n schedule['sunday_end'] = ''\n return schedule\n\n\nclass Restaurant(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n company_name = models.CharField(verbose_name='company name', max_length\n =255, unique=False)\n main_contact = models.ForeignKey('profiles.UserClass', on_delete=models\n .DO_NOTHING, related_name='restaurant_object', null=True)\n phone_number = models.CharField(verbose_name='phone number', max_length\n =255, unique=False)\n schedule = models.ForeignKey('profiles.Schedule', on_delete=models.\n DO_NOTHING, null=True)\n meals = models.IntegerField()\n uber_eats = models.BooleanField(default=False)\n delivery_capacity = models.BooleanField(default=False)\n 
packaging = models.BooleanField(default=False)\n health_certificate = models.CharField(verbose_name='health certificate',\n max_length=255, unique=False)\n address = models.CharField(verbose_name='address', max_length=255,\n unique=False)\n coordinates = models.CharField(verbose_name='coordinates', max_length=\n 255, unique=False, null=True)\n latitude = models.CharField(verbose_name='latitude', max_length=255,\n unique=False, null=True)\n longitude = models.CharField(verbose_name='longitude', max_length=255,\n unique=False, null=True)\n review = models.ForeignKey('applications.ApplicationReview',\n related_name='restaurants', on_delete=models.DO_NOTHING, null=True)\n\n\nclass Program(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n program_name = models.CharField(verbose_name='program name', max_length\n =255, unique=False)\n main_contact = models.ForeignKey('profiles.UserClass', on_delete=models\n .DO_NOTHING, related_name='program_object', null=True)\n phone_number = models.CharField(verbose_name='phone number', max_length\n =255, unique=False)\n schedule = models.ForeignKey('profiles.Schedule', on_delete=models.\n DO_NOTHING, null=True)\n meals = models.IntegerField(default=0, null=True)\n address = models.CharField(verbose_name='address', max_length=255,\n unique=False)\n coordinates = models.CharField(verbose_name='address', max_length=255,\n unique=False, null=True)\n latitude = models.CharField(verbose_name='latitude', max_length=255,\n unique=False, null=True)\n longitude = models.CharField(verbose_name='longitude', max_length=255,\n unique=False, null=True)\n review = models.ForeignKey('applications.ApplicationReview',\n related_name='programs', on_delete=models.DO_NOTHING, null=True)\n\n\nclass Courier(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n\n\nclass Profile(models.Model):\n user = models.OneToOneField(BasicUser, on_delete=models.CASCADE)\n avatar = models.ImageField(upload_to='avatars', blank=True)\n\n def 
__str__(self):\n return self.user.username\n",
"step-4": "from django.db.models.signals import post_save\nfrom django.contrib.auth.models import AbstractBaseUser, BaseUserManager\nfrom django.db import models\nBASIC_ADMIN = 'ADMIN'\nSUPER_ADMIN = 'SUPER'\nMANAGER = 'MNGR'\nDEVELOPER = 'DEV'\nSTAFF = 'STAFF'\nADMIN_ROLE_OPTIONS = [(BASIC_ADMIN, 'basic admin'), (SUPER_ADMIN,\n 'super admin'), (MANAGER, 'manager'), (DEVELOPER, 'developer'), (STAFF,\n 'stuff')]\nPROGRAM = 'PR'\nRESTAURANT = 'RE'\nUSER_TYPE_OPTIONS = [(PROGRAM, 'Program'), (RESTAURANT, 'Restaurant')]\nPHONE = 'PH'\nEMAIL = 'EM'\nPREFERRED_CONTACT = [(PHONE, 'Phone'), (EMAIL, 'Email')]\nADMIN = 'ADM'\nBASIC_USER = 'BSC'\nUSER_TYPES = [(ADMIN, 'Admin'), (BASIC_USER, 'Basic User')]\n\n\nclass UserClassManager(BaseUserManager):\n \"\"\"Manager for User class\"\"\"\n\n def create_staffuser(self, last_name, first_name, email, password, role,\n phone_number=''):\n new_account = self.create_user(phone_number=phone_number, last_name\n =last_name, first_name=first_name, email=email, password=password)\n new_account.staff = True\n admin_object = AdminUser.objects.create(role=role)\n new_account.admin_object = admin_object\n new_account.user_type = ADMIN\n admin_object.save(using=self._db)\n new_account.save(using=self._db)\n return new_account\n\n def create_basic_user(self, type, last_name, first_name, email,\n password, phone_number=''):\n new_account = self.create_user(phone_number=phone_number, last_name\n =last_name, first_name=first_name, email=email, password=password)\n user_object = BasicUser.objects.create(type=type)\n new_account.user_object = user_object\n new_account.user_type = BASIC_USER\n user_object.save(using=self._db)\n new_account.save(using=self._db)\n return new_account\n\n def create_user(self, last_name, first_name, email, password,\n phone_number=''):\n new_account = self.model(email=self.normalize_email(email))\n new_account.set_password(password)\n new_account.last_name = last_name\n new_account.first_name = first_name\n 
new_account.phone_number = phone_number\n new_account.save(using=self._db)\n return new_account\n\n def create_superuser(self, last_name, first_name, email, password,\n phone_number=''):\n new_account = self.create_user(phone_number=phone_number, last_name\n =last_name, first_name=first_name, email=email, password=password)\n new_account.staff = True\n new_account.admin = True\n admin_object = AdminUser.objects.create(role=SUPER_ADMIN)\n new_account.admin_object = admin_object\n new_account.user_type = ADMIN\n admin_object.save(using=self._db)\n new_account.save(using=self._db)\n return new_account\n REQUIRED_FIELDS = []\n USERNAME_FIELD = 'email'\n\n\nclass UserClass(AbstractBaseUser):\n \"\"\"Class for general user - can be basic user or admin\"\"\"\n phone_number = models.CharField(verbose_name='phone number', max_length\n =255, unique=False, default='')\n active = models.BooleanField(default=True)\n is_active = models.BooleanField(default=True)\n email = models.EmailField(verbose_name='email', max_length=255, unique=True\n )\n last_name = models.CharField(verbose_name='last name', max_length=255,\n unique=False)\n first_name = models.CharField(verbose_name='first name', max_length=255,\n unique=False)\n objects = UserClassManager()\n staff = models.BooleanField(default=False)\n admin = models.BooleanField(default=False)\n image = models.CharField(verbose_name='user image', max_length=255,\n unique=False, default='defaultIcon.png')\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['first_name', 'last_name']\n user_type = models.CharField(max_length=20, choices=USER_TYPES, default\n =BASIC_USER)\n user_object = models.ForeignKey('profiles.BasicUser', on_delete=models.\n DO_NOTHING, null=True, related_name='basic_user_parent')\n admin_object = models.ForeignKey('profiles.AdminUser', on_delete=models\n .DO_NOTHING, null=True, related_name='admin_user_parent')\n\n def has_module_perms(self, app_label):\n return True\n\n @property\n def is_admin(self):\n return 
self.admin\n\n def get_full_name(self):\n return self.first_name + ' ' + self.last_name\n\n def get_short_name(self):\n return self.first_name\n\n @property\n def is_staff(self):\n return self.staff\n\n def __str__(self):\n return self.email\n\n\nclass AdminUser(models.Model):\n \"\"\"Model for admin user data\"\"\"\n role = models.CharField(max_length=20, choices=ADMIN_ROLE_OPTIONS,\n default=STAFF)\n\n\nclass BasicUser(models.Model):\n \"\"\"Model for basic user data\"\"\"\n type = models.CharField(max_length=20, choices=USER_TYPE_OPTIONS,\n default=RESTAURANT)\n preferred_contact = models.CharField(max_length=20, choices=\n PREFERRED_CONTACT, default=EMAIL)\n position = models.CharField(verbose_name='position/title', max_length=\n 255, unique=False, null=True)\n restaurant = models.ForeignKey('profiles.Restaurant', on_delete=models.\n CASCADE, null=True)\n program = models.ForeignKey('profiles.Program', on_delete=models.\n CASCADE, null=True)\n courier = models.ForeignKey('profiles.Courier', on_delete=models.\n CASCADE, null=True)\n\n\nclass Schedule(models.Model):\n monday_start = models.TimeField(auto_now=False, null=True, blank=True)\n monday_end = models.TimeField(auto_now=False, null=True, blank=True)\n tuesday_start = models.TimeField(auto_now=False, null=True, blank=True)\n tuesday_end = models.TimeField(auto_now=False, null=True, blank=True)\n wednesday_start = models.TimeField(auto_now=False, null=True, blank=True)\n wednesday_end = models.TimeField(auto_now=False, null=True, blank=True)\n thursday_start = models.TimeField(auto_now=False, null=True, blank=True)\n thursday_end = models.TimeField(auto_now=False, null=True, blank=True)\n friday_start = models.TimeField(auto_now=False, null=True, blank=True)\n friday_end = models.TimeField(auto_now=False, null=True, blank=True)\n saturday_start = models.TimeField(auto_now=False, null=True, blank=True)\n saturday_end = models.TimeField(auto_now=False, null=True, blank=True)\n sunday_start = 
models.TimeField(auto_now=False, null=True, blank=True)\n sunday_end = models.TimeField(auto_now=False, null=True, blank=True)\n\n def getSchedule(self):\n schedule = {}\n if self.monday_start:\n schedule['monday_start'] = self.monday_start.strftime('%-I:%M %p')\n else:\n schedule['monday_start'] = ''\n if self.monday_end:\n schedule['monday_end'] = self.monday_end.strftime('%-I:%M %p')\n else:\n schedule['monday_end'] = ''\n if self.tuesday_start:\n schedule['tuesday_start'] = self.tuesday_start.strftime('%-I:%M %p'\n )\n else:\n schedule['tuesday_start'] = ''\n if self.tuesday_end:\n schedule['tuesday_end'] = self.tuesday_end.strftime('%-I:%M %p')\n else:\n schedule['tuesday_end'] = ''\n if self.wednesday_start:\n schedule['wednesday_start'] = self.wednesday_start.strftime(\n '%-I:%M %p')\n else:\n schedule['wednesday_start'] = ''\n if self.wednesday_end:\n schedule['wednesday_end'] = self.wednesday_end.strftime('%-I:%M %p'\n )\n else:\n schedule['wednesday_end'] = ''\n if self.thursday_start:\n schedule['thursday_start'] = self.thursday_start.strftime(\n '%-I:%M %p')\n else:\n schedule['thursday_start'] = ''\n if self.thursday_end:\n schedule['thursday_end'] = self.thursday_end.strftime('%-I:%M %p')\n else:\n schedule['thursday_end'] = ''\n if self.friday_start:\n schedule['friday_start'] = self.friday_start.strftime('%-I:%M %p')\n else:\n schedule['friday_start'] = ''\n if self.friday_end:\n schedule['friday_end'] = self.friday_end.strftime('%-I:%M %p')\n else:\n schedule['friday_end'] = ''\n if self.saturday_start:\n schedule['saturday_start'] = self.saturday_start.strftime(\n '%-I:%M %p')\n else:\n schedule['saturday_start'] = ''\n if self.saturday_end:\n schedule['saturday_end'] = self.saturday_end.strftime('%-I:%M %p')\n else:\n schedule['saturday_end'] = ''\n if self.sunday_start:\n schedule['sunday_start'] = self.sunday_start.strftime('%-I:%M %p')\n else:\n schedule['sunday_start'] = ''\n if self.sunday_end:\n schedule['sunday_end'] = 
self.sunday_end.strftime('%-I:%M %p')\n else:\n schedule['sunday_end'] = ''\n return schedule\n\n\nclass Restaurant(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n company_name = models.CharField(verbose_name='company name', max_length\n =255, unique=False)\n main_contact = models.ForeignKey('profiles.UserClass', on_delete=models\n .DO_NOTHING, related_name='restaurant_object', null=True)\n phone_number = models.CharField(verbose_name='phone number', max_length\n =255, unique=False)\n schedule = models.ForeignKey('profiles.Schedule', on_delete=models.\n DO_NOTHING, null=True)\n meals = models.IntegerField()\n uber_eats = models.BooleanField(default=False)\n delivery_capacity = models.BooleanField(default=False)\n packaging = models.BooleanField(default=False)\n health_certificate = models.CharField(verbose_name='health certificate',\n max_length=255, unique=False)\n address = models.CharField(verbose_name='address', max_length=255,\n unique=False)\n coordinates = models.CharField(verbose_name='coordinates', max_length=\n 255, unique=False, null=True)\n latitude = models.CharField(verbose_name='latitude', max_length=255,\n unique=False, null=True)\n longitude = models.CharField(verbose_name='longitude', max_length=255,\n unique=False, null=True)\n review = models.ForeignKey('applications.ApplicationReview',\n related_name='restaurants', on_delete=models.DO_NOTHING, null=True)\n\n\nclass Program(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n program_name = models.CharField(verbose_name='program name', max_length\n =255, unique=False)\n main_contact = models.ForeignKey('profiles.UserClass', on_delete=models\n .DO_NOTHING, related_name='program_object', null=True)\n phone_number = models.CharField(verbose_name='phone number', max_length\n =255, unique=False)\n schedule = models.ForeignKey('profiles.Schedule', on_delete=models.\n DO_NOTHING, null=True)\n meals = models.IntegerField(default=0, null=True)\n address = 
models.CharField(verbose_name='address', max_length=255,\n unique=False)\n coordinates = models.CharField(verbose_name='address', max_length=255,\n unique=False, null=True)\n latitude = models.CharField(verbose_name='latitude', max_length=255,\n unique=False, null=True)\n longitude = models.CharField(verbose_name='longitude', max_length=255,\n unique=False, null=True)\n review = models.ForeignKey('applications.ApplicationReview',\n related_name='programs', on_delete=models.DO_NOTHING, null=True)\n\n\nclass Courier(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n\n\nclass Profile(models.Model):\n user = models.OneToOneField(BasicUser, on_delete=models.CASCADE)\n avatar = models.ImageField(upload_to='avatars', blank=True)\n\n def __str__(self):\n return self.user.username\n",
"step-5": "# from django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.contrib.auth.models import AbstractBaseUser, BaseUserManager\nfrom django.db import models\n\n# from applications.models import ApplicationReview\n# from profiles.models import Restaurant, Program, Courier\n\n\n\n\n\n# Enum for Admin\nBASIC_ADMIN = 'ADMIN'\nSUPER_ADMIN = 'SUPER'\nMANAGER = 'MNGR'\nDEVELOPER = 'DEV'\nSTAFF = 'STAFF'\n\n\nADMIN_ROLE_OPTIONS = [\n (BASIC_ADMIN, 'basic admin'),\n (SUPER_ADMIN, 'super admin'),\n (MANAGER, 'manager'),\n (DEVELOPER, 'developer'),\n (STAFF, 'stuff'),\n]\n\n\nPROGRAM = \"PR\"\nRESTAURANT = \"RE\"\n\nUSER_TYPE_OPTIONS = [\n (PROGRAM, 'Program'),\n (RESTAURANT, 'Restaurant'),\n]\n\n\nPHONE = \"PH\"\nEMAIL = \"EM\"\n\n\n\nPREFERRED_CONTACT = [\n (PHONE, 'Phone'),\n (EMAIL, 'Email'),\n]\n\n\nADMIN = \"ADM\"\nBASIC_USER = \"BSC\"\n\nUSER_TYPES = [\n (ADMIN, 'Admin'),\n (BASIC_USER, 'Basic User'),\n]\n\n\nclass UserClassManager(BaseUserManager):\n \"\"\"Manager for User class\"\"\"\n\n # method for creatig admins, but not super admins\n def create_staffuser(self, last_name, first_name, email, password, role, phone_number=''):\n new_account = self.create_user(phone_number=phone_number, last_name=last_name, first_name=first_name,\n email=email, password=password)\n new_account.staff = True\n\n admin_object = AdminUser.objects.create(role=role)\n new_account.admin_object = admin_object\n new_account.user_type = ADMIN\n admin_object.save(using=self._db)\n\n new_account.save(using=self._db)\n return new_account\n\n def create_basic_user(self, type, last_name, first_name, email, password, phone_number=''):\n new_account = self.create_user(phone_number=phone_number, last_name=last_name, first_name=first_name,\n email=email, password=password)\n user_object = BasicUser.objects.create(type=type)\n new_account.user_object = user_object\n new_account.user_type = BASIC_USER\n\n user_object.save(using=self._db)\n 
new_account.save(using=self._db)\n\n return new_account\n\n # method for creating restaurants, schools, etc.\n def create_user(self, last_name, first_name, email, password, phone_number=''):\n new_account = self.model(email=self.normalize_email(email),)\n new_account.set_password(password)\n\n new_account.last_name = last_name\n new_account.first_name = first_name\n\n new_account.phone_number = phone_number\n\n new_account.save(using=self._db)\n return new_account\n\n # method for creating superadmins\n def create_superuser(self, last_name, first_name, email, password, phone_number=''):\n new_account = self.create_user(phone_number=phone_number, last_name=last_name, first_name=first_name,\n email=email, password=password)\n new_account.staff = True\n new_account.admin = True\n\n admin_object = AdminUser.objects.create(role=SUPER_ADMIN)\n new_account.admin_object = admin_object\n new_account.user_type = ADMIN\n admin_object.save(using=self._db)\n\n new_account.save(using=self._db)\n return new_account\n\n # add any required fields here other than email and password\n REQUIRED_FIELDS = []\n USERNAME_FIELD = 'email'\n\n\nclass UserClass(AbstractBaseUser):\n \"\"\"Class for general user - can be basic user or admin\"\"\"\n phone_number = models.CharField(verbose_name='phone number', max_length=255, unique=False, default='')\n active = models.BooleanField(default=True)\n\n is_active = models.BooleanField(default=True)\n\n email = models.EmailField(verbose_name='email', max_length=255, unique=True, )\n last_name = models.CharField(verbose_name='last name', max_length=255, unique=False, )\n first_name = models.CharField(verbose_name='first name', max_length=255, unique=False, )\n objects = UserClassManager()\n staff = models.BooleanField(default=False)\n admin = models.BooleanField(default=False)\n image = models.CharField(verbose_name='user image', max_length=255, unique=False, default='defaultIcon.png')\n USERNAME_FIELD = \"email\"\n REQUIRED_FIELDS = ['first_name', 
'last_name']\n\n user_type = models.CharField(\n max_length=20,\n choices=USER_TYPES,\n default=BASIC_USER,\n )\n\n user_object = models.ForeignKey('profiles.BasicUser', on_delete=models.DO_NOTHING, null=True, related_name='basic_user_parent')\n admin_object = models.ForeignKey('profiles.AdminUser', on_delete=models.DO_NOTHING, null=True, related_name='admin_user_parent')\n\n def has_module_perms(self, app_label):\n return True\n\n @property\n def is_admin(self):\n return self.admin\n\n def get_full_name(self):\n return self.first_name + ' ' + self.last_name\n\n def get_short_name(self):\n return self.first_name\n\n @property\n def is_staff(self):\n return self.staff\n\n def __str__(self):\n return self.email\n\nclass AdminUser(models.Model):\n \"\"\"Model for admin user data\"\"\"\n role = models.CharField(\n max_length=20,\n choices=ADMIN_ROLE_OPTIONS,\n default=STAFF,\n )\n\n\nclass BasicUser(models.Model):\n \"\"\"Model for basic user data\"\"\"\n type = models.CharField(\n max_length=20,\n choices=USER_TYPE_OPTIONS,\n default=RESTAURANT,\n )\n\n preferred_contact = models.CharField(\n max_length=20,\n choices=PREFERRED_CONTACT,\n default=EMAIL,\n )\n\n position = models.CharField(verbose_name='position/title', max_length=255, unique=False, null=True)\n\n restaurant = models.ForeignKey('profiles.Restaurant', on_delete=models.CASCADE, null=True)\n program = models.ForeignKey('profiles.Program', on_delete=models.CASCADE, null=True)\n courier = models.ForeignKey('profiles.Courier', on_delete=models.CASCADE, null=True)\n\n\nclass Schedule(models.Model):\n monday_start = models.TimeField(auto_now=False, null=True, blank=True)\n monday_end = models.TimeField(auto_now=False, null=True, blank=True)\n tuesday_start = models.TimeField(auto_now=False, null=True, blank=True)\n tuesday_end = models.TimeField(auto_now=False, null=True, blank=True)\n wednesday_start = models.TimeField(auto_now=False, null=True, blank=True)\n wednesday_end = models.TimeField(auto_now=False, 
null=True, blank=True)\n thursday_start = models.TimeField(auto_now=False, null=True, blank=True)\n thursday_end = models.TimeField(auto_now=False, null=True, blank=True)\n friday_start = models.TimeField(auto_now=False, null=True, blank=True)\n friday_end = models.TimeField(auto_now=False, null=True, blank=True)\n saturday_start = models.TimeField(auto_now=False, null=True, blank=True)\n saturday_end = models.TimeField(auto_now=False, null=True, blank=True)\n sunday_start = models.TimeField(auto_now=False, null=True, blank=True)\n sunday_end = models.TimeField(auto_now=False, null=True, blank=True)\n\n def getSchedule(self):\n schedule = {}\n if self.monday_start:\n schedule['monday_start'] = self.monday_start.strftime(\"%-I:%M %p\")\n else:\n schedule['monday_start'] = ''\n if self.monday_end:\n schedule['monday_end'] = self.monday_end.strftime(\"%-I:%M %p\")\n else:\n schedule['monday_end'] = ''\n if self.tuesday_start:\n schedule['tuesday_start'] = self.tuesday_start.strftime(\"%-I:%M %p\")\n else:\n schedule['tuesday_start'] = ''\n if self.tuesday_end:\n schedule['tuesday_end'] = self.tuesday_end.strftime(\"%-I:%M %p\")\n else:\n schedule['tuesday_end'] = ''\n if self.wednesday_start:\n schedule['wednesday_start'] = self.wednesday_start.strftime(\"%-I:%M %p\")\n else:\n schedule['wednesday_start'] = ''\n if self.wednesday_end:\n schedule['wednesday_end'] = self.wednesday_end.strftime(\"%-I:%M %p\")\n else:\n schedule['wednesday_end'] = ''\n if self.thursday_start:\n schedule['thursday_start'] = self.thursday_start.strftime(\"%-I:%M %p\")\n else:\n schedule['thursday_start'] = ''\n if self.thursday_end:\n schedule['thursday_end'] = self.thursday_end.strftime(\"%-I:%M %p\")\n else:\n schedule['thursday_end'] = ''\n if self.friday_start:\n schedule['friday_start'] = self.friday_start.strftime(\"%-I:%M %p\")\n else:\n schedule['friday_start'] = ''\n if self.friday_end:\n schedule['friday_end'] = self.friday_end.strftime(\"%-I:%M %p\")\n else:\n 
schedule['friday_end'] = ''\n if self.saturday_start:\n schedule['saturday_start'] = self.saturday_start.strftime(\"%-I:%M %p\")\n else:\n schedule['saturday_start'] = ''\n if self.saturday_end:\n schedule['saturday_end'] = self.saturday_end.strftime(\"%-I:%M %p\")\n else:\n schedule['saturday_end'] = ''\n if self.sunday_start:\n schedule['sunday_start'] = self.sunday_start.strftime(\"%-I:%M %p\")\n else:\n schedule['sunday_start'] = ''\n if self.sunday_end:\n schedule['sunday_end'] = self.sunday_end.strftime(\"%-I:%M %p\")\n else:\n schedule['sunday_end'] = ''\n\n return schedule\n\n\nclass Restaurant(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n company_name = models.CharField(verbose_name='company name', max_length=255, unique=False, )\n main_contact = models.ForeignKey('profiles.UserClass', on_delete=models.DO_NOTHING, related_name=\"restaurant_object\", null=True)\n phone_number = models.CharField(verbose_name='phone number', max_length=255, unique=False, )\n schedule = models.ForeignKey('profiles.Schedule', on_delete=models.DO_NOTHING, null=True)\n meals = models.IntegerField()\n uber_eats = models.BooleanField(default=False)\n delivery_capacity = models.BooleanField(default=False)\n packaging = models.BooleanField(default=False)\n health_certificate = models.CharField(verbose_name='health certificate', max_length=255, unique=False, )\n address = models.CharField(verbose_name='address', max_length=255, unique=False, )\n coordinates = models.CharField(verbose_name='coordinates', max_length=255, unique=False, null=True)\n latitude = models.CharField(verbose_name='latitude', max_length=255, unique=False, null=True)\n longitude = models.CharField(verbose_name='longitude', max_length=255, unique=False, null=True)\n review = models.ForeignKey('applications.ApplicationReview', related_name='restaurants',\n on_delete=models.DO_NOTHING, null=True)\n\n\n\n\nclass Program(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n 
program_name = models.CharField(verbose_name='program name', max_length=255, unique=False, )\n main_contact = models.ForeignKey('profiles.UserClass', on_delete=models.DO_NOTHING, related_name=\"program_object\", null=True)\n phone_number = models.CharField(verbose_name='phone number', max_length=255, unique=False, )\n schedule = models.ForeignKey('profiles.Schedule', on_delete=models.DO_NOTHING, null=True)\n meals = models.IntegerField(default=0, null=True)\n address = models.CharField(verbose_name='address', max_length=255, unique=False, )\n coordinates = models.CharField(verbose_name='address', max_length=255, unique=False, null=True)\n latitude = models.CharField(verbose_name='latitude', max_length=255, unique=False, null=True)\n longitude = models.CharField(verbose_name='longitude', max_length=255, unique=False, null=True)\n review = models.ForeignKey('applications.ApplicationReview', related_name=\"programs\",\n on_delete=models.DO_NOTHING, null=True)\n\n\n\n\n\nclass Courier(models.Model):\n created_at = models.DateTimeField(auto_now=True)\n\n\n\n\n\n\n\nclass Profile(models.Model):\n user = models.OneToOneField(BasicUser, on_delete=models.CASCADE)\n avatar = models.ImageField(upload_to='avatars', blank=True)\n\n def __str__(self):\n return self.user.username\n",
"step-ids": [
20,
31,
32,
36,
37
]
}
|
[
20,
31,
32,
36,
37
] |
<|reserved_special_token_0|>
class User(mongoengine.Document):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def pre_save(cls, sender, document, **kwargs):
document.updated_at = datetime.datetime.utcnow()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(mongoengine.Document):
username = mongoengine.StringField(required=True)
password = mongoengine.StringField(required=True)
email = mongoengine.StringField(required=True)
active_hash = mongoengine.StringField(required=False, default=None)
active_hash_expires = mongoengine.DateTimeField(required=False, default
=None)
recover_hash = mongoengine.StringField(required=False)
recover_hash_expires = mongoengine.DateTimeField(required=False)
active = mongoengine.BooleanField(required=True, default=False)
locked = mongoengine.BooleanField(required=True, default=True)
first_name = mongoengine.StringField(required=False)
last_name = mongoengine.StringField(required=False)
show_as = mongoengine.StringField(required=False)
date_of_birth = mongoengine.DateTimeField(required=False)
created_at = mongoengine.DateTimeField(required=True, default=datetime.
datetime.utcnow())
updated_at = mongoengine.DateTimeField(required=False, default=datetime
.datetime.utcnow())
meta = {'db_alias': app.config['DEFAULT_DATABASE_ALIAS'], 'collection':
'users'}
@classmethod
def pre_save(cls, sender, document, **kwargs):
document.updated_at = datetime.datetime.utcnow()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(mongoengine.Document):
username = mongoengine.StringField(required=True)
password = mongoengine.StringField(required=True)
email = mongoengine.StringField(required=True)
active_hash = mongoengine.StringField(required=False, default=None)
active_hash_expires = mongoengine.DateTimeField(required=False, default
=None)
recover_hash = mongoengine.StringField(required=False)
recover_hash_expires = mongoengine.DateTimeField(required=False)
active = mongoengine.BooleanField(required=True, default=False)
locked = mongoengine.BooleanField(required=True, default=True)
first_name = mongoengine.StringField(required=False)
last_name = mongoengine.StringField(required=False)
show_as = mongoengine.StringField(required=False)
date_of_birth = mongoengine.DateTimeField(required=False)
created_at = mongoengine.DateTimeField(required=True, default=datetime.
datetime.utcnow())
updated_at = mongoengine.DateTimeField(required=False, default=datetime
.datetime.utcnow())
meta = {'db_alias': app.config['DEFAULT_DATABASE_ALIAS'], 'collection':
'users'}
@classmethod
def pre_save(cls, sender, document, **kwargs):
document.updated_at = datetime.datetime.utcnow()
mongoengine.signals.pre_save.connect(User.pre_save, sender=User)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from www import app
import mongoengine
import datetime
class User(mongoengine.Document):
username = mongoengine.StringField(required=True)
password = mongoengine.StringField(required=True)
email = mongoengine.StringField(required=True)
active_hash = mongoengine.StringField(required=False, default=None)
active_hash_expires = mongoengine.DateTimeField(required=False, default
=None)
recover_hash = mongoengine.StringField(required=False)
recover_hash_expires = mongoengine.DateTimeField(required=False)
active = mongoengine.BooleanField(required=True, default=False)
locked = mongoengine.BooleanField(required=True, default=True)
first_name = mongoengine.StringField(required=False)
last_name = mongoengine.StringField(required=False)
show_as = mongoengine.StringField(required=False)
date_of_birth = mongoengine.DateTimeField(required=False)
created_at = mongoengine.DateTimeField(required=True, default=datetime.
datetime.utcnow())
updated_at = mongoengine.DateTimeField(required=False, default=datetime
.datetime.utcnow())
meta = {'db_alias': app.config['DEFAULT_DATABASE_ALIAS'], 'collection':
'users'}
@classmethod
def pre_save(cls, sender, document, **kwargs):
document.updated_at = datetime.datetime.utcnow()
mongoengine.signals.pre_save.connect(User.pre_save, sender=User)
<|reserved_special_token_1|>
"""
USERS MODEL
"""
from www import app
import mongoengine
import datetime
class User(mongoengine.Document):
    """Account document: credentials, activation/recovery state and profile."""

    username = mongoengine.StringField(required=True)
    password = mongoengine.StringField(required=True)
    email = mongoengine.StringField(required=True)

    # One-time hash mailed to the user to activate the account.
    active_hash = mongoengine.StringField(required=False, default=None)
    active_hash_expires = mongoengine.DateTimeField(required=False,
                                                    default=None)

    # One-time hash for password recovery.
    recover_hash = mongoengine.StringField(required=False)
    recover_hash_expires = mongoengine.DateTimeField(required=False)

    active = mongoengine.BooleanField(required=True, default=False)
    locked = mongoengine.BooleanField(required=True, default=True)  # locked changes depending on user active or not

    first_name = mongoengine.StringField(required=False)
    last_name = mongoengine.StringField(required=False)
    show_as = mongoengine.StringField(required=False)
    date_of_birth = mongoengine.DateTimeField(required=False)

    # Pass the callable (not its result): utcnow() would be evaluated a
    # single time at import and that frozen timestamp would be shared by
    # every document created afterwards.  mongoengine calls a callable
    # default once per new document.
    created_at = mongoengine.DateTimeField(required=True, default=datetime.datetime.utcnow)
    updated_at = mongoengine.DateTimeField(required=False, default=datetime.datetime.utcnow)

    meta = {
        'db_alias': app.config["DEFAULT_DATABASE_ALIAS"],
        'collection': 'users',
    }

    @classmethod
    def pre_save(cls, sender, document, **kwargs):
        """Signal handler: refresh updated_at just before every save."""
        document.updated_at = datetime.datetime.utcnow()


mongoengine.signals.pre_save.connect(User.pre_save, sender=User)
|
flexible
|
{
"blob_id": "51cdb41836415c08609ee6a6bcc3adbaf2533da4",
"index": 3697,
"step-1": "<mask token>\n\n\nclass User(mongoengine.Document):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def pre_save(cls, sender, document, **kwargs):\n document.updated_at = datetime.datetime.utcnow()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass User(mongoengine.Document):\n username = mongoengine.StringField(required=True)\n password = mongoengine.StringField(required=True)\n email = mongoengine.StringField(required=True)\n active_hash = mongoengine.StringField(required=False, default=None)\n active_hash_expires = mongoengine.DateTimeField(required=False, default\n =None)\n recover_hash = mongoengine.StringField(required=False)\n recover_hash_expires = mongoengine.DateTimeField(required=False)\n active = mongoengine.BooleanField(required=True, default=False)\n locked = mongoengine.BooleanField(required=True, default=True)\n first_name = mongoengine.StringField(required=False)\n last_name = mongoengine.StringField(required=False)\n show_as = mongoengine.StringField(required=False)\n date_of_birth = mongoengine.DateTimeField(required=False)\n created_at = mongoengine.DateTimeField(required=True, default=datetime.\n datetime.utcnow())\n updated_at = mongoengine.DateTimeField(required=False, default=datetime\n .datetime.utcnow())\n meta = {'db_alias': app.config['DEFAULT_DATABASE_ALIAS'], 'collection':\n 'users'}\n\n @classmethod\n def pre_save(cls, sender, document, **kwargs):\n document.updated_at = datetime.datetime.utcnow()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass User(mongoengine.Document):\n username = mongoengine.StringField(required=True)\n password = mongoengine.StringField(required=True)\n email = mongoengine.StringField(required=True)\n active_hash = mongoengine.StringField(required=False, default=None)\n active_hash_expires = mongoengine.DateTimeField(required=False, default\n =None)\n recover_hash = mongoengine.StringField(required=False)\n recover_hash_expires = mongoengine.DateTimeField(required=False)\n active = mongoengine.BooleanField(required=True, default=False)\n locked = mongoengine.BooleanField(required=True, default=True)\n first_name = mongoengine.StringField(required=False)\n last_name = mongoengine.StringField(required=False)\n show_as = mongoengine.StringField(required=False)\n date_of_birth = mongoengine.DateTimeField(required=False)\n created_at = mongoengine.DateTimeField(required=True, default=datetime.\n datetime.utcnow())\n updated_at = mongoengine.DateTimeField(required=False, default=datetime\n .datetime.utcnow())\n meta = {'db_alias': app.config['DEFAULT_DATABASE_ALIAS'], 'collection':\n 'users'}\n\n @classmethod\n def pre_save(cls, sender, document, **kwargs):\n document.updated_at = datetime.datetime.utcnow()\n\n\nmongoengine.signals.pre_save.connect(User.pre_save, sender=User)\n",
"step-4": "<mask token>\nfrom www import app\nimport mongoengine\nimport datetime\n\n\nclass User(mongoengine.Document):\n username = mongoengine.StringField(required=True)\n password = mongoengine.StringField(required=True)\n email = mongoengine.StringField(required=True)\n active_hash = mongoengine.StringField(required=False, default=None)\n active_hash_expires = mongoengine.DateTimeField(required=False, default\n =None)\n recover_hash = mongoengine.StringField(required=False)\n recover_hash_expires = mongoengine.DateTimeField(required=False)\n active = mongoengine.BooleanField(required=True, default=False)\n locked = mongoengine.BooleanField(required=True, default=True)\n first_name = mongoengine.StringField(required=False)\n last_name = mongoengine.StringField(required=False)\n show_as = mongoengine.StringField(required=False)\n date_of_birth = mongoengine.DateTimeField(required=False)\n created_at = mongoengine.DateTimeField(required=True, default=datetime.\n datetime.utcnow())\n updated_at = mongoengine.DateTimeField(required=False, default=datetime\n .datetime.utcnow())\n meta = {'db_alias': app.config['DEFAULT_DATABASE_ALIAS'], 'collection':\n 'users'}\n\n @classmethod\n def pre_save(cls, sender, document, **kwargs):\n document.updated_at = datetime.datetime.utcnow()\n\n\nmongoengine.signals.pre_save.connect(User.pre_save, sender=User)\n",
"step-5": "\"\"\"\n USERS MODEL\n\"\"\"\n\nfrom www import app\nimport mongoengine\nimport datetime\n\n\nclass User(mongoengine.Document):\n username = mongoengine.StringField(required=True)\n password = mongoengine.StringField(required=True)\n email = mongoengine.StringField(required=True)\n\n active_hash = mongoengine.StringField(required=False, default=None)\n active_hash_expires = mongoengine.DateTimeField(required=False,\n default=None)\n\n recover_hash = mongoengine.StringField(required=False)\n recover_hash_expires = mongoengine.DateTimeField(required=False)\n\n active = mongoengine.BooleanField(required=True, default=False)\n locked = mongoengine.BooleanField(required=True, default=True) # locked changes depending on user active or not\n\n first_name = mongoengine.StringField(required=False)\n last_name = mongoengine.StringField(required=False)\n show_as = mongoengine.StringField(required=False)\n date_of_birth = mongoengine.DateTimeField(required=False)\n\n created_at = mongoengine.DateTimeField(required=True, default=datetime.datetime.utcnow())\n updated_at = mongoengine.DateTimeField(required=False, default=datetime.datetime.utcnow())\n\n meta = {\n 'db_alias': app.config[\"DEFAULT_DATABASE_ALIAS\"],\n 'collection': 'users',\n }\n\n @classmethod\n def pre_save(cls, sender, document, **kwargs):\n document.updated_at = datetime.datetime.utcnow()\n\n\nmongoengine.signals.pre_save.connect(User.pre_save, sender=User)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
#imports
from math import sqrt, pi, exp
from csv import reader
from random import seed,randrange
"""
Helper functions
"""
#calculate probability
def probability(x, avg, standev):
    """Gaussian probability density of x for mean ``avg`` and std dev ``standev``."""
    coefficient = 1.0 / (sqrt(2 * pi) * standev)
    return coefficient * exp(-((x - avg) ** 2) / (2 * standev ** 2))
#mean
def avg(vals):
    """Arithmetic mean of a non-empty sequence of numbers."""
    total = 0.0
    for value in vals:
        total += value
    return total / len(vals)
#standard deviation
def standev(vals):
    """Sample standard deviation (n-1 denominator) of ``vals``.

    NOTE: a single-element input divides by zero, as in the original.
    """
    mean = sum(vals) / float(len(vals))
    squared_diffs = [(value - mean) ** 2 for value in vals]
    return sqrt(sum(squared_diffs) / float(len(vals) - 1))
"""
Data Handling
"""
def read_csv(file_name):
    """Load a CSV file into a list of rows (each row a list of strings).

    Blank rows are skipped.  The file is opened with newline='' as the
    csv module requires, so quoted fields containing newlines are parsed
    correctly on every platform.
    """
    data = list()
    # newline='' lets the csv reader perform its own newline handling.
    with open(file_name, 'r', newline='') as file:
        csv = reader(file)
        for row in csv:
            if not row:
                continue
            data.append(row)
    return data
# Convert string column to float
def str_column_to_float(dataset, column):
    """Convert the given column of every row from string to float, in place."""
    for record in dataset:
        record[column] = float(record[column].strip())
def int_from_string_col(data, col):
    """Replace string class labels in column ``col`` with integer codes, in place.

    Returns the label -> integer lookup dict.  Code assignment follows
    set iteration order, so it is not guaranteed stable across runs.
    """
    labels = set(row[col] for row in data)
    lookup = {label: code for code, label in enumerate(labels)}
    for row in data:
        row[col] = lookup[row[col]]
    return lookup
def move_class_to_last_col(data, col):
    """Move column ``col`` of every row to the end of the row, in place.

    Returns the same list for convenience.
    """
    for row in data:
        row.append(row.pop(col))
    return data
"""
Implementation Functions
"""
"""
We need to calculate the probability of data according to their class so the
training data needs to be split up by classes. In order to do this we need to
establish the column that represents the class value for each dataset.
"""
# this works for datasets with last column representing class value
def split_class(data):
    """Group rows by their class value (assumed to be the last column).

    Returns a dict mapping class value -> list of rows, preserving the
    order in which each class first appears.
    """
    grouped = dict()
    for row in data:
        grouped.setdefault(row[-1], []).append(row)
    return grouped
"""
We need to find the mean and standard deviation for each column of input.
"""
def data_stats(data):
    """Per-column (mean, stdev, count) tuples for a list of rows.

    The final entry (the class column) is dropped from the result.
    """
    stats = [(avg(column), standev(column), len(column)) for column in zip(*data)]
    return stats[:-1]
def class_stats(data):
    """Compute per-class column statistics for the whole dataset.

    Returns a dict mapping class value -> list of (mean, stdev, count)
    tuples, one per attribute column.
    """
    summaries = dict()
    for label, rows in split_class(data).items():
        summaries[label] = data_stats(rows)
    return summaries
"""
Calculate Class Probabilities
"""
def class_get_prob(stats, instance):
    """Naive-Bayes score of ``instance`` for each class in ``stats``.

    Each score is the class prior (frequency in the training set) times
    the Gaussian likelihood of every attribute value.  Scores are not
    normalised, so they are relative, not true probabilities.
    """
    total_rows = sum(stats[label][0][2] for label in stats)
    scores = dict()
    for label, column_stats in stats.items():
        # Prior: fraction of training rows belonging to this class.
        scores[label] = stats[label][0][2] / float(total_rows)
        for index, (mean, stdev, _count) in enumerate(column_stats):
            scores[label] *= probability(instance[index], mean, stdev)
    return scores
def predict(stats, instance):
    """Return the class label with the highest naive-Bayes score.

    Ties keep the first label seen, matching strict > comparison.
    """
    scores = class_get_prob(stats, instance)
    best_label = None
    best_score = -1
    for label, score in scores.items():
        if best_label is None or score > best_score:
            best_score = score
            best_label = label
    return best_label
def cross_validation_split(data, n_folds):
    """Randomly partition ``data`` into ``n_folds`` equal-size folds.

    Rows are drawn without replacement via random.randrange; when the
    length is not evenly divisible, the remainder rows are dropped.
    """
    remaining = list(data)
    fold_size = int(len(data) / n_folds)
    folds = list()
    for _ in range(n_folds):
        fold = list()
        while len(fold) < fold_size:
            pick = randrange(len(remaining))
            fold.append(remaining.pop(pick))
        folds.append(fold)
    return folds
def evaluate(actual, predicted):
    """Percentage of positions where ``predicted`` matches ``actual``."""
    matches = sum(1 for i in range(len(actual)) if actual[i] == predicted[i])
    return matches / float(len(actual)) * 100.0
def cross_validation(data, algo, n_folds, *args):
    """Estimate algorithm accuracy with k-fold cross-validation.

    For each fold, the model is trained on all other folds and evaluated
    on the held-out fold.  Returns the list of per-fold accuracy
    percentages.

    data    -- list of rows, class value in the last column
    algo    -- callable(train, test, *args) returning predicted labels
    n_folds -- number of folds
    """
    folds = cross_validation_split(data, n_folds)
    accuracy_list = list()
    for fold in folds:
        # Flatten every fold except the held-out one into the train set.
        # Holding out by identity (is not) guarantees the current fold is
        # excluded even when two folds compare equal; a comprehension also
        # avoids the quadratic cost of sum(list_of_lists, []).
        train_set = [row for other in folds if other is not fold for row in other]
        test_set = list()
        for row in fold:
            copy = list(row)
            copy[-1] = None  # hide the true label from the model
            test_set.append(copy)
        predicted = algo(train_set, test_set, *args)
        actual = [row[-1] for row in fold]
        accuracy_list.append(evaluate(actual, predicted))
    return accuracy_list
def naive_bayes(train, test):
    """Train Gaussian naive Bayes on ``train`` and predict labels for ``test``."""
    model = class_stats(train)
    return [predict(model, row) for row in test]
def run(file_name, target, n_folds=10):
    """Run naive-Bayes k-fold cross-validation on a CSV dataset.

    Args:
        file_name: path of the CSV file to load.
        target: column index of the class label in the raw file.
        n_folds: number of cross-validation folds (default 10, matching
            the previously hard-coded value).

    Side effects: prints per-fold accuracy scores and their mean.
    """
    seed(1)  # fixed seed so fold assignment is reproducible across runs
    data = read_csv(file_name)
    data = move_class_to_last_col(data, target)
    # every column except the (now last) class column is a numeric feature
    for col in range(len(data[0]) - 1):
        str_column_to_float(data, col)
    int_from_string_col(data, len(data[0]) - 1)
    accuracies = cross_validation(data, naive_bayes, n_folds)
    print("%d-fold Cross-Validation Accuracy Scores" % n_folds)
    for score in accuracies:
        print("%.4f%%" % score)
    print('Mean Accuracy: %.4f%%' % (sum(accuracies) / float(len(accuracies))))
|
normal
|
{
"blob_id": "f92a1398a27541557ec5bbf752d44ce40d1df94a",
"index": 4131,
"step-1": "<mask token>\n\n\ndef standev(vals):\n mean = avg(vals)\n var = sum([((x - mean) ** 2) for x in vals]) / float(len(vals) - 1)\n return sqrt(var)\n\n\n<mask token>\n\n\ndef read_csv(file_name):\n data = list()\n with open(file_name, 'r') as file:\n csv = reader(file)\n for row in csv:\n if not row:\n continue\n data.append(row)\n return data\n\n\n<mask token>\n\n\ndef split_class(data):\n data_by_class = dict()\n for i in range(len(data)):\n instance = data[i]\n class_val = instance[-1]\n if class_val not in data_by_class:\n data_by_class[class_val] = list()\n data_by_class[class_val].append(instance)\n return data_by_class\n\n\n<mask token>\n\n\ndef data_stats(data):\n stats = [(avg(col), standev(col), len(col)) for col in zip(*data)]\n del stats[-1]\n return stats\n\n\n<mask token>\n\n\ndef cross_validation_split(data, n_folds):\n data_split = list()\n copy = list(data)\n fold_size = int(len(data) / n_folds)\n for _ in range(n_folds):\n fold = list()\n while len(fold) < fold_size:\n index = randrange(len(copy))\n fold.append(copy.pop(index))\n data_split.append(fold)\n return data_split\n\n\ndef evaluate(actual, predicted):\n correct = 0\n for i in range(len(actual)):\n if actual[i] == predicted[i]:\n correct += 1\n return correct / float(len(actual)) * 100.0\n\n\ndef cross_validation(data, algo, n_folds, *args):\n folds = cross_validation_split(data, n_folds)\n accuracy_list = list()\n for fold in folds:\n train_set = list(folds)\n train_set.remove(fold)\n train_set = sum(train_set, [])\n test_set = list()\n for row in fold:\n copy = list(row)\n test_set.append(copy)\n copy[-1] = None\n predicted = algo(train_set, test_set, *args)\n actual = [row[-1] for row in fold]\n accuracy = evaluate(actual, predicted)\n accuracy_list.append(accuracy)\n return accuracy_list\n\n\ndef naive_bayes(train, test):\n stats = class_stats(train)\n preds = list()\n for row in test:\n result = predict(stats, row)\n preds.append(result)\n return preds\n\n\ndef run(file_name, 
target):\n seed(1)\n data = read_csv(file_name)\n data = move_class_to_last_col(data, target)\n for i in range(len(data[0]) - 1):\n str_column_to_float(data, i)\n int_from_string_col(data, len(data[0]) - 1)\n n_folds = 10\n accuracies = cross_validation(data, naive_bayes, n_folds)\n print('10-fold Cross-Validation Accuracy Scores')\n for score in accuracies:\n print('%.4f%%' % score)\n print('Mean Accuracy: %.4f%%' % (sum(accuracies) / float(len(accuracies))))\n",
"step-2": "<mask token>\n\n\ndef avg(vals):\n return sum(vals) / float(len(vals))\n\n\ndef standev(vals):\n mean = avg(vals)\n var = sum([((x - mean) ** 2) for x in vals]) / float(len(vals) - 1)\n return sqrt(var)\n\n\n<mask token>\n\n\ndef read_csv(file_name):\n data = list()\n with open(file_name, 'r') as file:\n csv = reader(file)\n for row in csv:\n if not row:\n continue\n data.append(row)\n return data\n\n\n<mask token>\n\n\ndef split_class(data):\n data_by_class = dict()\n for i in range(len(data)):\n instance = data[i]\n class_val = instance[-1]\n if class_val not in data_by_class:\n data_by_class[class_val] = list()\n data_by_class[class_val].append(instance)\n return data_by_class\n\n\n<mask token>\n\n\ndef data_stats(data):\n stats = [(avg(col), standev(col), len(col)) for col in zip(*data)]\n del stats[-1]\n return stats\n\n\n<mask token>\n\n\ndef class_get_prob(stats, instance):\n num_rows = sum([stats[label][0][2] for label in stats])\n prob_vals = dict()\n for class_val, class_stats in stats.items():\n prob_vals[class_val] = stats[class_val][0][2] / float(num_rows)\n for i in range(len(class_stats)):\n avg, standev, size = class_stats[i]\n prob_vals[class_val] *= probability(instance[i], avg, standev)\n return prob_vals\n\n\n<mask token>\n\n\ndef cross_validation_split(data, n_folds):\n data_split = list()\n copy = list(data)\n fold_size = int(len(data) / n_folds)\n for _ in range(n_folds):\n fold = list()\n while len(fold) < fold_size:\n index = randrange(len(copy))\n fold.append(copy.pop(index))\n data_split.append(fold)\n return data_split\n\n\ndef evaluate(actual, predicted):\n correct = 0\n for i in range(len(actual)):\n if actual[i] == predicted[i]:\n correct += 1\n return correct / float(len(actual)) * 100.0\n\n\ndef cross_validation(data, algo, n_folds, *args):\n folds = cross_validation_split(data, n_folds)\n accuracy_list = list()\n for fold in folds:\n train_set = list(folds)\n train_set.remove(fold)\n train_set = sum(train_set, [])\n 
test_set = list()\n for row in fold:\n copy = list(row)\n test_set.append(copy)\n copy[-1] = None\n predicted = algo(train_set, test_set, *args)\n actual = [row[-1] for row in fold]\n accuracy = evaluate(actual, predicted)\n accuracy_list.append(accuracy)\n return accuracy_list\n\n\ndef naive_bayes(train, test):\n stats = class_stats(train)\n preds = list()\n for row in test:\n result = predict(stats, row)\n preds.append(result)\n return preds\n\n\ndef run(file_name, target):\n seed(1)\n data = read_csv(file_name)\n data = move_class_to_last_col(data, target)\n for i in range(len(data[0]) - 1):\n str_column_to_float(data, i)\n int_from_string_col(data, len(data[0]) - 1)\n n_folds = 10\n accuracies = cross_validation(data, naive_bayes, n_folds)\n print('10-fold Cross-Validation Accuracy Scores')\n for score in accuracies:\n print('%.4f%%' % score)\n print('Mean Accuracy: %.4f%%' % (sum(accuracies) / float(len(accuracies))))\n",
"step-3": "<mask token>\n\n\ndef avg(vals):\n return sum(vals) / float(len(vals))\n\n\ndef standev(vals):\n mean = avg(vals)\n var = sum([((x - mean) ** 2) for x in vals]) / float(len(vals) - 1)\n return sqrt(var)\n\n\n<mask token>\n\n\ndef read_csv(file_name):\n data = list()\n with open(file_name, 'r') as file:\n csv = reader(file)\n for row in csv:\n if not row:\n continue\n data.append(row)\n return data\n\n\ndef str_column_to_float(dataset, column):\n for row in dataset:\n row[column] = float(row[column].strip())\n\n\n<mask token>\n\n\ndef split_class(data):\n data_by_class = dict()\n for i in range(len(data)):\n instance = data[i]\n class_val = instance[-1]\n if class_val not in data_by_class:\n data_by_class[class_val] = list()\n data_by_class[class_val].append(instance)\n return data_by_class\n\n\n<mask token>\n\n\ndef data_stats(data):\n stats = [(avg(col), standev(col), len(col)) for col in zip(*data)]\n del stats[-1]\n return stats\n\n\n<mask token>\n\n\ndef class_get_prob(stats, instance):\n num_rows = sum([stats[label][0][2] for label in stats])\n prob_vals = dict()\n for class_val, class_stats in stats.items():\n prob_vals[class_val] = stats[class_val][0][2] / float(num_rows)\n for i in range(len(class_stats)):\n avg, standev, size = class_stats[i]\n prob_vals[class_val] *= probability(instance[i], avg, standev)\n return prob_vals\n\n\n<mask token>\n\n\ndef cross_validation_split(data, n_folds):\n data_split = list()\n copy = list(data)\n fold_size = int(len(data) / n_folds)\n for _ in range(n_folds):\n fold = list()\n while len(fold) < fold_size:\n index = randrange(len(copy))\n fold.append(copy.pop(index))\n data_split.append(fold)\n return data_split\n\n\ndef evaluate(actual, predicted):\n correct = 0\n for i in range(len(actual)):\n if actual[i] == predicted[i]:\n correct += 1\n return correct / float(len(actual)) * 100.0\n\n\ndef cross_validation(data, algo, n_folds, *args):\n folds = cross_validation_split(data, n_folds)\n accuracy_list = 
list()\n for fold in folds:\n train_set = list(folds)\n train_set.remove(fold)\n train_set = sum(train_set, [])\n test_set = list()\n for row in fold:\n copy = list(row)\n test_set.append(copy)\n copy[-1] = None\n predicted = algo(train_set, test_set, *args)\n actual = [row[-1] for row in fold]\n accuracy = evaluate(actual, predicted)\n accuracy_list.append(accuracy)\n return accuracy_list\n\n\ndef naive_bayes(train, test):\n stats = class_stats(train)\n preds = list()\n for row in test:\n result = predict(stats, row)\n preds.append(result)\n return preds\n\n\ndef run(file_name, target):\n seed(1)\n data = read_csv(file_name)\n data = move_class_to_last_col(data, target)\n for i in range(len(data[0]) - 1):\n str_column_to_float(data, i)\n int_from_string_col(data, len(data[0]) - 1)\n n_folds = 10\n accuracies = cross_validation(data, naive_bayes, n_folds)\n print('10-fold Cross-Validation Accuracy Scores')\n for score in accuracies:\n print('%.4f%%' % score)\n print('Mean Accuracy: %.4f%%' % (sum(accuracies) / float(len(accuracies))))\n",
"step-4": "from math import sqrt, pi, exp\nfrom csv import reader\nfrom random import seed, randrange\n<mask token>\n\n\ndef probability(x, avg, standev):\n exponent = exp(-((x - avg) ** 2 / (2 * standev ** 2)))\n return 1 / (sqrt(2 * pi) * standev) * exponent\n\n\ndef avg(vals):\n return sum(vals) / float(len(vals))\n\n\ndef standev(vals):\n mean = avg(vals)\n var = sum([((x - mean) ** 2) for x in vals]) / float(len(vals) - 1)\n return sqrt(var)\n\n\n<mask token>\n\n\ndef read_csv(file_name):\n data = list()\n with open(file_name, 'r') as file:\n csv = reader(file)\n for row in csv:\n if not row:\n continue\n data.append(row)\n return data\n\n\ndef str_column_to_float(dataset, column):\n for row in dataset:\n row[column] = float(row[column].strip())\n\n\ndef int_from_string_col(data, col):\n class_val = [row[col] for row in data]\n unique_set = set(class_val)\n lookup = dict()\n for i, val in enumerate(unique_set):\n lookup[val] = i\n for row in data:\n row[col] = lookup[row[col]]\n return lookup\n\n\ndef move_class_to_last_col(data, col):\n for row in data:\n temp = row[col]\n del row[col]\n row.append(temp)\n return data\n\n\n<mask token>\n\n\ndef split_class(data):\n data_by_class = dict()\n for i in range(len(data)):\n instance = data[i]\n class_val = instance[-1]\n if class_val not in data_by_class:\n data_by_class[class_val] = list()\n data_by_class[class_val].append(instance)\n return data_by_class\n\n\n<mask token>\n\n\ndef data_stats(data):\n stats = [(avg(col), standev(col), len(col)) for col in zip(*data)]\n del stats[-1]\n return stats\n\n\ndef class_stats(data):\n split = split_class(data)\n class_stats = dict()\n for class_val, row in split.items():\n class_stats[class_val] = data_stats(row)\n return class_stats\n\n\n<mask token>\n\n\ndef class_get_prob(stats, instance):\n num_rows = sum([stats[label][0][2] for label in stats])\n prob_vals = dict()\n for class_val, class_stats in stats.items():\n prob_vals[class_val] = stats[class_val][0][2] / 
float(num_rows)\n for i in range(len(class_stats)):\n avg, standev, size = class_stats[i]\n prob_vals[class_val] *= probability(instance[i], avg, standev)\n return prob_vals\n\n\ndef predict(stats, instance):\n prob_vals = class_get_prob(stats, instance)\n top_prob, top_label = -1, None\n for class_val, prob in prob_vals.items():\n if top_label is None or prob > top_prob:\n top_prob = prob\n top_label = class_val\n return top_label\n\n\ndef cross_validation_split(data, n_folds):\n data_split = list()\n copy = list(data)\n fold_size = int(len(data) / n_folds)\n for _ in range(n_folds):\n fold = list()\n while len(fold) < fold_size:\n index = randrange(len(copy))\n fold.append(copy.pop(index))\n data_split.append(fold)\n return data_split\n\n\ndef evaluate(actual, predicted):\n correct = 0\n for i in range(len(actual)):\n if actual[i] == predicted[i]:\n correct += 1\n return correct / float(len(actual)) * 100.0\n\n\ndef cross_validation(data, algo, n_folds, *args):\n folds = cross_validation_split(data, n_folds)\n accuracy_list = list()\n for fold in folds:\n train_set = list(folds)\n train_set.remove(fold)\n train_set = sum(train_set, [])\n test_set = list()\n for row in fold:\n copy = list(row)\n test_set.append(copy)\n copy[-1] = None\n predicted = algo(train_set, test_set, *args)\n actual = [row[-1] for row in fold]\n accuracy = evaluate(actual, predicted)\n accuracy_list.append(accuracy)\n return accuracy_list\n\n\ndef naive_bayes(train, test):\n stats = class_stats(train)\n preds = list()\n for row in test:\n result = predict(stats, row)\n preds.append(result)\n return preds\n\n\ndef run(file_name, target):\n seed(1)\n data = read_csv(file_name)\n data = move_class_to_last_col(data, target)\n for i in range(len(data[0]) - 1):\n str_column_to_float(data, i)\n int_from_string_col(data, len(data[0]) - 1)\n n_folds = 10\n accuracies = cross_validation(data, naive_bayes, n_folds)\n print('10-fold Cross-Validation Accuracy Scores')\n for score in accuracies:\n 
print('%.4f%%' % score)\n print('Mean Accuracy: %.4f%%' % (sum(accuracies) / float(len(accuracies))))\n",
"step-5": "# -*- coding: utf-8 -*-\n\n#imports\nfrom math import sqrt, pi, exp\nfrom csv import reader\nfrom random import seed,randrange\n\n\n\"\"\"\nHelper functions\n\"\"\"\n#calculate probability\ndef probability(x,avg,standev):\n exponent = exp(-((x-avg)**2 / (2 * standev**2)))\n return (1/(sqrt(2*pi) *standev)) * exponent\n\n#mean\ndef avg(vals):\n return sum(vals)/float(len(vals))\n\n#standard deviation\ndef standev(vals):\n mean = avg(vals)\n var = sum([(x-mean)**2 for x in vals]) / float(len(vals)-1)\n return sqrt(var)\n\n\"\"\"\nData Handling\n\"\"\"\ndef read_csv(file_name):\n data = list()\n with open(file_name, 'r') as file:\n csv = reader(file)\n for row in csv:\n if not row:\n continue\n data.append(row)\n return data\n\n# Convert string column to float\ndef str_column_to_float(dataset, column):\n\tfor row in dataset:\n\t\trow[column] = float(row[column].strip())\n\ndef int_from_string_col(data,col):\n class_val =[row[col] for row in data]\n unique_set = set(class_val)\n lookup = dict()\n for i, val in enumerate(unique_set):\n lookup[val] = i\n for row in data:\n row[col] = lookup[row[col]]\n return lookup\n\ndef move_class_to_last_col(data,col):\n for row in data:\n temp = row[col]\n del row[col]\n row.append(temp)\n return data\n\n\n\"\"\"\nImplementation Functions\n\"\"\"\n\n\"\"\"\nWe need to calculate the probability of data according to their class so the \ntraining data needs to be split up by classes. In order to do this we need to \nestablish the column that represents the class value for each dataset. 
\n\"\"\"\n# this works for datasets with last column representing class value\ndef split_class(data):\n data_by_class = dict()\n for i in range(len(data)):\n instance = data[i]\n class_val = instance[-1]\n if(class_val not in data_by_class):\n data_by_class[class_val] = list()\n data_by_class[class_val].append(instance)\n return data_by_class\n\n \n\n\n\"\"\"\nWe need to find the mean and standard deviation for each column of input.\n\"\"\"\n\ndef data_stats(data):\n stats = [(avg(col),standev(col),len(col)) for col in zip(*data)] \n del(stats[-1])\n return stats\n \ndef class_stats(data):\n split = split_class(data)\n class_stats = dict()\n for class_val, row in split.items():\n class_stats[class_val] = data_stats(row)\n return class_stats\n\n\"\"\"\nCalculate Class Probabilities\n\"\"\"\ndef class_get_prob(stats,instance):\n num_rows = sum([stats[label][0][2] for label in stats])\n prob_vals = dict()\n for class_val, class_stats in stats.items():\n prob_vals[class_val] = stats[class_val][0][2]/float(num_rows)\n for i in range(len(class_stats)):\n avg,standev,size = class_stats[i]\n prob_vals[class_val] *= probability(instance[i],avg,standev)\n return prob_vals\n\ndef predict(stats,instance):\n prob_vals = class_get_prob(stats,instance)\n top_prob, top_label = -1, None\n for class_val, prob in prob_vals.items():\n if top_label is None or prob > top_prob:\n top_prob = prob\n top_label = class_val\n return top_label\n\ndef cross_validation_split(data, n_folds):\n\tdata_split = list()\n\tcopy = list(data)\n\tfold_size = int(len(data) / n_folds)\n\tfor _ in range(n_folds):\n\t\tfold = list()\n\t\twhile len(fold) < fold_size:\n\t\t\tindex = randrange(len(copy))\n\t\t\tfold.append(copy.pop(index))\n\t\tdata_split.append(fold)\n\treturn data_split\n \ndef evaluate(actual, predicted):\n\tcorrect = 0\n\tfor i in range(len(actual)):\n\t\tif actual[i] == predicted[i]:\n\t\t\tcorrect += 1\n\treturn correct / float(len(actual)) * 100.0\n \ndef cross_validation(data, algo, 
n_folds, *args):\n\tfolds = cross_validation_split(data, n_folds)\n\taccuracy_list = list()\n\tfor fold in folds:\n\t\ttrain_set = list(folds)\n\t\ttrain_set.remove(fold)\n\t\ttrain_set = sum(train_set, [])\n\t\ttest_set = list()\n\t\tfor row in fold:\n\t\t\tcopy = list(row)\n\t\t\ttest_set.append(copy)\n\t\t\tcopy[-1] = None\n\t\tpredicted = algo(train_set, test_set, *args)\n\t\tactual = [row[-1] for row in fold]\n\t\taccuracy = evaluate(actual, predicted)\n\t\taccuracy_list.append(accuracy)\n\treturn accuracy_list\n\ndef naive_bayes(train,test):\n stats = class_stats(train)\n preds = list()\n for row in test:\n result = predict(stats,row)\n preds.append(result)\n return(preds)\n \n\ndef run(file_name, target):\n seed(1)\n data = read_csv(file_name)\n data = move_class_to_last_col(data,target)\n for i in range(len(data[0])-1):\n str_column_to_float(data,i)\n int_from_string_col(data,len(data[0])-1)\n n_folds = 10\n accuracies = cross_validation(data, naive_bayes, n_folds)\n print(\"10-fold Cross-Validation Accuracy Scores\")\n for score in accuracies:\n print(\"%.4f%%\" % score)\n print('Mean Accuracy: %.4f%%' % (sum(accuracies)/float(len(accuracies))))\n \n\n ",
"step-ids": [
9,
11,
12,
18,
19
]
}
|
[
9,
11,
12,
18,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EasyTechConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EasyTechConfig(AppConfig):
name = 'easy_tech'
<|reserved_special_token_1|>
from django.apps import AppConfig
class EasyTechConfig(AppConfig):
name = 'easy_tech'
|
flexible
|
{
"blob_id": "0ef172ced411213c0f7daccd632f8d5ec97379c3",
"index": 5604,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass EasyTechConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass EasyTechConfig(AppConfig):\n name = 'easy_tech'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass EasyTechConfig(AppConfig):\n name = 'easy_tech'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('info', '0010_auto_20200808_2117')]
operations = [migrations.AddField(model_name='profile', name=
'annual_income', field=models.CharField(blank=True, choices=[(
'100000', '<100000'), ('100000-300000', '100000-300000'), (
'300000-600000', '300000-600000'), ('600000-1000000',
'600000-1000000'), ('1000000-1500000', '1000000-1500000'), (
'1500000-2000000', '1500000-2000000'), ('>2000000', '>2000000')],
max_length=20, null=True)), migrations.AddField(model_name=
'profile', name='birthdate', field=models.DateTimeField(blank=True,
null=True)), migrations.AddField(model_name='profile', name=
'birthplace', field=models.CharField(blank=True, max_length=50,
null=True)), migrations.AddField(model_name='profile', name=
'blood_group', field=models.CharField(blank=True, choices=[('-A',
'-A'), ('B', 'B'), ('AB', 'AB'), ('O', 'O')], max_length=10, null=
True)), migrations.AddField(model_name='profile', name='body_type',
field=models.CharField(blank=True, choices=[('Fair', 'Fair'), (
'Black', 'Black'), ('Brown', 'Brown')], max_length=20, null=True)),
migrations.AddField(model_name='profile', name='caste', field=
models.CharField(blank=True, max_length=20, null=True)), migrations
.AddField(model_name='profile', name='education', field=models.
CharField(blank=True, max_length=100, null=True)), migrations.
AddField(model_name='profile', name='education_detail', field=
models.CharField(blank=True, max_length=100, null=True)),
migrations.AddField(model_name='profile', name='height', field=
models.FloatField(blank=True, null=True)), migrations.AddField(
model_name='profile', name='maritial_status', field=models.
CharField(blank=True, choices=[('Single', 'Single'), ('Single',
'Single')], max_length=50, null=True)), migrations.AddField(
model_name='profile', name='mother_tongue', field=models.CharField(
blank=True, choices=[('Assamese', 'Assamese'), ('Bengali',
'Bengali'), ('Bodo', 'Bodo'), ('Dogri', 'Dogri'), ('English',
'English'), ('Gujarati', 'Gujarati'), ('Hindi', 'Hindi'), (
'Kannada', 'Kannada'), ('Kashmiri', 'Kashmiri'), ('Konkani',
'Konkani'), ('Maithili', 'Maithili'), ('Malayalam', 'Malayalam'), (
'Marathi', 'Marathi'), ('Meitei (Manipuri)', 'Meitei (Manipuri)'),
('Nepali', 'Nepali'), ('Odia', 'Odia'), ('Punjabi', 'Punjabi'), (
'Sanskrit', 'Sanskrit'), ('Santali', 'Santali')], max_length=30,
null=True)), migrations.AddField(model_name='profile', name=
'navaras', field=models.CharField(blank=True, max_length=50, null=
True)), migrations.AddField(model_name='profile', name='occupation',
field=models.CharField(blank=True, max_length=200, null=True)),
migrations.AddField(model_name='profile', name='religion', field=
models.CharField(blank=True, choices=[('Hinduism', 'Hinduism'), (
'Islam', 'Islam'), ('Christianity', 'Christianity'), ('Sikhism',
'Sikhism'), ('Buddhism', 'Buddhism'), ('Jainism', 'Jainism'), (
'Zoroastrianism', 'Zoroastrianism')], max_length=30, null=True)),
migrations.AddField(model_name='profile', name='sub_caste', field=
models.CharField(blank=True, max_length=20, null=True)), migrations
.AddField(model_name='profile', name='weight', field=models.
PositiveSmallIntegerField(blank=True, null=True)), migrations.
AlterField(model_name='profile', name='age', field=models.
PositiveSmallIntegerField(blank=True, null=True))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('info', '0010_auto_20200808_2117')]
operations = [migrations.AddField(model_name='profile', name=
'annual_income', field=models.CharField(blank=True, choices=[(
'100000', '<100000'), ('100000-300000', '100000-300000'), (
'300000-600000', '300000-600000'), ('600000-1000000',
'600000-1000000'), ('1000000-1500000', '1000000-1500000'), (
'1500000-2000000', '1500000-2000000'), ('>2000000', '>2000000')],
max_length=20, null=True)), migrations.AddField(model_name=
'profile', name='birthdate', field=models.DateTimeField(blank=True,
null=True)), migrations.AddField(model_name='profile', name=
'birthplace', field=models.CharField(blank=True, max_length=50,
null=True)), migrations.AddField(model_name='profile', name=
'blood_group', field=models.CharField(blank=True, choices=[('-A',
'-A'), ('B', 'B'), ('AB', 'AB'), ('O', 'O')], max_length=10, null=
True)), migrations.AddField(model_name='profile', name='body_type',
field=models.CharField(blank=True, choices=[('Fair', 'Fair'), (
'Black', 'Black'), ('Brown', 'Brown')], max_length=20, null=True)),
migrations.AddField(model_name='profile', name='caste', field=
models.CharField(blank=True, max_length=20, null=True)), migrations
.AddField(model_name='profile', name='education', field=models.
CharField(blank=True, max_length=100, null=True)), migrations.
AddField(model_name='profile', name='education_detail', field=
models.CharField(blank=True, max_length=100, null=True)),
migrations.AddField(model_name='profile', name='height', field=
models.FloatField(blank=True, null=True)), migrations.AddField(
model_name='profile', name='maritial_status', field=models.
CharField(blank=True, choices=[('Single', 'Single'), ('Single',
'Single')], max_length=50, null=True)), migrations.AddField(
model_name='profile', name='mother_tongue', field=models.CharField(
blank=True, choices=[('Assamese', 'Assamese'), ('Bengali',
'Bengali'), ('Bodo', 'Bodo'), ('Dogri', 'Dogri'), ('English',
'English'), ('Gujarati', 'Gujarati'), ('Hindi', 'Hindi'), (
'Kannada', 'Kannada'), ('Kashmiri', 'Kashmiri'), ('Konkani',
'Konkani'), ('Maithili', 'Maithili'), ('Malayalam', 'Malayalam'), (
'Marathi', 'Marathi'), ('Meitei (Manipuri)', 'Meitei (Manipuri)'),
('Nepali', 'Nepali'), ('Odia', 'Odia'), ('Punjabi', 'Punjabi'), (
'Sanskrit', 'Sanskrit'), ('Santali', 'Santali')], max_length=30,
null=True)), migrations.AddField(model_name='profile', name=
'navaras', field=models.CharField(blank=True, max_length=50, null=
True)), migrations.AddField(model_name='profile', name='occupation',
field=models.CharField(blank=True, max_length=200, null=True)),
migrations.AddField(model_name='profile', name='religion', field=
models.CharField(blank=True, choices=[('Hinduism', 'Hinduism'), (
'Islam', 'Islam'), ('Christianity', 'Christianity'), ('Sikhism',
'Sikhism'), ('Buddhism', 'Buddhism'), ('Jainism', 'Jainism'), (
'Zoroastrianism', 'Zoroastrianism')], max_length=30, null=True)),
migrations.AddField(model_name='profile', name='sub_caste', field=
models.CharField(blank=True, max_length=20, null=True)), migrations
.AddField(model_name='profile', name='weight', field=models.
PositiveSmallIntegerField(blank=True, null=True)), migrations.
AlterField(model_name='profile', name='age', field=models.
PositiveSmallIntegerField(blank=True, null=True))]
<|reserved_special_token_1|>
# Generated by Django 3.0.2 on 2020-08-27 16:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('info', '0010_auto_20200808_2117'),
]
operations = [
migrations.AddField(
model_name='profile',
name='annual_income',
field=models.CharField(blank=True, choices=[('100000', '<100000'), ('100000-300000', '100000-300000'), ('300000-600000', '300000-600000'), ('600000-1000000', '600000-1000000'), ('1000000-1500000', '1000000-1500000'), ('1500000-2000000', '1500000-2000000'), ('>2000000', '>2000000')], max_length=20, null=True),
),
migrations.AddField(
model_name='profile',
name='birthdate',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='profile',
name='birthplace',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AddField(
model_name='profile',
name='blood_group',
field=models.CharField(blank=True, choices=[('-A', '-A'), ('B', 'B'), ('AB', 'AB'), ('O', 'O')], max_length=10, null=True),
),
migrations.AddField(
model_name='profile',
name='body_type',
field=models.CharField(blank=True, choices=[('Fair', 'Fair'), ('Black', 'Black'), ('Brown', 'Brown')], max_length=20, null=True),
),
migrations.AddField(
model_name='profile',
name='caste',
field=models.CharField(blank=True, max_length=20, null=True),
),
migrations.AddField(
model_name='profile',
name='education',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='profile',
name='education_detail',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='profile',
name='height',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='profile',
name='maritial_status',
field=models.CharField(blank=True, choices=[('Single', 'Single'), ('Single', 'Single')], max_length=50, null=True),
),
migrations.AddField(
model_name='profile',
name='mother_tongue',
field=models.CharField(blank=True, choices=[('Assamese', 'Assamese'), ('Bengali', 'Bengali'), ('Bodo', 'Bodo'), ('Dogri', 'Dogri'), ('English', 'English'), ('Gujarati', 'Gujarati'), ('Hindi', 'Hindi'), ('Kannada', 'Kannada'), ('Kashmiri', 'Kashmiri'), ('Konkani', 'Konkani'), ('Maithili', 'Maithili'), ('Malayalam', 'Malayalam'), ('Marathi', 'Marathi'), ('Meitei (Manipuri)', 'Meitei (Manipuri)'), ('Nepali', 'Nepali'), ('Odia', 'Odia'), ('Punjabi', 'Punjabi'), ('Sanskrit', 'Sanskrit'), ('Santali', 'Santali')], max_length=30, null=True),
),
migrations.AddField(
model_name='profile',
name='navaras',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AddField(
model_name='profile',
name='occupation',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AddField(
model_name='profile',
name='religion',
field=models.CharField(blank=True, choices=[('Hinduism', 'Hinduism'), ('Islam', 'Islam'), ('Christianity', 'Christianity'), ('Sikhism', 'Sikhism'), ('Buddhism', 'Buddhism'), ('Jainism', 'Jainism'), ('Zoroastrianism', 'Zoroastrianism')], max_length=30, null=True),
),
migrations.AddField(
model_name='profile',
name='sub_caste',
field=models.CharField(blank=True, max_length=20, null=True),
),
migrations.AddField(
model_name='profile',
name='weight',
field=models.PositiveSmallIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='profile',
name='age',
field=models.PositiveSmallIntegerField(blank=True, null=True),
),
]
|
flexible
|
{
"blob_id": "45b2b611a80b93c9a7d8ec8a09e5838147e1ea76",
"index": 8626,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('info', '0010_auto_20200808_2117')]\n operations = [migrations.AddField(model_name='profile', name=\n 'annual_income', field=models.CharField(blank=True, choices=[(\n '100000', '<100000'), ('100000-300000', '100000-300000'), (\n '300000-600000', '300000-600000'), ('600000-1000000',\n '600000-1000000'), ('1000000-1500000', '1000000-1500000'), (\n '1500000-2000000', '1500000-2000000'), ('>2000000', '>2000000')],\n max_length=20, null=True)), migrations.AddField(model_name=\n 'profile', name='birthdate', field=models.DateTimeField(blank=True,\n null=True)), migrations.AddField(model_name='profile', name=\n 'birthplace', field=models.CharField(blank=True, max_length=50,\n null=True)), migrations.AddField(model_name='profile', name=\n 'blood_group', field=models.CharField(blank=True, choices=[('-A',\n '-A'), ('B', 'B'), ('AB', 'AB'), ('O', 'O')], max_length=10, null=\n True)), migrations.AddField(model_name='profile', name='body_type',\n field=models.CharField(blank=True, choices=[('Fair', 'Fair'), (\n 'Black', 'Black'), ('Brown', 'Brown')], max_length=20, null=True)),\n migrations.AddField(model_name='profile', name='caste', field=\n models.CharField(blank=True, max_length=20, null=True)), migrations\n .AddField(model_name='profile', name='education', field=models.\n CharField(blank=True, max_length=100, null=True)), migrations.\n AddField(model_name='profile', name='education_detail', field=\n models.CharField(blank=True, max_length=100, null=True)),\n migrations.AddField(model_name='profile', name='height', field=\n models.FloatField(blank=True, null=True)), migrations.AddField(\n model_name='profile', name='maritial_status', field=models.\n CharField(blank=True, choices=[('Single', 'Single'), ('Single',\n 'Single')], max_length=50, null=True)), migrations.AddField(\n model_name='profile', name='mother_tongue', field=models.CharField(\n blank=True, choices=[('Assamese', 
'Assamese'), ('Bengali',\n 'Bengali'), ('Bodo', 'Bodo'), ('Dogri', 'Dogri'), ('English',\n 'English'), ('Gujarati', 'Gujarati'), ('Hindi', 'Hindi'), (\n 'Kannada', 'Kannada'), ('Kashmiri', 'Kashmiri'), ('Konkani',\n 'Konkani'), ('Maithili', 'Maithili'), ('Malayalam', 'Malayalam'), (\n 'Marathi', 'Marathi'), ('Meitei (Manipuri)', 'Meitei (Manipuri)'),\n ('Nepali', 'Nepali'), ('Odia', 'Odia'), ('Punjabi', 'Punjabi'), (\n 'Sanskrit', 'Sanskrit'), ('Santali', 'Santali')], max_length=30,\n null=True)), migrations.AddField(model_name='profile', name=\n 'navaras', field=models.CharField(blank=True, max_length=50, null=\n True)), migrations.AddField(model_name='profile', name='occupation',\n field=models.CharField(blank=True, max_length=200, null=True)),\n migrations.AddField(model_name='profile', name='religion', field=\n models.CharField(blank=True, choices=[('Hinduism', 'Hinduism'), (\n 'Islam', 'Islam'), ('Christianity', 'Christianity'), ('Sikhism',\n 'Sikhism'), ('Buddhism', 'Buddhism'), ('Jainism', 'Jainism'), (\n 'Zoroastrianism', 'Zoroastrianism')], max_length=30, null=True)),\n migrations.AddField(model_name='profile', name='sub_caste', field=\n models.CharField(blank=True, max_length=20, null=True)), migrations\n .AddField(model_name='profile', name='weight', field=models.\n PositiveSmallIntegerField(blank=True, null=True)), migrations.\n AlterField(model_name='profile', name='age', field=models.\n PositiveSmallIntegerField(blank=True, null=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('info', '0010_auto_20200808_2117')]\n operations = [migrations.AddField(model_name='profile', name=\n 'annual_income', field=models.CharField(blank=True, choices=[(\n '100000', '<100000'), ('100000-300000', '100000-300000'), (\n '300000-600000', '300000-600000'), ('600000-1000000',\n '600000-1000000'), ('1000000-1500000', '1000000-1500000'), (\n '1500000-2000000', '1500000-2000000'), ('>2000000', '>2000000')],\n max_length=20, null=True)), migrations.AddField(model_name=\n 'profile', name='birthdate', field=models.DateTimeField(blank=True,\n null=True)), migrations.AddField(model_name='profile', name=\n 'birthplace', field=models.CharField(blank=True, max_length=50,\n null=True)), migrations.AddField(model_name='profile', name=\n 'blood_group', field=models.CharField(blank=True, choices=[('-A',\n '-A'), ('B', 'B'), ('AB', 'AB'), ('O', 'O')], max_length=10, null=\n True)), migrations.AddField(model_name='profile', name='body_type',\n field=models.CharField(blank=True, choices=[('Fair', 'Fair'), (\n 'Black', 'Black'), ('Brown', 'Brown')], max_length=20, null=True)),\n migrations.AddField(model_name='profile', name='caste', field=\n models.CharField(blank=True, max_length=20, null=True)), migrations\n .AddField(model_name='profile', name='education', field=models.\n CharField(blank=True, max_length=100, null=True)), migrations.\n AddField(model_name='profile', name='education_detail', field=\n models.CharField(blank=True, max_length=100, null=True)),\n migrations.AddField(model_name='profile', name='height', field=\n models.FloatField(blank=True, null=True)), migrations.AddField(\n model_name='profile', name='maritial_status', field=models.\n CharField(blank=True, choices=[('Single', 'Single'), ('Single',\n 'Single')], max_length=50, null=True)), migrations.AddField(\n model_name='profile', name='mother_tongue', field=models.CharField(\n blank=True, 
choices=[('Assamese', 'Assamese'), ('Bengali',\n 'Bengali'), ('Bodo', 'Bodo'), ('Dogri', 'Dogri'), ('English',\n 'English'), ('Gujarati', 'Gujarati'), ('Hindi', 'Hindi'), (\n 'Kannada', 'Kannada'), ('Kashmiri', 'Kashmiri'), ('Konkani',\n 'Konkani'), ('Maithili', 'Maithili'), ('Malayalam', 'Malayalam'), (\n 'Marathi', 'Marathi'), ('Meitei (Manipuri)', 'Meitei (Manipuri)'),\n ('Nepali', 'Nepali'), ('Odia', 'Odia'), ('Punjabi', 'Punjabi'), (\n 'Sanskrit', 'Sanskrit'), ('Santali', 'Santali')], max_length=30,\n null=True)), migrations.AddField(model_name='profile', name=\n 'navaras', field=models.CharField(blank=True, max_length=50, null=\n True)), migrations.AddField(model_name='profile', name='occupation',\n field=models.CharField(blank=True, max_length=200, null=True)),\n migrations.AddField(model_name='profile', name='religion', field=\n models.CharField(blank=True, choices=[('Hinduism', 'Hinduism'), (\n 'Islam', 'Islam'), ('Christianity', 'Christianity'), ('Sikhism',\n 'Sikhism'), ('Buddhism', 'Buddhism'), ('Jainism', 'Jainism'), (\n 'Zoroastrianism', 'Zoroastrianism')], max_length=30, null=True)),\n migrations.AddField(model_name='profile', name='sub_caste', field=\n models.CharField(blank=True, max_length=20, null=True)), migrations\n .AddField(model_name='profile', name='weight', field=models.\n PositiveSmallIntegerField(blank=True, null=True)), migrations.\n AlterField(model_name='profile', name='age', field=models.\n PositiveSmallIntegerField(blank=True, null=True))]\n",
"step-5": "# Generated by Django 3.0.2 on 2020-08-27 16:03\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('info', '0010_auto_20200808_2117'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='profile',\n name='annual_income',\n field=models.CharField(blank=True, choices=[('100000', '<100000'), ('100000-300000', '100000-300000'), ('300000-600000', '300000-600000'), ('600000-1000000', '600000-1000000'), ('1000000-1500000', '1000000-1500000'), ('1500000-2000000', '1500000-2000000'), ('>2000000', '>2000000')], max_length=20, null=True),\n ),\n migrations.AddField(\n model_name='profile',\n name='birthdate',\n field=models.DateTimeField(blank=True, null=True),\n ),\n migrations.AddField(\n model_name='profile',\n name='birthplace',\n field=models.CharField(blank=True, max_length=50, null=True),\n ),\n migrations.AddField(\n model_name='profile',\n name='blood_group',\n field=models.CharField(blank=True, choices=[('-A', '-A'), ('B', 'B'), ('AB', 'AB'), ('O', 'O')], max_length=10, null=True),\n ),\n migrations.AddField(\n model_name='profile',\n name='body_type',\n field=models.CharField(blank=True, choices=[('Fair', 'Fair'), ('Black', 'Black'), ('Brown', 'Brown')], max_length=20, null=True),\n ),\n migrations.AddField(\n model_name='profile',\n name='caste',\n field=models.CharField(blank=True, max_length=20, null=True),\n ),\n migrations.AddField(\n model_name='profile',\n name='education',\n field=models.CharField(blank=True, max_length=100, null=True),\n ),\n migrations.AddField(\n model_name='profile',\n name='education_detail',\n field=models.CharField(blank=True, max_length=100, null=True),\n ),\n migrations.AddField(\n model_name='profile',\n name='height',\n field=models.FloatField(blank=True, null=True),\n ),\n migrations.AddField(\n model_name='profile',\n name='maritial_status',\n field=models.CharField(blank=True, choices=[('Single', 'Single'), ('Single', 'Single')], max_length=50, 
null=True),\n ),\n migrations.AddField(\n model_name='profile',\n name='mother_tongue',\n field=models.CharField(blank=True, choices=[('Assamese', 'Assamese'), ('Bengali', 'Bengali'), ('Bodo', 'Bodo'), ('Dogri', 'Dogri'), ('English', 'English'), ('Gujarati', 'Gujarati'), ('Hindi', 'Hindi'), ('Kannada', 'Kannada'), ('Kashmiri', 'Kashmiri'), ('Konkani', 'Konkani'), ('Maithili', 'Maithili'), ('Malayalam', 'Malayalam'), ('Marathi', 'Marathi'), ('Meitei (Manipuri)', 'Meitei (Manipuri)'), ('Nepali', 'Nepali'), ('Odia', 'Odia'), ('Punjabi', 'Punjabi'), ('Sanskrit', 'Sanskrit'), ('Santali', 'Santali')], max_length=30, null=True),\n ),\n migrations.AddField(\n model_name='profile',\n name='navaras',\n field=models.CharField(blank=True, max_length=50, null=True),\n ),\n migrations.AddField(\n model_name='profile',\n name='occupation',\n field=models.CharField(blank=True, max_length=200, null=True),\n ),\n migrations.AddField(\n model_name='profile',\n name='religion',\n field=models.CharField(blank=True, choices=[('Hinduism', 'Hinduism'), ('Islam', 'Islam'), ('Christianity', 'Christianity'), ('Sikhism', 'Sikhism'), ('Buddhism', 'Buddhism'), ('Jainism', 'Jainism'), ('Zoroastrianism', 'Zoroastrianism')], max_length=30, null=True),\n ),\n migrations.AddField(\n model_name='profile',\n name='sub_caste',\n field=models.CharField(blank=True, max_length=20, null=True),\n ),\n migrations.AddField(\n model_name='profile',\n name='weight',\n field=models.PositiveSmallIntegerField(blank=True, null=True),\n ),\n migrations.AlterField(\n model_name='profile',\n name='age',\n field=models.PositiveSmallIntegerField(blank=True, null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if silva == True:
print('Existe Silva nesse nome')
else:
print('Não há Silva nesse nome')
<|reserved_special_token_1|>
nome = str(input('Digite um nome completo: ')).lower()
silva = 'silva' in nome
if silva == True:
print('Existe Silva nesse nome')
else:
print('Não há Silva nesse nome')
|
flexible
|
{
"blob_id": "faebefcadbc184fab29deb2988089223a8f09e7e",
"index": 8219,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif silva == True:\n print('Existe Silva nesse nome')\nelse:\n print('Não há Silva nesse nome')\n",
"step-3": "nome = str(input('Digite um nome completo: ')).lower()\nsilva = 'silva' in nome\nif silva == True:\n print('Existe Silva nesse nome')\nelse:\n print('Não há Silva nesse nome')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def delete_devices():
"""."""
db = tango.Database()
class_list = db.get_class_list('*')
print('class list = ', class_list)
server_list = db.get_server_list('*')
print('server list = ', server_list)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def delete_devices():
"""."""
db = tango.Database()
class_list = db.get_class_list('*')
print('class list = ', class_list)
server_list = db.get_server_list('*')
print('server list = ', server_list)
def delete_server():
"""."""
db = tango.Database()
db.delete_server('')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def delete_devices():
"""."""
db = tango.Database()
class_list = db.get_class_list('*')
print('class list = ', class_list)
server_list = db.get_server_list('*')
print('server list = ', server_list)
def delete_server():
"""."""
db = tango.Database()
db.delete_server('')
if __name__ == '__main__':
delete_devices()
<|reserved_special_token_1|>
import argparse
import logging
import tango
def delete_devices():
"""."""
db = tango.Database()
class_list = db.get_class_list('*')
print('class list = ', class_list)
server_list = db.get_server_list('*')
print('server list = ', server_list)
def delete_server():
"""."""
db = tango.Database()
db.delete_server('')
if __name__ == '__main__':
delete_devices()
<|reserved_special_token_1|>
#!/usr/bin/env python3
import argparse
import logging
import tango
def delete_devices():
"""."""
db = tango.Database()
class_list = db.get_class_list('*')
print('class list = ', class_list)
server_list = db.get_server_list('*')
print('server list = ', server_list)
# for index in range(num_devices):
# name = 'low_sdp/elt/test_device_{:05d}'.format(index)
# db.delete_server('TestDevice/test1')
# db.delete_device('tango/test1/000')
def delete_server():
"""."""
db = tango.Database()
db.delete_server('')
if __name__ == '__main__':
delete_devices()
|
flexible
|
{
"blob_id": "f3dad6a474d5882beaac7d98f8f60c347730ee55",
"index": 8428,
"step-1": "<mask token>\n\n\ndef delete_devices():\n \"\"\".\"\"\"\n db = tango.Database()\n class_list = db.get_class_list('*')\n print('class list = ', class_list)\n server_list = db.get_server_list('*')\n print('server list = ', server_list)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef delete_devices():\n \"\"\".\"\"\"\n db = tango.Database()\n class_list = db.get_class_list('*')\n print('class list = ', class_list)\n server_list = db.get_server_list('*')\n print('server list = ', server_list)\n\n\ndef delete_server():\n \"\"\".\"\"\"\n db = tango.Database()\n db.delete_server('')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef delete_devices():\n \"\"\".\"\"\"\n db = tango.Database()\n class_list = db.get_class_list('*')\n print('class list = ', class_list)\n server_list = db.get_server_list('*')\n print('server list = ', server_list)\n\n\ndef delete_server():\n \"\"\".\"\"\"\n db = tango.Database()\n db.delete_server('')\n\n\nif __name__ == '__main__':\n delete_devices()\n",
"step-4": "import argparse\nimport logging\nimport tango\n\n\ndef delete_devices():\n \"\"\".\"\"\"\n db = tango.Database()\n class_list = db.get_class_list('*')\n print('class list = ', class_list)\n server_list = db.get_server_list('*')\n print('server list = ', server_list)\n\n\ndef delete_server():\n \"\"\".\"\"\"\n db = tango.Database()\n db.delete_server('')\n\n\nif __name__ == '__main__':\n delete_devices()\n",
"step-5": "#!/usr/bin/env python3\nimport argparse\nimport logging\n\nimport tango\n\n\ndef delete_devices():\n \"\"\".\"\"\"\n db = tango.Database()\n class_list = db.get_class_list('*')\n print('class list = ', class_list)\n server_list = db.get_server_list('*')\n print('server list = ', server_list)\n\n # for index in range(num_devices):\n # name = 'low_sdp/elt/test_device_{:05d}'.format(index)\n\n # db.delete_server('TestDevice/test1')\n # db.delete_device('tango/test1/000')\n\n\ndef delete_server():\n \"\"\".\"\"\"\n db = tango.Database()\n db.delete_server('')\n\n\nif __name__ == '__main__':\n delete_devices()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def process_mile(price, use_time, mile):
"""
mile处理
"""
mile_per_month = mile / use_time
if mile_per_month < gl.MILE_THRESHOLD_2_5:
return price + 0.035 * (1 - mile_per_month / gl.MILE_THRESHOLD_2_5
) * price
elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:
return price - (0.04 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.035
) * price
elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:
return price - (0.05 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.075
) * price
else:
return price - 0.125 * price
<|reserved_special_token_0|>
def process_prices_relate(dealer_price, cpersonal_price):
"""
人工处理三类价格的相关性
"""
buy = dealer_price
private = cpersonal_price
private_buy_rate = (buy - private) / private
if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):
private = int(buy * (1 - 0.0875))
sell = int(private * (1 - 0.0525))
return buy, private, sell
<|reserved_special_token_0|>
def check_params_value(city, model_detail_slug, use_time, mile, category):
"""
校验参数
"""
if city not in cities:
raise ApiParamsValueError('city', city, 'Unknown city!')
if model_detail_slug not in models:
raise ApiParamsValueError('model_detail_slug', model_detail_slug,
'Unknown model!')
if not isinstance(mile, int) | isinstance(mile, float):
raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')
elif mile < 0:
raise ApiParamsValueError('mile', mile,
'Mile must be greater than zero!')
if not isinstance(use_time, int):
raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')
if category == 'valuate':
if (use_time < 1) | (use_time > 240):
raise ApiParamsValueError('use_time', use_time,
'The use_time of Forecast must be in 1-240!')
elif category == 'history':
if (use_time < 1) | (use_time > 240):
raise ApiParamsValueError('use_time', use_time,
'The use_time of historical trend must be in 1-240!')
elif category == 'future':
if (use_time < 1) | (use_time > 240):
raise ApiParamsValueError('use_time', use_time,
'The use_time of future trend must be in 1-240!')
class Predict(object):
def __init__(self):
"""
加载各类匹配表和模型
"""
self.result = []
self.valuate_model = []
def add_process_intent(self, buy, private, sell, popularity, price_bn):
"""
根据交易方式修正预测值
"""
self.result = result_map.copy()
self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy
self.result.loc[self.result['intent'] == 'private', 'predict_price'
] = private
self.result.loc[self.result['intent'] == 'sell', 'predict_price'
] = sell
self.result['predict_price'] = self.result['predict_price'].fillna(buy)
self.result['popularity'] = popularity
self.result['profit_rate'] = self.result.apply(process_profit_rate,
axis=1)
self.result['buy_profit_rate'] = self.result.apply(
process_buy_profit_rate, axis=1)
self.result['predict_price'] = self.result['predict_price'
] / self.result['buy_profit_rate']
self.result['predict_price'] = self.result['profit_rate'
] * self.result['predict_price']
self.result = cal_intent_condition(self.result.predict_price.values,
price_bn)
def follow_process(self, use_time, mile, price_bn, dealer_hedge,
cpersonal_hedge, province, model_slug, model_detail_slug):
"""
后续跟进处理
"""
dealer_price, cpersonal_price = (dealer_hedge * price_bn,
cpersonal_hedge * price_bn)
dealer_price = process_mile(dealer_price, use_time, mile)
cpersonal_price = process_mile(cpersonal_price, use_time, mile)
buy, private, sell = process_prices_relate(dealer_price,
cpersonal_price)
index = str(model_slug) + '_' + str(province)
if index in province_popularity_index:
popularity = province_popularity_map.loc[index, 'popularity']
else:
popularity = 'C'
rate = process_adjust_profit(model_detail_slug, popularity)
buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (
1 + rate)
return buy, private, sell, popularity
def predict(self, city='深圳', model_detail_slug='model_25023_cs',
use_time=12, mile=2, ret_type='records'):
"""
预测返回
"""
check_params_value(city, model_detail_slug, use_time, mile,
category='valuate')
price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
price_bn = price_bn * 10000
province = province_city_map.loc[city, 'province']
model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
final_model_detail_slug = model_detail_map.loc[model_detail_slug,
'final_model_detail_slug']
dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug
, city, use_time)
buy, private, sell, popularity = self.follow_process(use_time, mile,
price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,
model_detail_slug)
self.add_process_intent(buy, private, sell, popularity, price_bn)
if ret_type == 'records':
return self.result.to_dict('records')
else:
return self.result
def predict_for_history(self, city='深圳', model_detail_slug=
'model_25023_cs', use_time=12, mile=2):
"""
预测历史数据返回
"""
check_params_value(city, model_detail_slug, use_time, mile,
category='valuate')
price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
price_bn = price_bn * 10000
province = province_city_map.loc[city, 'province']
model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
final_model_detail_slug = model_detail_map.loc[model_detail_slug,
'final_model_detail_slug']
data_buy = []
data_sell = []
data_private = []
result = predict_from_db_history(final_model_detail_slug, city,
use_time)
for dealer_hedge, cpersonal_hedge, use_time_per in result:
buy, private, sell, popularity = self.follow_process(use_time_per,
mile, price_bn, dealer_hedge, cpersonal_hedge, province,
model_slug, model_detail_slug)
data_buy.append(int(buy))
data_private.append(int(private))
data_sell.append(int(sell))
return data_buy, data_private, data_sell
def predict_for_future(self, city='深圳', model_detail_slug=
'model_25023_cs', use_time=12, mile=2, times=3):
"""
预测历史数据返回
"""
check_params_value(city, model_detail_slug, use_time, mile,
category='valuate')
price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
price_bn = price_bn * 10000
province = province_city_map.loc[city, 'province']
model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
final_model_detail_slug = model_detail_map.loc[model_detail_slug,
'final_model_detail_slug']
data_buy = []
data_sell = []
data_private = []
result = predict_from_db_future(final_model_detail_slug, city,
use_time, times)
for dealer_hedge, cpersonal_hedge, use_time_per in result:
buy, private, sell, popularity = self.follow_process(use_time_per,
mile, price_bn, dealer_hedge, cpersonal_hedge, province,
model_slug, model_detail_slug)
data_buy.append(int(buy))
data_private.append(int(private))
data_sell.append(int(sell))
return data_buy, data_private, data_sell
def history_price_trend(self, city='深圳', model_detail_slug=
'model_25023_cs', use_time=12, mile=2, ret_type='records'):
"""
计算历史价格趋势
"""
check_params_value(city, model_detail_slug, use_time, mile,
category='history')
times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']
nums = 6
if use_time <= 6:
times_str = []
nums = use_time - 1
for i in range(0, nums + 1):
times_str.append(str(-i))
data_buy, data_private, data_sell = self.predict_for_history(city,
model_detail_slug, use_time, mile)
data_buy = process_unreasonable_history_price(data_buy, nums)
data_sell = process_unreasonable_history_price(data_sell, nums)
data_private = process_unreasonable_history_price(data_private, nums)
result_b_2_c = pd.DataFrame([data_buy], columns=times_str)
result_b_2_c['type'] = 'buy'
result_c_2_b = pd.DataFrame([data_sell], columns=times_str)
result_c_2_b['type'] = 'sell'
result_c_2_c = pd.DataFrame([data_private], columns=times_str)
result_c_2_c['type'] = 'private'
result = result_b_2_c.append(result_c_2_b, ignore_index=True)
result = result.append(result_c_2_c, ignore_index=True)
if ret_type == 'records':
return result.to_dict('records')
else:
return result
def future_price_trend(self, city='深圳', model_detail_slug=
'model_25023_cs', use_time=365, mile=2, ret_type='records'):
"""
计算未来价格趋势
"""
check_params_value(city, model_detail_slug, use_time, mile,
category='future')
times_str = ['0', '12', '24', '36']
nums = 3
if use_time > 204:
times_str = []
nums = int((240 - use_time) / 12)
for i in range(0, nums + 1):
times_str.append(str(i * 12))
data_buy, data_private, data_sell = self.predict_for_future(city,
model_detail_slug, use_time, mile, len(times_str))
data_buy = process_unreasonable_future_price(data_buy, nums)
data_sell = process_unreasonable_future_price(data_sell, nums)
data_private = process_unreasonable_future_price(data_private, nums)
result_b_2_c = pd.DataFrame([data_buy], columns=times_str)
result_b_2_c['type'] = 'buy'
result_c_2_b = pd.DataFrame([data_sell], columns=times_str)
result_c_2_b['type'] = 'sell'
result_c_2_c = pd.DataFrame([data_private], columns=times_str)
result_c_2_c['type'] = 'private'
result = result_b_2_c.append(result_c_2_b, ignore_index=True)
result = result.append(result_c_2_c, ignore_index=True)
if ret_type == 'records':
return result.to_dict('records')
else:
return result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def process_mile(price, use_time, mile):
"""
mile处理
"""
mile_per_month = mile / use_time
if mile_per_month < gl.MILE_THRESHOLD_2_5:
return price + 0.035 * (1 - mile_per_month / gl.MILE_THRESHOLD_2_5
) * price
elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:
return price - (0.04 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.035
) * price
elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:
return price - (0.05 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.075
) * price
else:
return price - 0.125 * price
<|reserved_special_token_0|>
def process_unreasonable_history_price(data, nums):
"""
处理不合理历史价格趋势
"""
if nums == 0:
return data
temp = data[1:]
temp.sort()
for i, value in enumerate(temp):
data[i + 1] = temp[i]
for i in range(0, nums):
rate = (data[i + 1] - data[i]) / data[i + 1]
if (data[i] >= data[i + 1]) | (0.003 > rate) | (0.0157 < rate):
data[i + 1] = int(data[i] * 1.0083)
return data
<|reserved_special_token_0|>
def process_fill_zero(hedge):
temp = hedge
if len(hedge) < 18:
for i in range(0, 18 - len(hedge)):
temp = '0' + temp
return temp
<|reserved_special_token_0|>
def predict_from_db_history(model_detail_slug, city, use_time):
"""
从生产库查询预测
"""
city_id = province_city_map.loc[city, 'city_id']
model_detail_slug_id = model_detail_map.loc[model_detail_slug,
'final_model_detail_slug_id']
if use_time % 6 == 0:
column_num = int(use_time / 6) - 1
month_num = 6
else:
column_num = int(use_time / 6)
month_num = use_time % 6
dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(
model_detail_slug_id, city_id, column_num, use_time)
result = []
if len(dealer_hedge) == 1:
dealer_hedge = process_fill_zero(dealer_hedge[0])
cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])
for i in range(0, use_time):
dealer_per = dealer_hedge[i * 3:(i + 1) * 3]
cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]
result.append([int(dealer_per) / 1000, int(cpersonal_per) /
1000, use_time])
result.reverse()
elif len(dealer_hedge) == 2:
dealer_hedge = process_fill_zero(dealer_hedge[0]) + process_fill_zero(
dealer_hedge[1])
cpersonal_hedge = process_fill_zero(cpersonal_hedge[0]
) + process_fill_zero(cpersonal_hedge[1])
for i in range(month_num - 1, month_num + 6):
dealer_per = dealer_hedge[i * 3:(i + 1) * 3]
cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]
result.append([int(dealer_per) / 1000, int(cpersonal_per) /
1000, use_time])
result.reverse()
return result
<|reserved_special_token_0|>
def process_prices_relate(dealer_price, cpersonal_price):
"""
人工处理三类价格的相关性
"""
buy = dealer_price
private = cpersonal_price
private_buy_rate = (buy - private) / private
if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):
private = int(buy * (1 - 0.0875))
sell = int(private * (1 - 0.0525))
return buy, private, sell
<|reserved_special_token_0|>
def check_params_value(city, model_detail_slug, use_time, mile, category):
"""
校验参数
"""
if city not in cities:
raise ApiParamsValueError('city', city, 'Unknown city!')
if model_detail_slug not in models:
raise ApiParamsValueError('model_detail_slug', model_detail_slug,
'Unknown model!')
if not isinstance(mile, int) | isinstance(mile, float):
raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')
elif mile < 0:
raise ApiParamsValueError('mile', mile,
'Mile must be greater than zero!')
if not isinstance(use_time, int):
raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')
if category == 'valuate':
if (use_time < 1) | (use_time > 240):
raise ApiParamsValueError('use_time', use_time,
'The use_time of Forecast must be in 1-240!')
elif category == 'history':
if (use_time < 1) | (use_time > 240):
raise ApiParamsValueError('use_time', use_time,
'The use_time of historical trend must be in 1-240!')
elif category == 'future':
if (use_time < 1) | (use_time > 240):
raise ApiParamsValueError('use_time', use_time,
'The use_time of future trend must be in 1-240!')
class Predict(object):
def __init__(self):
"""
加载各类匹配表和模型
"""
self.result = []
self.valuate_model = []
def add_process_intent(self, buy, private, sell, popularity, price_bn):
"""
根据交易方式修正预测值
"""
self.result = result_map.copy()
self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy
self.result.loc[self.result['intent'] == 'private', 'predict_price'
] = private
self.result.loc[self.result['intent'] == 'sell', 'predict_price'
] = sell
self.result['predict_price'] = self.result['predict_price'].fillna(buy)
self.result['popularity'] = popularity
self.result['profit_rate'] = self.result.apply(process_profit_rate,
axis=1)
self.result['buy_profit_rate'] = self.result.apply(
process_buy_profit_rate, axis=1)
self.result['predict_price'] = self.result['predict_price'
] / self.result['buy_profit_rate']
self.result['predict_price'] = self.result['profit_rate'
] * self.result['predict_price']
self.result = cal_intent_condition(self.result.predict_price.values,
price_bn)
def follow_process(self, use_time, mile, price_bn, dealer_hedge,
cpersonal_hedge, province, model_slug, model_detail_slug):
"""
后续跟进处理
"""
dealer_price, cpersonal_price = (dealer_hedge * price_bn,
cpersonal_hedge * price_bn)
dealer_price = process_mile(dealer_price, use_time, mile)
cpersonal_price = process_mile(cpersonal_price, use_time, mile)
buy, private, sell = process_prices_relate(dealer_price,
cpersonal_price)
index = str(model_slug) + '_' + str(province)
if index in province_popularity_index:
popularity = province_popularity_map.loc[index, 'popularity']
else:
popularity = 'C'
rate = process_adjust_profit(model_detail_slug, popularity)
buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (
1 + rate)
return buy, private, sell, popularity
def predict(self, city='深圳', model_detail_slug='model_25023_cs',
use_time=12, mile=2, ret_type='records'):
"""
预测返回
"""
check_params_value(city, model_detail_slug, use_time, mile,
category='valuate')
price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
price_bn = price_bn * 10000
province = province_city_map.loc[city, 'province']
model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
final_model_detail_slug = model_detail_map.loc[model_detail_slug,
'final_model_detail_slug']
dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug
, city, use_time)
buy, private, sell, popularity = self.follow_process(use_time, mile,
price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,
model_detail_slug)
self.add_process_intent(buy, private, sell, popularity, price_bn)
if ret_type == 'records':
return self.result.to_dict('records')
else:
return self.result
def predict_for_history(self, city='深圳', model_detail_slug=
'model_25023_cs', use_time=12, mile=2):
"""
预测历史数据返回
"""
check_params_value(city, model_detail_slug, use_time, mile,
category='valuate')
price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
price_bn = price_bn * 10000
province = province_city_map.loc[city, 'province']
model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
final_model_detail_slug = model_detail_map.loc[model_detail_slug,
'final_model_detail_slug']
data_buy = []
data_sell = []
data_private = []
result = predict_from_db_history(final_model_detail_slug, city,
use_time)
for dealer_hedge, cpersonal_hedge, use_time_per in result:
buy, private, sell, popularity = self.follow_process(use_time_per,
mile, price_bn, dealer_hedge, cpersonal_hedge, province,
model_slug, model_detail_slug)
data_buy.append(int(buy))
data_private.append(int(private))
data_sell.append(int(sell))
return data_buy, data_private, data_sell
def predict_for_future(self, city='深圳', model_detail_slug=
'model_25023_cs', use_time=12, mile=2, times=3):
"""
预测历史数据返回
"""
check_params_value(city, model_detail_slug, use_time, mile,
category='valuate')
price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
price_bn = price_bn * 10000
province = province_city_map.loc[city, 'province']
model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
final_model_detail_slug = model_detail_map.loc[model_detail_slug,
'final_model_detail_slug']
data_buy = []
data_sell = []
data_private = []
result = predict_from_db_future(final_model_detail_slug, city,
use_time, times)
for dealer_hedge, cpersonal_hedge, use_time_per in result:
buy, private, sell, popularity = self.follow_process(use_time_per,
mile, price_bn, dealer_hedge, cpersonal_hedge, province,
model_slug, model_detail_slug)
data_buy.append(int(buy))
data_private.append(int(private))
data_sell.append(int(sell))
return data_buy, data_private, data_sell
def history_price_trend(self, city='深圳', model_detail_slug=
'model_25023_cs', use_time=12, mile=2, ret_type='records'):
"""
计算历史价格趋势
"""
check_params_value(city, model_detail_slug, use_time, mile,
category='history')
times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']
nums = 6
if use_time <= 6:
times_str = []
nums = use_time - 1
for i in range(0, nums + 1):
times_str.append(str(-i))
data_buy, data_private, data_sell = self.predict_for_history(city,
model_detail_slug, use_time, mile)
data_buy = process_unreasonable_history_price(data_buy, nums)
data_sell = process_unreasonable_history_price(data_sell, nums)
data_private = process_unreasonable_history_price(data_private, nums)
result_b_2_c = pd.DataFrame([data_buy], columns=times_str)
result_b_2_c['type'] = 'buy'
result_c_2_b = pd.DataFrame([data_sell], columns=times_str)
result_c_2_b['type'] = 'sell'
result_c_2_c = pd.DataFrame([data_private], columns=times_str)
result_c_2_c['type'] = 'private'
result = result_b_2_c.append(result_c_2_b, ignore_index=True)
result = result.append(result_c_2_c, ignore_index=True)
if ret_type == 'records':
return result.to_dict('records')
else:
return result
def future_price_trend(self, city='深圳', model_detail_slug=
'model_25023_cs', use_time=365, mile=2, ret_type='records'):
"""
计算未来价格趋势
"""
check_params_value(city, model_detail_slug, use_time, mile,
category='future')
times_str = ['0', '12', '24', '36']
nums = 3
if use_time > 204:
times_str = []
nums = int((240 - use_time) / 12)
for i in range(0, nums + 1):
times_str.append(str(i * 12))
data_buy, data_private, data_sell = self.predict_for_future(city,
model_detail_slug, use_time, mile, len(times_str))
data_buy = process_unreasonable_future_price(data_buy, nums)
data_sell = process_unreasonable_future_price(data_sell, nums)
data_private = process_unreasonable_future_price(data_private, nums)
result_b_2_c = pd.DataFrame([data_buy], columns=times_str)
result_b_2_c['type'] = 'buy'
result_c_2_b = pd.DataFrame([data_sell], columns=times_str)
result_c_2_b['type'] = 'sell'
result_c_2_c = pd.DataFrame([data_private], columns=times_str)
result_c_2_c['type'] = 'private'
result = result_b_2_c.append(result_c_2_b, ignore_index=True)
result = result.append(result_c_2_c, ignore_index=True)
if ret_type == 'records':
return result.to_dict('records')
else:
return result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def process_mile(price, use_time, mile):
    """Adjust *price* by average mileage per month of use."""
    monthly_mile = mile / use_time
    # Light use earns a premium of up to +3.5%.
    if monthly_mile < gl.MILE_THRESHOLD_2_5:
        return price + 0.035 * (1 - monthly_mile / gl.MILE_THRESHOLD_2_5) * price
    # Heavier use applies a graduated discount, capped at 12.5%.
    if monthly_mile < gl.MILE_THRESHOLD_5:
        penalty = 0.04 * (monthly_mile / gl.MILE_THRESHOLD_5) + 0.035
    elif monthly_mile < gl.MILE_THRESHOLD_10:
        penalty = 0.05 * (monthly_mile / gl.MILE_THRESHOLD_5) + 0.075
    else:
        penalty = 0.125
    return price - penalty * price
<|reserved_special_token_0|>
def process_unreasonable_history_price(data, nums):
    """Smooth a history price series in place.

    The tail (everything after the first point) is sorted ascending, then
    each step is forced to grow by roughly 0.3%-1.57%; an out-of-band step
    is rewritten as previous * 1.0083. Returns the (mutated) list.
    """
    if nums == 0:
        return data
    data[1:] = sorted(data[1:])
    for i in range(nums):
        growth = (data[i + 1] - data[i]) / data[i + 1]
        if data[i] >= data[i + 1] or growth < 0.003 or growth > 0.0157:
            data[i + 1] = int(data[i] * 1.0083)
    return data
<|reserved_special_token_0|>
def process_fill_zero(hedge):
    """Left-pad *hedge* with '0' to a fixed width of 18 characters.

    The DB packs six 3-digit per-mille values into one integer, so leading
    zeros are lost on the round-trip; this restores the fixed width.
    Strings already 18 chars or longer are returned unchanged.
    """
    # str.rjust replaces the original manual prepend loop.
    return hedge.rjust(18, '0')
def predict_from_db(model_detail_slug, city, use_time):
    """
    Query the production DB for this month's hedge (value-retention) ratios.

    Each DB column packs six months of data as six zero-padded 3-digit
    per-mille values in one 18-char number, so use_time is mapped to a
    (column, month-within-column) pair before slicing.

    Returns (dealer_hedge, cpersonal_hedge) as 0-1 ratios.
    """
    # Translate city / trim names into DB surrogate ids.
    city_id = province_city_map.loc[city, 'city_id']
    model_detail_slug_id = model_detail_map.loc[model_detail_slug,
        'final_model_detail_slug_id']
    # Months 6/12/18/... belong to the previous half-year column.
    if use_time % 6 == 0:
        column_num = str(int(use_time / 6) - 1)
        month_num = 6
    else:
        column_num = str(int(use_time / 6))
        month_num = use_time % 6
    record = db_operate.query_valuate(model_detail_slug_id, city_id,
        column_num, use_time)
    # b2c (dealer) hedge: restore dropped leading zeros, slice out this
    # month's 3 digits, and convert per-mille to a ratio.
    dealer_hedge = str(record.loc[0, 'b2c_year_' + column_num])
    dealer_hedge = process_fill_zero(dealer_hedge)
    dealer_hedge = dealer_hedge[(month_num - 1) * 3:month_num * 3]
    dealer_hedge = int(dealer_hedge) / 1000
    # c2c (private-party) hedge, decoded the same way.
    cpersonal_hedge = str(record.loc[0, 'c2c_year_' + column_num])
    cpersonal_hedge = process_fill_zero(cpersonal_hedge)
    cpersonal_hedge = cpersonal_hedge[(month_num - 1) * 3:month_num * 3]
    cpersonal_hedge = int(cpersonal_hedge) / 1000
    return dealer_hedge, cpersonal_hedge
def predict_from_db_history(model_detail_slug, city, use_time):
    """
    Query the production DB for the hedge ratios of the trailing months.

    One DB column packs six months as 3-digit per-mille values; depending
    on where use_time falls, the window spans one packed column or two.

    Returns a list of [dealer_hedge, cpersonal_hedge, use_time] triples.
    """
    # Map names to DB surrogate ids.
    city_id = province_city_map.loc[city, 'city_id']
    model_detail_slug_id = model_detail_map.loc[model_detail_slug,
        'final_model_detail_slug_id']
    # Locate the packed column and the month offset inside it.
    if use_time % 6 == 0:
        column_num = int(use_time / 6) - 1
        month_num = 6
    else:
        column_num = int(use_time / 6)
        month_num = use_time % 6
    dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(
        model_detail_slug_id, city_id, column_num, use_time)
    result = []
    if len(dealer_hedge) == 1:
        # Single packed column: decode every month up to use_time.
        dealer_hedge = process_fill_zero(dealer_hedge[0])
        cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])
        for i in range(0, use_time):
            dealer_per = dealer_hedge[i * 3:(i + 1) * 3]
            cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]
            # NOTE(review): the constant use_time is attached to every month
            # here; the caller feeds it into mile normalisation -- confirm a
            # per-month age was not intended.
            result.append([int(dealer_per) / 1000, int(cpersonal_per) /
                1000, use_time])
        result.reverse()
    elif len(dealer_hedge) == 2:
        # Two adjacent packed columns: concatenate and take a 7-month window.
        dealer_hedge = process_fill_zero(dealer_hedge[0]) + process_fill_zero(
            dealer_hedge[1])
        cpersonal_hedge = process_fill_zero(cpersonal_hedge[0]
            ) + process_fill_zero(cpersonal_hedge[1])
        for i in range(month_num - 1, month_num + 6):
            dealer_per = dealer_hedge[i * 3:(i + 1) * 3]
            cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]
            result.append([int(dealer_per) / 1000, int(cpersonal_per) /
                1000, use_time])
        result.reverse()
    return result
<|reserved_special_token_0|>
def process_prices_relate(dealer_price, cpersonal_price):
    """Derive the (buy, private, sell) price triple from the two raw prices.

    When the dealer/private spread is inverted or wider than 12%, the
    private price is re-derived as a fixed 8.75% discount off the dealer
    price; the sell price is always 5.25% below the private price.
    """
    buy = dealer_price
    private = cpersonal_price
    spread = (buy - private) / private
    if spread < 0 or abs(spread) > 0.12:
        private = int(buy * (1 - 0.0875))
    sell = int(private * (1 - 0.0525))
    return buy, private, sell
<|reserved_special_token_0|>
def check_params_value(city, model_detail_slug, use_time, mile, category):
    """
    Validate the user-supplied query parameters.

    category selects the error wording for the use_time range check
    ('valuate', 'history' or 'future'); the range itself is always 1-240.

    Raises:
        ApiParamsValueError: unknown city/model, negative mile, or
            use_time outside 1-240.
        ApiParamsTypeError: mile or use_time of the wrong type.
    """
    if city not in cities:
        raise ApiParamsValueError('city', city, 'Unknown city!')
    if model_detail_slug not in models:
        raise ApiParamsValueError('model_detail_slug', model_detail_slug,
            'Unknown model!')
    # isinstance with a type tuple replaces the old bitwise-| pair.
    if not isinstance(mile, (int, float)):
        raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')
    if mile < 0:
        raise ApiParamsValueError('mile', mile,
            'Mile must be greater than zero!')
    if not isinstance(use_time, int):
        raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')
    # All categories share the 1-240 bound; only the message differs.
    if not 1 <= use_time <= 240:
        if category == 'valuate':
            raise ApiParamsValueError('use_time', use_time,
                'The use_time of Forecast must be in 1-240!')
        elif category == 'history':
            raise ApiParamsValueError('use_time', use_time,
                'The use_time of historical trend must be in 1-240!')
        elif category == 'future':
            raise ApiParamsValueError('use_time', use_time,
                'The use_time of future trend must be in 1-240!')
class Predict(object):
    """Valuation facade: spot valuation plus historical and future trends."""

    def __init__(self):
        """
        Load the matching tables and models.
        """
        # Filled lazily by the predict* methods.
        self.result = []
        self.valuate_model = []

    def add_process_intent(self, buy, private, sell, popularity, price_bn):
        """
        Derive a price for every deal type (intent) from the three anchors.
        """
        self.result = result_map.copy()
        self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy
        self.result.loc[self.result['intent'] == 'private', 'predict_price'
            ] = private
        self.result.loc[self.result['intent'] == 'sell', 'predict_price'
            ] = sell
        # Intents without an anchor start from the dealer (buy) price.
        self.result['predict_price'] = self.result['predict_price'].fillna(buy)
        self.result['popularity'] = popularity
        self.result['profit_rate'] = self.result.apply(process_profit_rate,
            axis=1)
        self.result['buy_profit_rate'] = self.result.apply(
            process_buy_profit_rate, axis=1)
        # Normalise each anchor to the weighted average price, then apply
        # the target intent's own profit rate.
        self.result['predict_price'] = self.result['predict_price'
            ] / self.result['buy_profit_rate']
        self.result['predict_price'] = self.result['profit_rate'
            ] * self.result['predict_price']
        self.result = cal_intent_condition(self.result.predict_price.values,
            price_bn)

    def follow_process(self, use_time, mile, price_bn, dealer_hedge,
        cpersonal_hedge, province, model_slug, model_detail_slug):
        """
        Shared post-processing: turn hedge ratios into adjusted prices.
        """
        dealer_price, cpersonal_price = (dealer_hedge * price_bn,
            cpersonal_hedge * price_bn)
        dealer_price = process_mile(dealer_price, use_time, mile)
        cpersonal_price = process_mile(cpersonal_price, use_time, mile)
        buy, private, sell = process_prices_relate(dealer_price,
            cpersonal_price)
        # Popularity of this model in the province, defaulting to grade 'C'.
        index = str(model_slug) + '_' + str(province)
        if index in province_popularity_index:
            popularity = province_popularity_map.loc[index, 'popularity']
        else:
            popularity = 'C'
        rate = process_adjust_profit(model_detail_slug, popularity)
        buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (
            1 + rate)
        return buy, private, sell, popularity

    def predict(self, city='深圳', model_detail_slug='model_25023_cs',
        use_time=12, mile=2, ret_type='records'):
        """
        Run a spot valuation; returns records (default) or a DataFrame.
        """
        check_params_value(city, model_detail_slug, use_time, mile,
            category='valuate')
        # New-car price; presumably stored in units of 10,000 -- scaled here.
        price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
        price_bn = price_bn * 10000
        province = province_city_map.loc[city, 'province']
        model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
        final_model_detail_slug = model_detail_map.loc[model_detail_slug,
            'final_model_detail_slug']
        dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug
            , city, use_time)
        buy, private, sell, popularity = self.follow_process(use_time, mile,
            price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,
            model_detail_slug)
        self.add_process_intent(buy, private, sell, popularity, price_bn)
        if ret_type == 'records':
            return self.result.to_dict('records')
        else:
            return self.result

    def predict_for_history(self, city='深圳', model_detail_slug=
        'model_25023_cs', use_time=12, mile=2):
        """
        Predict the historical monthly price series (buy/private/sell).
        """
        check_params_value(city, model_detail_slug, use_time, mile,
            category='valuate')
        price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
        price_bn = price_bn * 10000
        province = province_city_map.loc[city, 'province']
        model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
        final_model_detail_slug = model_detail_map.loc[model_detail_slug,
            'final_model_detail_slug']
        data_buy = []
        data_sell = []
        data_private = []
        result = predict_from_db_history(final_model_detail_slug, city,
            use_time)
        for dealer_hedge, cpersonal_hedge, use_time_per in result:
            buy, private, sell, popularity = self.follow_process(use_time_per,
                mile, price_bn, dealer_hedge, cpersonal_hedge, province,
                model_slug, model_detail_slug)
            data_buy.append(int(buy))
            data_private.append(int(private))
            data_sell.append(int(sell))
        return data_buy, data_private, data_sell

    def predict_for_future(self, city='深圳', model_detail_slug=
        'model_25023_cs', use_time=12, mile=2, times=3):
        """
        Predict the future yearly price series (buy/private/sell).
        """
        check_params_value(city, model_detail_slug, use_time, mile,
            category='valuate')
        price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
        price_bn = price_bn * 10000
        province = province_city_map.loc[city, 'province']
        model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
        final_model_detail_slug = model_detail_map.loc[model_detail_slug,
            'final_model_detail_slug']
        data_buy = []
        data_sell = []
        data_private = []
        result = predict_from_db_future(final_model_detail_slug, city,
            use_time, times)
        for dealer_hedge, cpersonal_hedge, use_time_per in result:
            buy, private, sell, popularity = self.follow_process(use_time_per,
                mile, price_bn, dealer_hedge, cpersonal_hedge, province,
                model_slug, model_detail_slug)
            data_buy.append(int(buy))
            data_private.append(int(private))
            data_sell.append(int(sell))
        return data_buy, data_private, data_sell

    def history_price_trend(self, city='深圳', model_detail_slug=
        'model_25023_cs', use_time=12, mile=2, ret_type='records'):
        """
        Compute the historical price trend over the trailing months.
        """
        check_params_value(city, model_detail_slug, use_time, mile,
            category='history')
        # Month offsets 0..-6, shortened for cars younger than 7 months.
        times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']
        nums = 6
        if use_time <= 6:
            times_str = []
            nums = use_time - 1
            for i in range(0, nums + 1):
                times_str.append(str(-i))
        data_buy, data_private, data_sell = self.predict_for_history(city,
            model_detail_slug, use_time, mile)
        # Force each series onto a plausible monotone trend.
        data_buy = process_unreasonable_history_price(data_buy, nums)
        data_sell = process_unreasonable_history_price(data_sell, nums)
        data_private = process_unreasonable_history_price(data_private, nums)
        result_b_2_c = pd.DataFrame([data_buy], columns=times_str)
        result_b_2_c['type'] = 'buy'
        result_c_2_b = pd.DataFrame([data_sell], columns=times_str)
        result_c_2_b['type'] = 'sell'
        result_c_2_c = pd.DataFrame([data_private], columns=times_str)
        result_c_2_c['type'] = 'private'
        result = result_b_2_c.append(result_c_2_b, ignore_index=True)
        result = result.append(result_c_2_c, ignore_index=True)
        if ret_type == 'records':
            return result.to_dict('records')
        else:
            return result

    def future_price_trend(self, city='深圳', model_detail_slug=
        'model_25023_cs', use_time=365, mile=2, ret_type='records'):
        """
        Compute the future price trend at 12-month steps.

        NOTE(review): the default use_time=365 exceeds the validated 1-240
        range and would raise -- confirm the intended default.
        """
        check_params_value(city, model_detail_slug, use_time, mile,
            category='future')
        # Steps 0/12/24/36 months ahead, clipped at 240 months of total age.
        times_str = ['0', '12', '24', '36']
        nums = 3
        if use_time > 204:
            times_str = []
            nums = int((240 - use_time) / 12)
            for i in range(0, nums + 1):
                times_str.append(str(i * 12))
        data_buy, data_private, data_sell = self.predict_for_future(city,
            model_detail_slug, use_time, mile, len(times_str))
        data_buy = process_unreasonable_future_price(data_buy, nums)
        data_sell = process_unreasonable_future_price(data_sell, nums)
        data_private = process_unreasonable_future_price(data_private, nums)
        result_b_2_c = pd.DataFrame([data_buy], columns=times_str)
        result_b_2_c['type'] = 'buy'
        result_c_2_b = pd.DataFrame([data_sell], columns=times_str)
        result_c_2_b['type'] = 'sell'
        result_c_2_c = pd.DataFrame([data_private], columns=times_str)
        result_c_2_c['type'] = 'private'
        result = result_b_2_c.append(result_c_2_b, ignore_index=True)
        result = result.append(result_c_2_c, ignore_index=True)
        if ret_type == 'records':
            return result.to_dict('records')
        else:
            return result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_profit_rate(intent, popularity):
    """
    Return the ratio of a deal type's price to the weighted average price.

    gl.PROFITS[popularity] holds the fixed margins per popularity grade;
    each deal type (intent) deducts a specific subset of them from 1.

    Raises:
        ValueError: if *intent* is not a known deal type (previously this
            surfaced as an obscure UnboundLocalError).
    """
    profit = gl.PROFITS[popularity]
    # Which entries of the profit vector each deal type deducts.
    deductions = {
        'sell': (0, 1),        # dealer trade-in price
        'buy': (0,),           # dealer retail price
        'release': (),         # suggested listing price
        'private': (0, 2),     # C2C price
        'lowest': (0, 1, 3),   # lowest transaction price
        'cpo': (0, 8),         # certified pre-owned price
        'replace': (0, 4),     # 4S-store replacement price
        'auction': (0, 5),     # auction price
        'avg-buy': (0, 7),     # average buying price
        'avg-sell': (0, 6),    # average selling price
    }
    if intent not in deductions:
        raise ValueError('Unknown intent: %r' % (intent,))
    # Subtract sequentially so float results match the old if/elif chain.
    profit_rate = 1
    for i in deductions[intent]:
        profit_rate -= profit[i]
    return profit_rate
def cal_intent_condition(prices, price_bn):
    """
    Build the 4-condition (excellent/good/fair/bad) price table for every
    deal type.

    prices: per-intent predicted prices, ordered like gl.INTENT_TYPE.
    price_bn: new-car price used as an upper bound.
    """
    # Cap the whole vector so prices[2] * 1.03 never exceeds the new-car
    # price; scale proportionally if it would.
    # NOTE(review): index 2 presumably maps to a specific intent in
    # gl.INTENT_TYPE (e.g. the listing price) -- confirm.
    if prices[2] * 1.03 > price_bn:
        rate = prices[2] * 1.03 / price_bn
        prices = prices / rate
    # Outer product: per-intent price x per-condition coefficient.
    df1 = pd.DataFrame(prices)
    df2 = pd.DataFrame([gl.CAR_CONDITION_COEFFICIENT_VALUES])
    all_map = df1.dot(df2)
    all_map.columns = ['excellent', 'good', 'fair', 'bad']
    all_map['intent'] = pd.Series(gl.INTENT_TYPE).values
    all_map = all_map.loc[:, ['intent', 'excellent', 'good', 'fair', 'bad']]
    all_map[['excellent', 'good', 'fair', 'bad']] = all_map[['excellent',
        'good', 'fair', 'bad']].astype(int)
    return all_map
def process_mile(price, use_time, mile):
    """Adjust *price* by average mileage per month of use."""
    monthly_mile = mile / use_time
    # Light use earns a premium of up to +3.5%.
    if monthly_mile < gl.MILE_THRESHOLD_2_5:
        return price + 0.035 * (1 - monthly_mile / gl.MILE_THRESHOLD_2_5) * price
    # Heavier use applies a graduated discount, capped at 12.5%.
    if monthly_mile < gl.MILE_THRESHOLD_5:
        penalty = 0.04 * (monthly_mile / gl.MILE_THRESHOLD_5) + 0.035
    elif monthly_mile < gl.MILE_THRESHOLD_10:
        penalty = 0.05 * (monthly_mile / gl.MILE_THRESHOLD_5) + 0.075
    else:
        penalty = 0.125
    return price - penalty * price
<|reserved_special_token_0|>
def process_unreasonable_history_price(data, nums):
    """Smooth a history price series in place.

    The tail (everything after the first point) is sorted ascending, then
    each step is forced to grow by roughly 0.3%-1.57%; an out-of-band step
    is rewritten as previous * 1.0083. Returns the (mutated) list.
    """
    if nums == 0:
        return data
    data[1:] = sorted(data[1:])
    for i in range(nums):
        growth = (data[i + 1] - data[i]) / data[i + 1]
        if data[i] >= data[i + 1] or growth < 0.003 or growth > 0.0157:
            data[i + 1] = int(data[i] * 1.0083)
    return data
<|reserved_special_token_0|>
def process_fill_zero(hedge):
    """Left-pad *hedge* with '0' to a fixed width of 18 characters.

    The DB packs six 3-digit per-mille values into one integer, so leading
    zeros are lost on the round-trip; this restores the fixed width.
    Strings already 18 chars or longer are returned unchanged.
    """
    # str.rjust replaces the original manual prepend loop.
    return hedge.rjust(18, '0')
def predict_from_db(model_detail_slug, city, use_time):
    """
    Query the production DB for this month's hedge (value-retention) ratios.

    Each DB column packs six months of data as six zero-padded 3-digit
    per-mille values in one 18-char number, so use_time is mapped to a
    (column, month-within-column) pair before slicing.

    Returns (dealer_hedge, cpersonal_hedge) as 0-1 ratios.
    """
    # Translate city / trim names into DB surrogate ids.
    city_id = province_city_map.loc[city, 'city_id']
    model_detail_slug_id = model_detail_map.loc[model_detail_slug,
        'final_model_detail_slug_id']
    # Months 6/12/18/... belong to the previous half-year column.
    if use_time % 6 == 0:
        column_num = str(int(use_time / 6) - 1)
        month_num = 6
    else:
        column_num = str(int(use_time / 6))
        month_num = use_time % 6
    record = db_operate.query_valuate(model_detail_slug_id, city_id,
        column_num, use_time)
    # b2c (dealer) hedge: restore dropped leading zeros, slice out this
    # month's 3 digits, and convert per-mille to a ratio.
    dealer_hedge = str(record.loc[0, 'b2c_year_' + column_num])
    dealer_hedge = process_fill_zero(dealer_hedge)
    dealer_hedge = dealer_hedge[(month_num - 1) * 3:month_num * 3]
    dealer_hedge = int(dealer_hedge) / 1000
    # c2c (private-party) hedge, decoded the same way.
    cpersonal_hedge = str(record.loc[0, 'c2c_year_' + column_num])
    cpersonal_hedge = process_fill_zero(cpersonal_hedge)
    cpersonal_hedge = cpersonal_hedge[(month_num - 1) * 3:month_num * 3]
    cpersonal_hedge = int(cpersonal_hedge) / 1000
    return dealer_hedge, cpersonal_hedge
def predict_from_db_history(model_detail_slug, city, use_time):
    """
    Query the production DB for the hedge ratios of the trailing months.

    One DB column packs six months as 3-digit per-mille values; depending
    on where use_time falls, the window spans one packed column or two.

    Returns a list of [dealer_hedge, cpersonal_hedge, use_time] triples.
    """
    # Map names to DB surrogate ids.
    city_id = province_city_map.loc[city, 'city_id']
    model_detail_slug_id = model_detail_map.loc[model_detail_slug,
        'final_model_detail_slug_id']
    # Locate the packed column and the month offset inside it.
    if use_time % 6 == 0:
        column_num = int(use_time / 6) - 1
        month_num = 6
    else:
        column_num = int(use_time / 6)
        month_num = use_time % 6
    dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(
        model_detail_slug_id, city_id, column_num, use_time)
    result = []
    if len(dealer_hedge) == 1:
        # Single packed column: decode every month up to use_time.
        dealer_hedge = process_fill_zero(dealer_hedge[0])
        cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])
        for i in range(0, use_time):
            dealer_per = dealer_hedge[i * 3:(i + 1) * 3]
            cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]
            # NOTE(review): the constant use_time is attached to every month
            # here; the caller feeds it into mile normalisation -- confirm a
            # per-month age was not intended.
            result.append([int(dealer_per) / 1000, int(cpersonal_per) /
                1000, use_time])
        result.reverse()
    elif len(dealer_hedge) == 2:
        # Two adjacent packed columns: concatenate and take a 7-month window.
        dealer_hedge = process_fill_zero(dealer_hedge[0]) + process_fill_zero(
            dealer_hedge[1])
        cpersonal_hedge = process_fill_zero(cpersonal_hedge[0]
            ) + process_fill_zero(cpersonal_hedge[1])
        for i in range(month_num - 1, month_num + 6):
            dealer_per = dealer_hedge[i * 3:(i + 1) * 3]
            cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]
            result.append([int(dealer_per) / 1000, int(cpersonal_per) /
                1000, use_time])
        result.reverse()
    return result
<|reserved_special_token_0|>
def process_prices_relate(dealer_price, cpersonal_price):
    """Derive the (buy, private, sell) price triple from the two raw prices.

    When the dealer/private spread is inverted or wider than 12%, the
    private price is re-derived as a fixed 8.75% discount off the dealer
    price; the sell price is always 5.25% below the private price.
    """
    buy = dealer_price
    private = cpersonal_price
    spread = (buy - private) / private
    if spread < 0 or abs(spread) > 0.12:
        private = int(buy * (1 - 0.0875))
    sell = int(private * (1 - 0.0525))
    return buy, private, sell
<|reserved_special_token_0|>
def check_params_value(city, model_detail_slug, use_time, mile, category):
    """
    Validate the user-supplied query parameters.

    category selects the error wording for the use_time range check
    ('valuate', 'history' or 'future'); the range itself is always 1-240.

    Raises:
        ApiParamsValueError: unknown city/model, negative mile, or
            use_time outside 1-240.
        ApiParamsTypeError: mile or use_time of the wrong type.
    """
    if city not in cities:
        raise ApiParamsValueError('city', city, 'Unknown city!')
    if model_detail_slug not in models:
        raise ApiParamsValueError('model_detail_slug', model_detail_slug,
            'Unknown model!')
    # isinstance with a type tuple replaces the old bitwise-| pair.
    if not isinstance(mile, (int, float)):
        raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')
    if mile < 0:
        raise ApiParamsValueError('mile', mile,
            'Mile must be greater than zero!')
    if not isinstance(use_time, int):
        raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')
    # All categories share the 1-240 bound; only the message differs.
    if not 1 <= use_time <= 240:
        if category == 'valuate':
            raise ApiParamsValueError('use_time', use_time,
                'The use_time of Forecast must be in 1-240!')
        elif category == 'history':
            raise ApiParamsValueError('use_time', use_time,
                'The use_time of historical trend must be in 1-240!')
        elif category == 'future':
            raise ApiParamsValueError('use_time', use_time,
                'The use_time of future trend must be in 1-240!')
class Predict(object):
    """Valuation facade: spot valuation plus historical and future trends."""

    def __init__(self):
        """
        Load the matching tables and models.
        """
        # Filled lazily by the predict* methods.
        self.result = []
        self.valuate_model = []

    def add_process_intent(self, buy, private, sell, popularity, price_bn):
        """
        Derive a price for every deal type (intent) from the three anchors.
        """
        self.result = result_map.copy()
        self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy
        self.result.loc[self.result['intent'] == 'private', 'predict_price'
            ] = private
        self.result.loc[self.result['intent'] == 'sell', 'predict_price'
            ] = sell
        # Intents without an anchor start from the dealer (buy) price.
        self.result['predict_price'] = self.result['predict_price'].fillna(buy)
        self.result['popularity'] = popularity
        self.result['profit_rate'] = self.result.apply(process_profit_rate,
            axis=1)
        self.result['buy_profit_rate'] = self.result.apply(
            process_buy_profit_rate, axis=1)
        # Normalise each anchor to the weighted average price, then apply
        # the target intent's own profit rate.
        self.result['predict_price'] = self.result['predict_price'
            ] / self.result['buy_profit_rate']
        self.result['predict_price'] = self.result['profit_rate'
            ] * self.result['predict_price']
        self.result = cal_intent_condition(self.result.predict_price.values,
            price_bn)

    def follow_process(self, use_time, mile, price_bn, dealer_hedge,
        cpersonal_hedge, province, model_slug, model_detail_slug):
        """
        Shared post-processing: turn hedge ratios into adjusted prices.
        """
        dealer_price, cpersonal_price = (dealer_hedge * price_bn,
            cpersonal_hedge * price_bn)
        dealer_price = process_mile(dealer_price, use_time, mile)
        cpersonal_price = process_mile(cpersonal_price, use_time, mile)
        buy, private, sell = process_prices_relate(dealer_price,
            cpersonal_price)
        # Popularity of this model in the province, defaulting to grade 'C'.
        index = str(model_slug) + '_' + str(province)
        if index in province_popularity_index:
            popularity = province_popularity_map.loc[index, 'popularity']
        else:
            popularity = 'C'
        rate = process_adjust_profit(model_detail_slug, popularity)
        buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (
            1 + rate)
        return buy, private, sell, popularity

    def predict(self, city='深圳', model_detail_slug='model_25023_cs',
        use_time=12, mile=2, ret_type='records'):
        """
        Run a spot valuation; returns records (default) or a DataFrame.
        """
        check_params_value(city, model_detail_slug, use_time, mile,
            category='valuate')
        # New-car price; presumably stored in units of 10,000 -- scaled here.
        price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
        price_bn = price_bn * 10000
        province = province_city_map.loc[city, 'province']
        model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
        final_model_detail_slug = model_detail_map.loc[model_detail_slug,
            'final_model_detail_slug']
        dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug
            , city, use_time)
        buy, private, sell, popularity = self.follow_process(use_time, mile,
            price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,
            model_detail_slug)
        self.add_process_intent(buy, private, sell, popularity, price_bn)
        if ret_type == 'records':
            return self.result.to_dict('records')
        else:
            return self.result

    def predict_for_history(self, city='深圳', model_detail_slug=
        'model_25023_cs', use_time=12, mile=2):
        """
        Predict the historical monthly price series (buy/private/sell).
        """
        check_params_value(city, model_detail_slug, use_time, mile,
            category='valuate')
        price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
        price_bn = price_bn * 10000
        province = province_city_map.loc[city, 'province']
        model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
        final_model_detail_slug = model_detail_map.loc[model_detail_slug,
            'final_model_detail_slug']
        data_buy = []
        data_sell = []
        data_private = []
        result = predict_from_db_history(final_model_detail_slug, city,
            use_time)
        for dealer_hedge, cpersonal_hedge, use_time_per in result:
            buy, private, sell, popularity = self.follow_process(use_time_per,
                mile, price_bn, dealer_hedge, cpersonal_hedge, province,
                model_slug, model_detail_slug)
            data_buy.append(int(buy))
            data_private.append(int(private))
            data_sell.append(int(sell))
        return data_buy, data_private, data_sell

    def predict_for_future(self, city='深圳', model_detail_slug=
        'model_25023_cs', use_time=12, mile=2, times=3):
        """
        Predict the future yearly price series (buy/private/sell).
        """
        check_params_value(city, model_detail_slug, use_time, mile,
            category='valuate')
        price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
        price_bn = price_bn * 10000
        province = province_city_map.loc[city, 'province']
        model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
        final_model_detail_slug = model_detail_map.loc[model_detail_slug,
            'final_model_detail_slug']
        data_buy = []
        data_sell = []
        data_private = []
        result = predict_from_db_future(final_model_detail_slug, city,
            use_time, times)
        for dealer_hedge, cpersonal_hedge, use_time_per in result:
            buy, private, sell, popularity = self.follow_process(use_time_per,
                mile, price_bn, dealer_hedge, cpersonal_hedge, province,
                model_slug, model_detail_slug)
            data_buy.append(int(buy))
            data_private.append(int(private))
            data_sell.append(int(sell))
        return data_buy, data_private, data_sell

    def history_price_trend(self, city='深圳', model_detail_slug=
        'model_25023_cs', use_time=12, mile=2, ret_type='records'):
        """
        Compute the historical price trend over the trailing months.
        """
        check_params_value(city, model_detail_slug, use_time, mile,
            category='history')
        # Month offsets 0..-6, shortened for cars younger than 7 months.
        times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']
        nums = 6
        if use_time <= 6:
            times_str = []
            nums = use_time - 1
            for i in range(0, nums + 1):
                times_str.append(str(-i))
        data_buy, data_private, data_sell = self.predict_for_history(city,
            model_detail_slug, use_time, mile)
        # Force each series onto a plausible monotone trend.
        data_buy = process_unreasonable_history_price(data_buy, nums)
        data_sell = process_unreasonable_history_price(data_sell, nums)
        data_private = process_unreasonable_history_price(data_private, nums)
        result_b_2_c = pd.DataFrame([data_buy], columns=times_str)
        result_b_2_c['type'] = 'buy'
        result_c_2_b = pd.DataFrame([data_sell], columns=times_str)
        result_c_2_b['type'] = 'sell'
        result_c_2_c = pd.DataFrame([data_private], columns=times_str)
        result_c_2_c['type'] = 'private'
        result = result_b_2_c.append(result_c_2_b, ignore_index=True)
        result = result.append(result_c_2_c, ignore_index=True)
        if ret_type == 'records':
            return result.to_dict('records')
        else:
            return result

    def future_price_trend(self, city='深圳', model_detail_slug=
        'model_25023_cs', use_time=365, mile=2, ret_type='records'):
        """
        Compute the future price trend at 12-month steps.

        NOTE(review): the default use_time=365 exceeds the validated 1-240
        range and would raise -- confirm the intended default.
        """
        check_params_value(city, model_detail_slug, use_time, mile,
            category='future')
        # Steps 0/12/24/36 months ahead, clipped at 240 months of total age.
        times_str = ['0', '12', '24', '36']
        nums = 3
        if use_time > 204:
            times_str = []
            nums = int((240 - use_time) / 12)
            for i in range(0, nums + 1):
                times_str.append(str(i * 12))
        data_buy, data_private, data_sell = self.predict_for_future(city,
            model_detail_slug, use_time, mile, len(times_str))
        data_buy = process_unreasonable_future_price(data_buy, nums)
        data_sell = process_unreasonable_future_price(data_sell, nums)
        data_private = process_unreasonable_future_price(data_private, nums)
        result_b_2_c = pd.DataFrame([data_buy], columns=times_str)
        result_b_2_c['type'] = 'buy'
        result_c_2_b = pd.DataFrame([data_sell], columns=times_str)
        result_c_2_b['type'] = 'sell'
        result_c_2_c = pd.DataFrame([data_private], columns=times_str)
        result_c_2_c['type'] = 'private'
        result = result_b_2_c.append(result_c_2_b, ignore_index=True)
        result = result.append(result_c_2_c, ignore_index=True)
        if ret_type == 'records':
            return result.to_dict('records')
        else:
            return result
<|reserved_special_token_1|>
from valuate.predict import *
def get_profit_rate(intent, popularity):
    """
    Return the ratio of a deal type's price to the weighted average price.

    gl.PROFITS[popularity] holds the fixed margins per popularity grade;
    each deal type (intent) deducts a specific subset of them from 1.

    Raises:
        ValueError: if *intent* is not a known deal type (previously this
            surfaced as an obscure UnboundLocalError).
    """
    profit = gl.PROFITS[popularity]
    # Which entries of the profit vector each deal type deducts.
    deductions = {
        'sell': (0, 1),        # dealer trade-in price
        'buy': (0,),           # dealer retail price
        'release': (),         # suggested listing price
        'private': (0, 2),     # C2C price
        'lowest': (0, 1, 3),   # lowest transaction price
        'cpo': (0, 8),         # certified pre-owned price
        'replace': (0, 4),     # 4S-store replacement price
        'auction': (0, 5),     # auction price
        'avg-buy': (0, 7),     # average buying price
        'avg-sell': (0, 6),    # average selling price
    }
    if intent not in deductions:
        raise ValueError('Unknown intent: %r' % (intent,))
    # Subtract sequentially so float results match the old if/elif chain.
    profit_rate = 1
    for i in deductions[intent]:
        profit_rate -= profit[i]
    return profit_rate
def cal_intent_condition(prices, price_bn):
    """
    Build the 4-condition (excellent/good/fair/bad) price table for every
    deal type.

    prices: per-intent predicted prices, ordered like gl.INTENT_TYPE.
    price_bn: new-car price used as an upper bound.
    """
    # Cap the whole vector so prices[2] * 1.03 never exceeds the new-car
    # price; scale proportionally if it would.
    # NOTE(review): index 2 presumably maps to a specific intent in
    # gl.INTENT_TYPE (e.g. the listing price) -- confirm.
    if(prices[2] * 1.03) > price_bn:
        rate = (prices[2] * 1.03) / price_bn
        prices = prices / rate
    # Outer product: per-intent price x per-condition coefficient.
    df1 = pd.DataFrame(prices)
    df2 = pd.DataFrame([gl.CAR_CONDITION_COEFFICIENT_VALUES])
    all_map = df1.dot(df2)
    all_map.columns = ['excellent', 'good', 'fair', 'bad']
    all_map['intent'] = pd.Series(gl.INTENT_TYPE).values
    all_map = all_map.loc[:, ['intent', 'excellent', 'good', 'fair', 'bad']]
    all_map[['excellent', 'good', 'fair', 'bad']] = all_map[['excellent', 'good', 'fair', 'bad']].astype(int)
    return all_map
def process_mile(price, use_time, mile):
    """
    Adjust *price* for mileage relative to vehicle age.
    """
    # A normally driven car is baselined at 25,000 km per year; below that
    # the price floats up by at most +3.5%.
    # Between 25k and 50k km per year the price floats within -3.5%..-7.5%.
    # At 50k km per year and above the impact is between -7.5% and -12.5%.
    mile_per_month = mile / use_time
    if mile_per_month < gl.MILE_THRESHOLD_2_5:
        return price + 0.035 * (1 - mile_per_month/gl.MILE_THRESHOLD_2_5) * price
    elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:
        return price - (0.04 * (mile_per_month/gl.MILE_THRESHOLD_5)+0.035) * price
    elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:
        # NOTE(review): this band divides by MILE_THRESHOLD_5 rather than
        # MILE_THRESHOLD_10 -- confirm that is intentional.
        return price - (0.05 * (mile_per_month/gl.MILE_THRESHOLD_5)+0.075) * price
    else:
        return price - 0.125 * price
def process_profit_rate(df):
    """
    Row-wise helper: profit rate for the row's own deal type (intent).
    """
    return get_profit_rate(df['intent'], df['popularity'])
def process_buy_profit_rate(df):
    """
    Row-wise helper: profit rate of the source deal type the prediction was
    anchored on (used to normalise back to the weighted average price).
    """
    return get_profit_rate(df['intent_source'], df['popularity'])
def process_unreasonable_history_price(data, nums):
    """Smooth a history price series in place.

    The tail (everything after the first point) is sorted ascending, then
    each step is forced to grow by roughly 0.3%-1.57%; an out-of-band step
    is rewritten as previous * 1.0083. Returns the (mutated) list.
    """
    if nums == 0:
        return data
    data[1:] = sorted(data[1:])
    for i in range(nums):
        growth = (data[i + 1] - data[i]) / data[i + 1]
        if data[i] >= data[i + 1] or growth < 0.003 or growth > 0.0157:
            data[i + 1] = int(data[i] * 1.0083)
    return data
def process_unreasonable_future_price(data, nums):
    """Smooth a future price series in place.

    The tail (everything after the first point) is sorted descending, then
    each step is forced to drop by roughly 3.6%-18.8%; an out-of-band step
    is rewritten as previous * 0.9. Returns the (mutated) list.
    """
    data[1:] = sorted(data[1:], reverse=True)
    for i in range(nums):
        drop = (data[i] - data[i + 1]) / data[i]
        if data[i] <= data[i + 1] or drop < 0.036 or drop > 0.188:
            data[i + 1] = int(data[i] * 0.9)
    return data
def process_fill_zero(hedge):
    """Left-pad *hedge* with '0' to a fixed width of 18 characters.

    The DB packs six 3-digit per-mille values into one integer, so leading
    zeros are lost on the round-trip; this restores the fixed width.
    Strings already 18 chars or longer are returned unchanged.
    """
    # str.rjust replaces the original manual prepend loop.
    return hedge.rjust(18, '0')
def predict_from_db(model_detail_slug, city, use_time):
    """
    Query the production DB for this month's hedge (value-retention) ratios.

    Returns (dealer_hedge, cpersonal_hedge) as 0-1 ratios.
    """
    # Map the city and model_detail_slug to their DB surrogate ids.
    city_id = province_city_map.loc[city, 'city_id']
    model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id']
    # Work out the packed half-year column index and the month offset in it;
    # months 6/12/18/... belong to the previous column.
    if (use_time % 6) == 0:
        column_num = str(int(use_time / 6) - 1)
        month_num = 6
    else:
        column_num = str(int(use_time / 6))
        month_num = use_time % 6
    # Query
    record = db_operate.query_valuate(model_detail_slug_id, city_id, column_num, use_time)
    # Decode: restore dropped leading zeros, slice this month's 3 digits,
    # and convert the per-mille value to a ratio.
    dealer_hedge = str(record.loc[0, 'b2c_year_'+column_num])
    dealer_hedge = process_fill_zero(dealer_hedge)
    dealer_hedge = dealer_hedge[(month_num-1)*3:month_num*3]
    dealer_hedge = int(dealer_hedge) / 1000
    cpersonal_hedge = str(record.loc[0, 'c2c_year_'+column_num])
    cpersonal_hedge = process_fill_zero(cpersonal_hedge)
    cpersonal_hedge = cpersonal_hedge[(month_num-1)*3:month_num*3]
    cpersonal_hedge = int(cpersonal_hedge) / 1000
    return dealer_hedge, cpersonal_hedge
def predict_from_db_history(model_detail_slug, city, use_time):
    """
    Query the production DB for the hedge ratios of the trailing months.

    Returns a list of [dealer_hedge, cpersonal_hedge, use_time] triples.
    """
    # Map the city and model_detail_slug to their DB surrogate ids.
    city_id = province_city_map.loc[city, 'city_id']
    model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id']
    # Work out the packed half-year column index and the month offset in it.
    if (use_time % 6) == 0:
        column_num = int(use_time / 6) - 1
        month_num = 6
    else:
        column_num = int(use_time / 6)
        month_num = use_time % 6
    # Query
    dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(model_detail_slug_id, city_id, column_num, use_time)
    # Decode the packed per-mille values.
    result = []
    if len(dealer_hedge) == 1:
        # Single packed column: decode every month up to use_time.
        dealer_hedge = process_fill_zero(dealer_hedge[0])
        cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])
        for i in range(0, use_time):
            dealer_per = dealer_hedge[i*3:(i+1)*3]
            cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]
            # NOTE(review): the constant use_time is attached to each month;
            # confirm a per-month age was not intended.
            result.append([int(dealer_per)/1000, int(cpersonal_per)/1000, use_time])
        result.reverse()
    elif len(dealer_hedge) == 2:
        # Two adjacent packed columns: concatenate, take a 7-month window.
        dealer_hedge = process_fill_zero(dealer_hedge[0])+process_fill_zero(dealer_hedge[1])
        cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])+process_fill_zero(cpersonal_hedge[1])
        for i in range(month_num-1, month_num+6):
            dealer_per = dealer_hedge[i*3:(i+1)*3]
            cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]
            result.append([int(dealer_per)/1000, int(cpersonal_per)/1000, use_time])
        result.reverse()
    return result
def predict_from_db_future(model_detail_slug, city, use_time, times):
    """
    Query the production DB for hedge ratios at yearly steps into the future.

    Returns `times` triples [dealer_hedge, cpersonal_hedge, use_time + i*12];
    columns step by 2 per year because each column packs 6 months.
    """
    # Map the city and model_detail_slug to their DB surrogate ids.
    city_id = province_city_map.loc[city, 'city_id']
    model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id']
    # Work out the packed half-year column index and the month offset in it.
    if (use_time % 6) == 0:
        column_num = int(use_time / 6) - 1
        month_num = 6
    else:
        column_num = int(use_time / 6)
        month_num = use_time % 6
    # Query
    record = db_operate.query_valuate_future(model_detail_slug_id, city_id)
    # Decode the same calendar-month slot out of each future year's column.
    result = []
    for i in range(0, times):
        dealer_hedge = str(record.loc[0, 'b2c_year_' + str(column_num+i*2)])
        dealer_hedge = process_fill_zero(dealer_hedge)
        dealer_hedge = dealer_hedge[(month_num - 1) * 3:month_num * 3]
        dealer_hedge = int(dealer_hedge) / 1000
        cpersonal_hedge = str(record.loc[0, 'c2c_year_' + str(column_num+i*2)])
        cpersonal_hedge = process_fill_zero(cpersonal_hedge)
        cpersonal_hedge = cpersonal_hedge[(month_num - 1) * 3:month_num * 3]
        cpersonal_hedge = int(cpersonal_hedge) / 1000
        result.append([dealer_hedge, cpersonal_hedge, use_time+i*12])
    return result
def process_prices_relate(dealer_price, cpersonal_price):
    """Derive the (buy, private, sell) price triple from the two raw prices.

    When the dealer/private spread is inverted or wider than 12%, the
    private price is re-derived as a fixed 8.75% discount off the dealer
    price; the sell price is always 5.25% below the private price.
    """
    buy = dealer_price
    private = cpersonal_price
    spread = (buy - private) / private
    if spread < 0 or abs(spread) > 0.12:
        private = int(buy * (1 - 0.0875))
    sell = int(private * (1 - 0.0525))
    return buy, private, sell
def process_adjust_profit(model_detail_slug, popularity):
    """Look up the manual profit-adjustment rate for a trim/popularity pair.

    Returns the configured rate from ``adjust_profit`` when the combined
    key exists in ``model_detail_slug_popularity_index``, otherwise 0.
    """
    key = '{0}_{1}'.format(model_detail_slug, popularity)
    if key not in model_detail_slug_popularity_index:
        return 0
    return adjust_profit.loc[key, 'rate']
def check_params_value(city, model_detail_slug, use_time, mile, category):
    """Validate the public valuation-API parameters.

    Raises ``ApiParamsValueError`` for out-of-range / unknown values and
    ``ApiParamsTypeError`` for wrong types.  ``category`` selects which
    use_time error message is raised ('valuate' / 'history' / 'future');
    an unrecognised category skips the range check, as before.
    """
    # City and trim must be known to the loaded lookup tables.
    if city not in cities:
        raise ApiParamsValueError('city', city, 'Unknown city!')
    if model_detail_slug not in models:
        raise ApiParamsValueError('model_detail_slug', model_detail_slug, 'Unknown model!')
    # Mileage: numeric and non-negative.
    if not isinstance(mile, (int, float)):
        raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')
    if mile < 0:
        raise ApiParamsValueError('mile', mile, 'Mile must be greater than zero!')
    # Usage time: integer, and within 1-240 months for every category.
    if not isinstance(use_time, int):
        raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')
    # The three categories share the same range; only the message differs.
    use_time_messages = {
        'valuate': 'The use_time of Forecast must be in 1-240!',
        'history': 'The use_time of historical trend must be in 1-240!',
        'future': 'The use_time of future trend must be in 1-240!',
    }
    if category in use_time_messages and not 1 <= use_time <= 240:
        raise ApiParamsValueError('use_time', use_time, use_time_messages[category])
class Predict(object):
    """Used-car valuation facade.

    Produces a single-point price table (:meth:`predict`) plus historical
    (:meth:`history_price_trend`) and future (:meth:`future_price_trend`)
    price-trend tables for a given city, trim (``model_detail_slug``), age
    in months (``use_time``) and mileage.  All reference data comes from the
    module-level lookup tables (``model_detail_map``, ``province_city_map``,
    ...) and the production-DB query helpers.
    """
    def __init__(self):
        """Initialise empty result holders; lookup tables live at module level."""
        self.result = []
        self.valuate_model = []
    def add_process_intent(self, buy, private, sell, popularity, price_bn):
        """Expand the three base prices into the full per-intent table.

        Seeds the ``result_map`` template with the buy/private/sell prices,
        applies the per-intent profit rates, and stores the final table
        produced by ``cal_intent_condition`` in ``self.result``.
        """
        # Seed the per-intent template; intents other than buy/private/sell
        # fall back to the dealer (buy) price via fillna.
        self.result = result_map.copy()
        self.result.loc[(self.result['intent'] == 'buy'), 'predict_price'] = buy
        self.result.loc[(self.result['intent'] == 'private'), 'predict_price'] = private
        self.result.loc[(self.result['intent'] == 'sell'), 'predict_price'] = sell
        self.result['predict_price'] = self.result['predict_price'].fillna(buy)
        self.result['popularity'] = popularity
        # Normalise by the buy-side profit rate, then scale each row by its
        # intent-specific profit rate.
        self.result['profit_rate'] = self.result.apply(process_profit_rate, axis=1)
        self.result['buy_profit_rate'] = self.result.apply(process_buy_profit_rate, axis=1)
        self.result['predict_price'] = self.result['predict_price'] / self.result['buy_profit_rate']
        self.result['predict_price'] = self.result['profit_rate'] * self.result['predict_price']
        # Compute the final price table covering every trade type.
        self.result = cal_intent_condition(self.result.predict_price.values, price_bn)
    def follow_process(self, use_time, mile, price_bn, dealer_hedge, cpersonal_hedge, province, model_slug, model_detail_slug):
        """Turn hedge ratios into adjusted ``(buy, private, sell, popularity)``.

        Scales the hedges by the new-car price, corrects for mileage,
        reconciles the three price levels, then applies the manual profit
        adjustment for the model's popularity in the given province.
        """
        # Hedge ratio x new-car guide price -> raw prices.
        dealer_price, cpersonal_price = dealer_hedge * price_bn, cpersonal_hedge * price_bn
        # Mileage correction relative to vehicle age.
        dealer_price = process_mile(dealer_price, use_time, mile)
        cpersonal_price = process_mile(cpersonal_price, use_time, mile)
        # Keep the three price levels mutually consistent.
        buy, private, sell = process_prices_relate(dealer_price, cpersonal_price)
        # Popularity of this model in the province; default to 'C' when unknown.
        index = str(model_slug) + '_' + str(province)
        if index in province_popularity_index:
            popularity = province_popularity_map.loc[index, 'popularity']
        else:
            popularity = 'C'
        # Final manual profit adjustment for this trim/popularity pair.
        rate = process_adjust_profit(model_detail_slug, popularity)
        buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (1 + rate)
        return buy, private, sell, popularity
    def predict(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2, ret_type='records'):
        """Single-point valuation.

        Returns the per-intent price table as a list of dicts when
        ``ret_type == 'records'``, otherwise as the result object itself
        (presumably a DataFrame — it supports ``to_dict``).  Raises
        ApiParams*Error on invalid input.
        """
        # Validate the public parameters.
        check_params_value(city, model_detail_slug, use_time, mile, category='valuate')
        # New-car guide price (converted from 10k-yuan units) and the
        # normalised trim/province keys.
        price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
        price_bn = price_bn * 10000
        province = province_city_map.loc[city, 'province']
        model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
        final_model_detail_slug = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug']
        # Hedge ratios from the production DB, then post-processing.
        dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug, city, use_time)
        buy, private, sell, popularity = self.follow_process(use_time, mile, price_bn, dealer_hedge, cpersonal_hedge, province, model_slug, model_detail_slug)
        # Expand into the per-intent result table.
        self.add_process_intent(buy, private, sell, popularity, price_bn)
        if ret_type == 'records':
            return self.result.to_dict('records')
        else:
            return self.result
    def predict_for_history(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2):
        """Per-month prices over the recent past.

        Returns three parallel lists ``(buy, private, sell)`` of int prices,
        one entry per month returned by ``predict_from_db_history``.
        """
        # NOTE(review): validates with category='valuate' rather than
        # 'history'; both ranges are currently identical.
        check_params_value(city, model_detail_slug, use_time, mile, category='valuate')
        # New-car guide price (converted from 10k-yuan units) and keys.
        price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
        price_bn = price_bn * 10000
        province = province_city_map.loc[city, 'province']
        model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
        final_model_detail_slug = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug']
        data_buy = []
        data_sell = []
        data_private = []
        # One (dealer_hedge, cpersonal_hedge, age) triple per month.
        result = predict_from_db_history(final_model_detail_slug, city, use_time)
        for dealer_hedge, cpersonal_hedge, use_time_per in result:
            buy, private, sell, popularity = self.follow_process(use_time_per, mile, price_bn, dealer_hedge,
                                                                 cpersonal_hedge, province, model_slug,
                                                                 model_detail_slug)
            data_buy.append(int(buy))
            data_private.append(int(private))
            data_sell.append(int(sell))
        return data_buy, data_private, data_sell
    def predict_for_future(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2, times=3):
        """Per-year prices over the coming years.

        Returns three parallel lists ``(buy, private, sell)`` of int prices,
        one entry per future year (``times`` entries).
        """
        # NOTE(review): validates with category='valuate' rather than
        # 'future'; both ranges are currently identical.
        check_params_value(city, model_detail_slug, use_time, mile, category='valuate')
        # New-car guide price (converted from 10k-yuan units) and keys.
        price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
        price_bn = price_bn * 10000
        province = province_city_map.loc[city, 'province']
        model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
        final_model_detail_slug = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug']
        data_buy = []
        data_sell = []
        data_private = []
        # One (dealer_hedge, cpersonal_hedge, age) triple per future year.
        result = predict_from_db_future(final_model_detail_slug, city, use_time, times)
        for dealer_hedge, cpersonal_hedge, use_time_per in result:
            buy, private, sell, popularity = self.follow_process(use_time_per, mile, price_bn, dealer_hedge,
                                                                 cpersonal_hedge, province, model_slug,
                                                                 model_detail_slug)
            data_buy.append(int(buy))
            data_private.append(int(private))
            data_sell.append(int(sell))
        return data_buy, data_private, data_sell
    def history_price_trend(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2, ret_type='records'):
        """Historical price trend over (up to) the last six months.

        Returns one row per price type ('buy'/'sell'/'private') with
        month-offset columns '0', '-1', ... ('0' = now); a list of dicts
        when ``ret_type == 'records'``, otherwise the DataFrame.
        """
        # Validate the public parameters.
        check_params_value(city, model_detail_slug, use_time, mile, category='history')
        # Month-offset column labels; a vehicle younger than seven months
        # only has use_time-1 months of history.
        times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']
        nums = 6
        if use_time <= 6:
            times_str = []
            nums = use_time-1
            for i in range(0, nums+1):
                times_str.append(str(-i))
        # Raw historical prices, then smoothing of implausible jumps.
        data_buy, data_private, data_sell = self.predict_for_history(city, model_detail_slug, use_time, mile)
        data_buy = process_unreasonable_history_price(data_buy, nums)
        data_sell = process_unreasonable_history_price(data_sell, nums)
        data_private = process_unreasonable_history_price(data_private, nums)
        # Assemble one labelled row per price type.
        result_b_2_c = pd.DataFrame([data_buy], columns=times_str)
        result_b_2_c['type'] = 'buy'
        result_c_2_b = pd.DataFrame([data_sell], columns=times_str)
        result_c_2_b['type'] = 'sell'
        result_c_2_c = pd.DataFrame([data_private], columns=times_str)
        result_c_2_c['type'] = 'private'
        result = result_b_2_c.append(result_c_2_b, ignore_index=True)
        result = result.append(result_c_2_c, ignore_index=True)
        if ret_type == 'records':
            return result.to_dict('records')
        else:
            return result
    def future_price_trend(self, city='深圳', model_detail_slug='model_25023_cs', use_time=365, mile=2, ret_type='records'):
        """Future price trend over (up to) the next three years.

        Returns one row per price type ('buy'/'sell'/'private') with
        month-offset columns '0', '12', ...; a list of dicts when
        ``ret_type == 'records'``, otherwise the DataFrame.

        NOTE(review): the default ``use_time=365`` would fail the 1-240
        validation below if actually used — confirm intended default.
        """
        # Validate the public parameters.
        check_params_value(city, model_detail_slug, use_time, mile, category='future')
        # Year-offset column labels; shrink the horizon so projected ages
        # never exceed the supported 240 months.
        times_str = ['0', '12', '24', '36']
        nums = 3
        if use_time > 204:
            times_str = []
            nums = int((240-use_time) / 12)
            for i in range(0, nums+1):
                times_str.append(str(i*12))
        # Raw future prices, then smoothing of implausible jumps.
        data_buy, data_private, data_sell = self.predict_for_future(city, model_detail_slug, use_time, mile, len(times_str))
        data_buy = process_unreasonable_future_price(data_buy, nums)
        data_sell = process_unreasonable_future_price(data_sell, nums)
        data_private = process_unreasonable_future_price(data_private, nums)
        # Assemble one labelled row per price type.
        result_b_2_c = pd.DataFrame([data_buy], columns=times_str)
        result_b_2_c['type'] = 'buy'
        result_c_2_b = pd.DataFrame([data_sell], columns=times_str)
        result_c_2_b['type'] = 'sell'
        result_c_2_c = pd.DataFrame([data_private], columns=times_str)
        result_c_2_c['type'] = 'private'
        result = result_b_2_c.append(result_c_2_b, ignore_index=True)
        result = result.append(result_c_2_c, ignore_index=True)
        if ret_type == 'records':
            return result.to_dict('records')
        else:
            return result
|
flexible
|
{
"blob_id": "1f01989f10be5404d415d4abd1ef9ab6c8695aba",
"index": 6069,
"step-1": "<mask token>\n\n\ndef process_mile(price, use_time, mile):\n \"\"\"\n mile处理\n \"\"\"\n mile_per_month = mile / use_time\n if mile_per_month < gl.MILE_THRESHOLD_2_5:\n return price + 0.035 * (1 - mile_per_month / gl.MILE_THRESHOLD_2_5\n ) * price\n elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:\n return price - (0.04 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.035\n ) * price\n elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:\n return price - (0.05 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.075\n ) * price\n else:\n return price - 0.125 * price\n\n\n<mask token>\n\n\ndef process_prices_relate(dealer_price, cpersonal_price):\n \"\"\"\n 人工处理三类价格的相关性\n \"\"\"\n buy = dealer_price\n private = cpersonal_price\n private_buy_rate = (buy - private) / private\n if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):\n private = int(buy * (1 - 0.0875))\n sell = int(private * (1 - 0.0525))\n return buy, private, sell\n\n\n<mask token>\n\n\ndef check_params_value(city, model_detail_slug, use_time, mile, category):\n \"\"\"\n 校验参数\n \"\"\"\n if city not in cities:\n raise ApiParamsValueError('city', city, 'Unknown city!')\n if model_detail_slug not in models:\n raise ApiParamsValueError('model_detail_slug', model_detail_slug,\n 'Unknown model!')\n if not isinstance(mile, int) | isinstance(mile, float):\n raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')\n elif mile < 0:\n raise ApiParamsValueError('mile', mile,\n 'Mile must be greater than zero!')\n if not isinstance(use_time, int):\n raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')\n if category == 'valuate':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of Forecast must be in 1-240!')\n elif category == 'history':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of historical trend must be in 1-240!')\n elif 
category == 'future':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of future trend must be in 1-240!')\n\n\nclass Predict(object):\n\n def __init__(self):\n \"\"\"\n 加载各类匹配表和模型\n \"\"\"\n self.result = []\n self.valuate_model = []\n\n def add_process_intent(self, buy, private, sell, popularity, price_bn):\n \"\"\"\n 根据交易方式修正预测值\n \"\"\"\n self.result = result_map.copy()\n self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy\n self.result.loc[self.result['intent'] == 'private', 'predict_price'\n ] = private\n self.result.loc[self.result['intent'] == 'sell', 'predict_price'\n ] = sell\n self.result['predict_price'] = self.result['predict_price'].fillna(buy)\n self.result['popularity'] = popularity\n self.result['profit_rate'] = self.result.apply(process_profit_rate,\n axis=1)\n self.result['buy_profit_rate'] = self.result.apply(\n process_buy_profit_rate, axis=1)\n self.result['predict_price'] = self.result['predict_price'\n ] / self.result['buy_profit_rate']\n self.result['predict_price'] = self.result['profit_rate'\n ] * self.result['predict_price']\n self.result = cal_intent_condition(self.result.predict_price.values,\n price_bn)\n\n def follow_process(self, use_time, mile, price_bn, dealer_hedge,\n cpersonal_hedge, province, model_slug, model_detail_slug):\n \"\"\"\n 后续跟进处理\n \"\"\"\n dealer_price, cpersonal_price = (dealer_hedge * price_bn, \n cpersonal_hedge * price_bn)\n dealer_price = process_mile(dealer_price, use_time, mile)\n cpersonal_price = process_mile(cpersonal_price, use_time, mile)\n buy, private, sell = process_prices_relate(dealer_price,\n cpersonal_price)\n index = str(model_slug) + '_' + str(province)\n if index in province_popularity_index:\n popularity = province_popularity_map.loc[index, 'popularity']\n else:\n popularity = 'C'\n rate = process_adjust_profit(model_detail_slug, popularity)\n buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (\n 1 + 
rate)\n return buy, private, sell, popularity\n\n def predict(self, city='深圳', model_detail_slug='model_25023_cs',\n use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 预测返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug\n , city, use_time)\n buy, private, sell, popularity = self.follow_process(use_time, mile,\n price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,\n model_detail_slug)\n self.add_process_intent(buy, private, sell, popularity, price_bn)\n if ret_type == 'records':\n return self.result.to_dict('records')\n else:\n return self.result\n\n def predict_for_history(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_history(final_model_detail_slug, city,\n use_time)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return 
data_buy, data_private, data_sell\n\n def predict_for_future(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, times=3):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_future(final_model_detail_slug, city,\n use_time, times)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def history_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 计算历史价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='history')\n times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']\n nums = 6\n if use_time <= 6:\n times_str = []\n nums = use_time - 1\n for i in range(0, nums + 1):\n times_str.append(str(-i))\n data_buy, data_private, data_sell = self.predict_for_history(city,\n model_detail_slug, use_time, mile)\n data_buy = process_unreasonable_history_price(data_buy, nums)\n data_sell = process_unreasonable_history_price(data_sell, nums)\n data_private = process_unreasonable_history_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n 
result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n def future_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=365, mile=2, ret_type='records'):\n \"\"\"\n 计算未来价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='future')\n times_str = ['0', '12', '24', '36']\n nums = 3\n if use_time > 204:\n times_str = []\n nums = int((240 - use_time) / 12)\n for i in range(0, nums + 1):\n times_str.append(str(i * 12))\n data_buy, data_private, data_sell = self.predict_for_future(city,\n model_detail_slug, use_time, mile, len(times_str))\n data_buy = process_unreasonable_future_price(data_buy, nums)\n data_sell = process_unreasonable_future_price(data_sell, nums)\n data_private = process_unreasonable_future_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n",
"step-2": "<mask token>\n\n\ndef process_mile(price, use_time, mile):\n \"\"\"\n mile处理\n \"\"\"\n mile_per_month = mile / use_time\n if mile_per_month < gl.MILE_THRESHOLD_2_5:\n return price + 0.035 * (1 - mile_per_month / gl.MILE_THRESHOLD_2_5\n ) * price\n elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:\n return price - (0.04 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.035\n ) * price\n elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:\n return price - (0.05 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.075\n ) * price\n else:\n return price - 0.125 * price\n\n\n<mask token>\n\n\ndef process_unreasonable_history_price(data, nums):\n \"\"\"\n 处理不合理历史价格趋势\n \"\"\"\n if nums == 0:\n return data\n temp = data[1:]\n temp.sort()\n for i, value in enumerate(temp):\n data[i + 1] = temp[i]\n for i in range(0, nums):\n rate = (data[i + 1] - data[i]) / data[i + 1]\n if (data[i] >= data[i + 1]) | (0.003 > rate) | (0.0157 < rate):\n data[i + 1] = int(data[i] * 1.0083)\n return data\n\n\n<mask token>\n\n\ndef process_fill_zero(hedge):\n temp = hedge\n if len(hedge) < 18:\n for i in range(0, 18 - len(hedge)):\n temp = '0' + temp\n return temp\n\n\n<mask token>\n\n\ndef predict_from_db_history(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug_id']\n if use_time % 6 == 0:\n column_num = int(use_time / 6) - 1\n month_num = 6\n else:\n column_num = int(use_time / 6)\n month_num = use_time % 6\n dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(\n model_detail_slug_id, city_id, column_num, use_time)\n result = []\n if len(dealer_hedge) == 1:\n dealer_hedge = process_fill_zero(dealer_hedge[0])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])\n for i in range(0, use_time):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 
3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n elif len(dealer_hedge) == 2:\n dealer_hedge = process_fill_zero(dealer_hedge[0]) + process_fill_zero(\n dealer_hedge[1])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0]\n ) + process_fill_zero(cpersonal_hedge[1])\n for i in range(month_num - 1, month_num + 6):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n return result\n\n\n<mask token>\n\n\ndef process_prices_relate(dealer_price, cpersonal_price):\n \"\"\"\n 人工处理三类价格的相关性\n \"\"\"\n buy = dealer_price\n private = cpersonal_price\n private_buy_rate = (buy - private) / private\n if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):\n private = int(buy * (1 - 0.0875))\n sell = int(private * (1 - 0.0525))\n return buy, private, sell\n\n\n<mask token>\n\n\ndef check_params_value(city, model_detail_slug, use_time, mile, category):\n \"\"\"\n 校验参数\n \"\"\"\n if city not in cities:\n raise ApiParamsValueError('city', city, 'Unknown city!')\n if model_detail_slug not in models:\n raise ApiParamsValueError('model_detail_slug', model_detail_slug,\n 'Unknown model!')\n if not isinstance(mile, int) | isinstance(mile, float):\n raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')\n elif mile < 0:\n raise ApiParamsValueError('mile', mile,\n 'Mile must be greater than zero!')\n if not isinstance(use_time, int):\n raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')\n if category == 'valuate':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of Forecast must be in 1-240!')\n elif category == 'history':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of historical trend must be in 1-240!')\n elif category == 
'future':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of future trend must be in 1-240!')\n\n\nclass Predict(object):\n\n def __init__(self):\n \"\"\"\n 加载各类匹配表和模型\n \"\"\"\n self.result = []\n self.valuate_model = []\n\n def add_process_intent(self, buy, private, sell, popularity, price_bn):\n \"\"\"\n 根据交易方式修正预测值\n \"\"\"\n self.result = result_map.copy()\n self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy\n self.result.loc[self.result['intent'] == 'private', 'predict_price'\n ] = private\n self.result.loc[self.result['intent'] == 'sell', 'predict_price'\n ] = sell\n self.result['predict_price'] = self.result['predict_price'].fillna(buy)\n self.result['popularity'] = popularity\n self.result['profit_rate'] = self.result.apply(process_profit_rate,\n axis=1)\n self.result['buy_profit_rate'] = self.result.apply(\n process_buy_profit_rate, axis=1)\n self.result['predict_price'] = self.result['predict_price'\n ] / self.result['buy_profit_rate']\n self.result['predict_price'] = self.result['profit_rate'\n ] * self.result['predict_price']\n self.result = cal_intent_condition(self.result.predict_price.values,\n price_bn)\n\n def follow_process(self, use_time, mile, price_bn, dealer_hedge,\n cpersonal_hedge, province, model_slug, model_detail_slug):\n \"\"\"\n 后续跟进处理\n \"\"\"\n dealer_price, cpersonal_price = (dealer_hedge * price_bn, \n cpersonal_hedge * price_bn)\n dealer_price = process_mile(dealer_price, use_time, mile)\n cpersonal_price = process_mile(cpersonal_price, use_time, mile)\n buy, private, sell = process_prices_relate(dealer_price,\n cpersonal_price)\n index = str(model_slug) + '_' + str(province)\n if index in province_popularity_index:\n popularity = province_popularity_map.loc[index, 'popularity']\n else:\n popularity = 'C'\n rate = process_adjust_profit(model_detail_slug, popularity)\n buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (\n 1 + rate)\n 
return buy, private, sell, popularity\n\n def predict(self, city='深圳', model_detail_slug='model_25023_cs',\n use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 预测返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug\n , city, use_time)\n buy, private, sell, popularity = self.follow_process(use_time, mile,\n price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,\n model_detail_slug)\n self.add_process_intent(buy, private, sell, popularity, price_bn)\n if ret_type == 'records':\n return self.result.to_dict('records')\n else:\n return self.result\n\n def predict_for_history(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_history(final_model_detail_slug, city,\n use_time)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return 
data_buy, data_private, data_sell\n\n def predict_for_future(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, times=3):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_future(final_model_detail_slug, city,\n use_time, times)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def history_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 计算历史价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='history')\n times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']\n nums = 6\n if use_time <= 6:\n times_str = []\n nums = use_time - 1\n for i in range(0, nums + 1):\n times_str.append(str(-i))\n data_buy, data_private, data_sell = self.predict_for_history(city,\n model_detail_slug, use_time, mile)\n data_buy = process_unreasonable_history_price(data_buy, nums)\n data_sell = process_unreasonable_history_price(data_sell, nums)\n data_private = process_unreasonable_history_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n 
result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n def future_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=365, mile=2, ret_type='records'):\n \"\"\"\n 计算未来价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='future')\n times_str = ['0', '12', '24', '36']\n nums = 3\n if use_time > 204:\n times_str = []\n nums = int((240 - use_time) / 12)\n for i in range(0, nums + 1):\n times_str.append(str(i * 12))\n data_buy, data_private, data_sell = self.predict_for_future(city,\n model_detail_slug, use_time, mile, len(times_str))\n data_buy = process_unreasonable_future_price(data_buy, nums)\n data_sell = process_unreasonable_future_price(data_sell, nums)\n data_private = process_unreasonable_future_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n",
"step-3": "<mask token>\n\n\ndef process_mile(price, use_time, mile):\n \"\"\"\n mile处理\n \"\"\"\n mile_per_month = mile / use_time\n if mile_per_month < gl.MILE_THRESHOLD_2_5:\n return price + 0.035 * (1 - mile_per_month / gl.MILE_THRESHOLD_2_5\n ) * price\n elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:\n return price - (0.04 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.035\n ) * price\n elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:\n return price - (0.05 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.075\n ) * price\n else:\n return price - 0.125 * price\n\n\n<mask token>\n\n\ndef process_unreasonable_history_price(data, nums):\n \"\"\"\n 处理不合理历史价格趋势\n \"\"\"\n if nums == 0:\n return data\n temp = data[1:]\n temp.sort()\n for i, value in enumerate(temp):\n data[i + 1] = temp[i]\n for i in range(0, nums):\n rate = (data[i + 1] - data[i]) / data[i + 1]\n if (data[i] >= data[i + 1]) | (0.003 > rate) | (0.0157 < rate):\n data[i + 1] = int(data[i] * 1.0083)\n return data\n\n\n<mask token>\n\n\ndef process_fill_zero(hedge):\n temp = hedge\n if len(hedge) < 18:\n for i in range(0, 18 - len(hedge)):\n temp = '0' + temp\n return temp\n\n\ndef predict_from_db(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug_id']\n if use_time % 6 == 0:\n column_num = str(int(use_time / 6) - 1)\n month_num = 6\n else:\n column_num = str(int(use_time / 6))\n month_num = use_time % 6\n record = db_operate.query_valuate(model_detail_slug_id, city_id,\n column_num, use_time)\n dealer_hedge = str(record.loc[0, 'b2c_year_' + column_num])\n dealer_hedge = process_fill_zero(dealer_hedge)\n dealer_hedge = dealer_hedge[(month_num - 1) * 3:month_num * 3]\n dealer_hedge = int(dealer_hedge) / 1000\n cpersonal_hedge = str(record.loc[0, 'c2c_year_' + column_num])\n cpersonal_hedge = 
process_fill_zero(cpersonal_hedge)\n cpersonal_hedge = cpersonal_hedge[(month_num - 1) * 3:month_num * 3]\n cpersonal_hedge = int(cpersonal_hedge) / 1000\n return dealer_hedge, cpersonal_hedge\n\n\ndef predict_from_db_history(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug_id']\n if use_time % 6 == 0:\n column_num = int(use_time / 6) - 1\n month_num = 6\n else:\n column_num = int(use_time / 6)\n month_num = use_time % 6\n dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(\n model_detail_slug_id, city_id, column_num, use_time)\n result = []\n if len(dealer_hedge) == 1:\n dealer_hedge = process_fill_zero(dealer_hedge[0])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])\n for i in range(0, use_time):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n elif len(dealer_hedge) == 2:\n dealer_hedge = process_fill_zero(dealer_hedge[0]) + process_fill_zero(\n dealer_hedge[1])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0]\n ) + process_fill_zero(cpersonal_hedge[1])\n for i in range(month_num - 1, month_num + 6):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n return result\n\n\n<mask token>\n\n\ndef process_prices_relate(dealer_price, cpersonal_price):\n \"\"\"\n 人工处理三类价格的相关性\n \"\"\"\n buy = dealer_price\n private = cpersonal_price\n private_buy_rate = (buy - private) / private\n if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):\n private = int(buy * (1 - 0.0875))\n sell = int(private * (1 - 0.0525))\n return buy, private, sell\n\n\n<mask token>\n\n\ndef 
check_params_value(city, model_detail_slug, use_time, mile, category):\n \"\"\"\n 校验参数\n \"\"\"\n if city not in cities:\n raise ApiParamsValueError('city', city, 'Unknown city!')\n if model_detail_slug not in models:\n raise ApiParamsValueError('model_detail_slug', model_detail_slug,\n 'Unknown model!')\n if not isinstance(mile, int) | isinstance(mile, float):\n raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')\n elif mile < 0:\n raise ApiParamsValueError('mile', mile,\n 'Mile must be greater than zero!')\n if not isinstance(use_time, int):\n raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')\n if category == 'valuate':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of Forecast must be in 1-240!')\n elif category == 'history':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of historical trend must be in 1-240!')\n elif category == 'future':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of future trend must be in 1-240!')\n\n\nclass Predict(object):\n\n def __init__(self):\n \"\"\"\n 加载各类匹配表和模型\n \"\"\"\n self.result = []\n self.valuate_model = []\n\n def add_process_intent(self, buy, private, sell, popularity, price_bn):\n \"\"\"\n 根据交易方式修正预测值\n \"\"\"\n self.result = result_map.copy()\n self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy\n self.result.loc[self.result['intent'] == 'private', 'predict_price'\n ] = private\n self.result.loc[self.result['intent'] == 'sell', 'predict_price'\n ] = sell\n self.result['predict_price'] = self.result['predict_price'].fillna(buy)\n self.result['popularity'] = popularity\n self.result['profit_rate'] = self.result.apply(process_profit_rate,\n axis=1)\n self.result['buy_profit_rate'] = self.result.apply(\n process_buy_profit_rate, axis=1)\n self.result['predict_price'] = 
self.result['predict_price'\n ] / self.result['buy_profit_rate']\n self.result['predict_price'] = self.result['profit_rate'\n ] * self.result['predict_price']\n self.result = cal_intent_condition(self.result.predict_price.values,\n price_bn)\n\n def follow_process(self, use_time, mile, price_bn, dealer_hedge,\n cpersonal_hedge, province, model_slug, model_detail_slug):\n \"\"\"\n 后续跟进处理\n \"\"\"\n dealer_price, cpersonal_price = (dealer_hedge * price_bn, \n cpersonal_hedge * price_bn)\n dealer_price = process_mile(dealer_price, use_time, mile)\n cpersonal_price = process_mile(cpersonal_price, use_time, mile)\n buy, private, sell = process_prices_relate(dealer_price,\n cpersonal_price)\n index = str(model_slug) + '_' + str(province)\n if index in province_popularity_index:\n popularity = province_popularity_map.loc[index, 'popularity']\n else:\n popularity = 'C'\n rate = process_adjust_profit(model_detail_slug, popularity)\n buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (\n 1 + rate)\n return buy, private, sell, popularity\n\n def predict(self, city='深圳', model_detail_slug='model_25023_cs',\n use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 预测返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug\n , city, use_time)\n buy, private, sell, popularity = self.follow_process(use_time, mile,\n price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,\n model_detail_slug)\n self.add_process_intent(buy, private, sell, popularity, price_bn)\n if ret_type == 'records':\n return self.result.to_dict('records')\n else:\n 
return self.result\n\n def predict_for_history(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_history(final_model_detail_slug, city,\n use_time)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def predict_for_future(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, times=3):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_future(final_model_detail_slug, city,\n use_time, times)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n 
data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def history_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 计算历史价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='history')\n times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']\n nums = 6\n if use_time <= 6:\n times_str = []\n nums = use_time - 1\n for i in range(0, nums + 1):\n times_str.append(str(-i))\n data_buy, data_private, data_sell = self.predict_for_history(city,\n model_detail_slug, use_time, mile)\n data_buy = process_unreasonable_history_price(data_buy, nums)\n data_sell = process_unreasonable_history_price(data_sell, nums)\n data_private = process_unreasonable_history_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n def future_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=365, mile=2, ret_type='records'):\n \"\"\"\n 计算未来价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='future')\n times_str = ['0', '12', '24', '36']\n nums = 3\n if use_time > 204:\n times_str = []\n nums = int((240 - use_time) / 12)\n for i in range(0, nums + 1):\n times_str.append(str(i * 12))\n data_buy, data_private, data_sell = self.predict_for_future(city,\n model_detail_slug, use_time, mile, len(times_str))\n data_buy = process_unreasonable_future_price(data_buy, nums)\n data_sell = 
process_unreasonable_future_price(data_sell, nums)\n data_private = process_unreasonable_future_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n",
"step-4": "<mask token>\n\n\ndef get_profit_rate(intent, popularity):\n \"\"\"\n 获取畅销系数\n \"\"\"\n profits = gl.PROFITS\n profit = profits[popularity]\n if intent == 'sell':\n profit_rate = 1 - profit[0] - profit[1]\n elif intent == 'buy':\n profit_rate = 1 - profit[0]\n elif intent == 'release':\n profit_rate = 1\n elif intent == 'private':\n profit_rate = 1 - profit[0] - profit[2]\n elif intent == 'lowest':\n profit_rate = 1 - profit[0] - profit[1] - profit[3]\n elif intent == 'cpo':\n profit_rate = 1 - profit[0] - profit[8]\n elif intent == 'replace':\n profit_rate = 1 - profit[0] - profit[4]\n elif intent == 'auction':\n profit_rate = 1 - profit[0] - profit[5]\n elif intent == 'avg-buy':\n profit_rate = 1 - profit[0] - profit[7]\n elif intent == 'avg-sell':\n profit_rate = 1 - profit[0] - profit[6]\n return profit_rate\n\n\ndef cal_intent_condition(prices, price_bn):\n \"\"\"\n 计算所有交易方式的4个级别车况价\n \"\"\"\n if prices[2] * 1.03 > price_bn:\n rate = prices[2] * 1.03 / price_bn\n prices = prices / rate\n df1 = pd.DataFrame(prices)\n df2 = pd.DataFrame([gl.CAR_CONDITION_COEFFICIENT_VALUES])\n all_map = df1.dot(df2)\n all_map.columns = ['excellent', 'good', 'fair', 'bad']\n all_map['intent'] = pd.Series(gl.INTENT_TYPE).values\n all_map = all_map.loc[:, ['intent', 'excellent', 'good', 'fair', 'bad']]\n all_map[['excellent', 'good', 'fair', 'bad']] = all_map[['excellent',\n 'good', 'fair', 'bad']].astype(int)\n return all_map\n\n\ndef process_mile(price, use_time, mile):\n \"\"\"\n mile处理\n \"\"\"\n mile_per_month = mile / use_time\n if mile_per_month < gl.MILE_THRESHOLD_2_5:\n return price + 0.035 * (1 - mile_per_month / gl.MILE_THRESHOLD_2_5\n ) * price\n elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:\n return price - (0.04 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.035\n ) * price\n elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:\n return price - (0.05 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.075\n ) * price\n else:\n 
return price - 0.125 * price\n\n\n<mask token>\n\n\ndef process_unreasonable_history_price(data, nums):\n \"\"\"\n 处理不合理历史价格趋势\n \"\"\"\n if nums == 0:\n return data\n temp = data[1:]\n temp.sort()\n for i, value in enumerate(temp):\n data[i + 1] = temp[i]\n for i in range(0, nums):\n rate = (data[i + 1] - data[i]) / data[i + 1]\n if (data[i] >= data[i + 1]) | (0.003 > rate) | (0.0157 < rate):\n data[i + 1] = int(data[i] * 1.0083)\n return data\n\n\n<mask token>\n\n\ndef process_fill_zero(hedge):\n temp = hedge\n if len(hedge) < 18:\n for i in range(0, 18 - len(hedge)):\n temp = '0' + temp\n return temp\n\n\ndef predict_from_db(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug_id']\n if use_time % 6 == 0:\n column_num = str(int(use_time / 6) - 1)\n month_num = 6\n else:\n column_num = str(int(use_time / 6))\n month_num = use_time % 6\n record = db_operate.query_valuate(model_detail_slug_id, city_id,\n column_num, use_time)\n dealer_hedge = str(record.loc[0, 'b2c_year_' + column_num])\n dealer_hedge = process_fill_zero(dealer_hedge)\n dealer_hedge = dealer_hedge[(month_num - 1) * 3:month_num * 3]\n dealer_hedge = int(dealer_hedge) / 1000\n cpersonal_hedge = str(record.loc[0, 'c2c_year_' + column_num])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge)\n cpersonal_hedge = cpersonal_hedge[(month_num - 1) * 3:month_num * 3]\n cpersonal_hedge = int(cpersonal_hedge) / 1000\n return dealer_hedge, cpersonal_hedge\n\n\ndef predict_from_db_history(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug_id']\n if use_time % 6 == 0:\n column_num = int(use_time / 6) - 1\n month_num = 6\n else:\n column_num = int(use_time / 6)\n month_num = use_time % 6\n dealer_hedge, 
cpersonal_hedge = db_operate.query_valuate_history(\n model_detail_slug_id, city_id, column_num, use_time)\n result = []\n if len(dealer_hedge) == 1:\n dealer_hedge = process_fill_zero(dealer_hedge[0])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])\n for i in range(0, use_time):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n elif len(dealer_hedge) == 2:\n dealer_hedge = process_fill_zero(dealer_hedge[0]) + process_fill_zero(\n dealer_hedge[1])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0]\n ) + process_fill_zero(cpersonal_hedge[1])\n for i in range(month_num - 1, month_num + 6):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n return result\n\n\n<mask token>\n\n\ndef process_prices_relate(dealer_price, cpersonal_price):\n \"\"\"\n 人工处理三类价格的相关性\n \"\"\"\n buy = dealer_price\n private = cpersonal_price\n private_buy_rate = (buy - private) / private\n if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):\n private = int(buy * (1 - 0.0875))\n sell = int(private * (1 - 0.0525))\n return buy, private, sell\n\n\n<mask token>\n\n\ndef check_params_value(city, model_detail_slug, use_time, mile, category):\n \"\"\"\n 校验参数\n \"\"\"\n if city not in cities:\n raise ApiParamsValueError('city', city, 'Unknown city!')\n if model_detail_slug not in models:\n raise ApiParamsValueError('model_detail_slug', model_detail_slug,\n 'Unknown model!')\n if not isinstance(mile, int) | isinstance(mile, float):\n raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')\n elif mile < 0:\n raise ApiParamsValueError('mile', mile,\n 'Mile must be greater than zero!')\n if not isinstance(use_time, int):\n raise ApiParamsTypeError('use_time', use_time, 
'Use_time must be int!')\n if category == 'valuate':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of Forecast must be in 1-240!')\n elif category == 'history':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of historical trend must be in 1-240!')\n elif category == 'future':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of future trend must be in 1-240!')\n\n\nclass Predict(object):\n\n def __init__(self):\n \"\"\"\n 加载各类匹配表和模型\n \"\"\"\n self.result = []\n self.valuate_model = []\n\n def add_process_intent(self, buy, private, sell, popularity, price_bn):\n \"\"\"\n 根据交易方式修正预测值\n \"\"\"\n self.result = result_map.copy()\n self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy\n self.result.loc[self.result['intent'] == 'private', 'predict_price'\n ] = private\n self.result.loc[self.result['intent'] == 'sell', 'predict_price'\n ] = sell\n self.result['predict_price'] = self.result['predict_price'].fillna(buy)\n self.result['popularity'] = popularity\n self.result['profit_rate'] = self.result.apply(process_profit_rate,\n axis=1)\n self.result['buy_profit_rate'] = self.result.apply(\n process_buy_profit_rate, axis=1)\n self.result['predict_price'] = self.result['predict_price'\n ] / self.result['buy_profit_rate']\n self.result['predict_price'] = self.result['profit_rate'\n ] * self.result['predict_price']\n self.result = cal_intent_condition(self.result.predict_price.values,\n price_bn)\n\n def follow_process(self, use_time, mile, price_bn, dealer_hedge,\n cpersonal_hedge, province, model_slug, model_detail_slug):\n \"\"\"\n 后续跟进处理\n \"\"\"\n dealer_price, cpersonal_price = (dealer_hedge * price_bn, \n cpersonal_hedge * price_bn)\n dealer_price = process_mile(dealer_price, use_time, mile)\n cpersonal_price = process_mile(cpersonal_price, use_time, mile)\n buy, private, sell = 
process_prices_relate(dealer_price,\n cpersonal_price)\n index = str(model_slug) + '_' + str(province)\n if index in province_popularity_index:\n popularity = province_popularity_map.loc[index, 'popularity']\n else:\n popularity = 'C'\n rate = process_adjust_profit(model_detail_slug, popularity)\n buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (\n 1 + rate)\n return buy, private, sell, popularity\n\n def predict(self, city='深圳', model_detail_slug='model_25023_cs',\n use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 预测返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug\n , city, use_time)\n buy, private, sell, popularity = self.follow_process(use_time, mile,\n price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,\n model_detail_slug)\n self.add_process_intent(buy, private, sell, popularity, price_bn)\n if ret_type == 'records':\n return self.result.to_dict('records')\n else:\n return self.result\n\n def predict_for_history(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = 
predict_from_db_history(final_model_detail_slug, city,\n use_time)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def predict_for_future(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, times=3):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_future(final_model_detail_slug, city,\n use_time, times)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def history_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 计算历史价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='history')\n times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']\n nums = 6\n if use_time <= 6:\n times_str = []\n nums = use_time - 1\n for i in range(0, nums + 1):\n times_str.append(str(-i))\n data_buy, data_private, data_sell = self.predict_for_history(city,\n model_detail_slug, use_time, 
mile)\n data_buy = process_unreasonable_history_price(data_buy, nums)\n data_sell = process_unreasonable_history_price(data_sell, nums)\n data_private = process_unreasonable_history_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n def future_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=365, mile=2, ret_type='records'):\n \"\"\"\n 计算未来价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='future')\n times_str = ['0', '12', '24', '36']\n nums = 3\n if use_time > 204:\n times_str = []\n nums = int((240 - use_time) / 12)\n for i in range(0, nums + 1):\n times_str.append(str(i * 12))\n data_buy, data_private, data_sell = self.predict_for_future(city,\n model_detail_slug, use_time, mile, len(times_str))\n data_buy = process_unreasonable_future_price(data_buy, nums)\n data_sell = process_unreasonable_future_price(data_sell, nums)\n data_private = process_unreasonable_future_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n",
"step-5": "from valuate.predict import *\n\n\ndef get_profit_rate(intent, popularity):\n \"\"\"\n 获取畅销系数\n \"\"\"\n # 按畅销程度分级,各交易方式相比于标价的固定比例\n profits = gl.PROFITS\n profit = profits[popularity]\n # 计算各交易方式的价格相比于标价的固定比例\n if intent == 'sell':\n # 商家收购价相比加权平均价的比例\n profit_rate = 1 - profit[0] - profit[1]\n elif intent == 'buy':\n # 商家真实售价相比加权平均价的比例\n profit_rate = 1 - profit[0]\n elif intent == 'release':\n # 建议标价相比加权平均价的比例\n profit_rate = 1\n elif intent == 'private':\n # C2C价格相比加权平均价的比例\n profit_rate = 1 - profit[0] - profit[2]\n elif intent == 'lowest':\n # 最低成交价相比加权平均价的比例\n profit_rate = 1 - profit[0] - profit[1] - profit[3]\n elif intent == 'cpo':\n # 认证二手车价相比加权平均价的差异比例\n profit_rate = 1 - profit[0] - profit[8]\n elif intent == 'replace':\n # 4S店置换价相比加权平均价的比例\n profit_rate = 1 - profit[0] - profit[4]\n elif intent == 'auction':\n # 拍卖价相比加权平均价的差异比例\n profit_rate = 1 - profit[0] - profit[5]\n elif intent == 'avg-buy':\n # 平均买车价相比加权平均价的差异比例\n profit_rate = 1 - profit[0] - profit[7]\n elif intent == 'avg-sell':\n # 平均卖车价价相比加权平均价的差异比例\n profit_rate = 1 - profit[0] - profit[6]\n return profit_rate\n\n\ndef cal_intent_condition(prices, price_bn):\n \"\"\"\n 计算所有交易方式的4个级别车况价\n \"\"\"\n if(prices[2] * 1.03) > price_bn:\n rate = (prices[2] * 1.03) / price_bn\n prices = prices / rate\n\n df1 = pd.DataFrame(prices)\n df2 = pd.DataFrame([gl.CAR_CONDITION_COEFFICIENT_VALUES])\n all_map = df1.dot(df2)\n all_map.columns = ['excellent', 'good', 'fair', 'bad']\n all_map['intent'] = pd.Series(gl.INTENT_TYPE).values\n all_map = all_map.loc[:, ['intent', 'excellent', 'good', 'fair', 'bad']]\n all_map[['excellent', 'good', 'fair', 'bad']] = all_map[['excellent', 'good', 'fair', 'bad']].astype(int)\n return all_map\n\n\ndef process_mile(price, use_time, mile):\n \"\"\"\n mile处理\n \"\"\"\n # 正常行驶的车辆以一年2.5万公里为正常基数,低于2.5万公里的价格的浮动在+3.5%以内\n # 大于2.5万公里的若每年的平均行驶里程大于2.5万公里小于5万公里价格浮动在-3.5-7.5%\n # 若年平均形式里程大于5万公里及以上影响价格在-7.5-12.5%之间\n mile_per_month = mile / use_time\n if mile_per_month < 
gl.MILE_THRESHOLD_2_5:\n return price + 0.035 * (1 - mile_per_month/gl.MILE_THRESHOLD_2_5) * price\n elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:\n return price - (0.04 * (mile_per_month/gl.MILE_THRESHOLD_5)+0.035) * price\n elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:\n return price - (0.05 * (mile_per_month/gl.MILE_THRESHOLD_5)+0.075) * price\n else:\n return price - 0.125 * price\n\n\ndef process_profit_rate(df):\n \"\"\"\n 畅销系数处理\n \"\"\"\n return get_profit_rate(df['intent'], df['popularity'])\n\n\ndef process_buy_profit_rate(df):\n \"\"\"\n 畅销系数处理\n \"\"\"\n return get_profit_rate(df['intent_source'], df['popularity'])\n\n\ndef process_unreasonable_history_price(data, nums):\n \"\"\"\n 处理不合理历史价格趋势\n \"\"\"\n if nums == 0:\n return data\n\n temp = data[1:]\n temp.sort()\n for i, value in enumerate(temp):\n data[i+1] = temp[i]\n\n for i in range(0, nums):\n rate = (data[i + 1] - data[i]) / data[i + 1]\n if (data[i] >= data[i + 1]) | (0.003 > rate) | (0.0157 < rate):\n data[i + 1] = int(data[i] * 1.0083)\n\n return data\n\n\ndef process_unreasonable_future_price(data, nums):\n \"\"\"\n 处理不合理未来价格趋势\n \"\"\"\n temp = data[1:]\n temp.sort(reverse=True)\n for i, value in enumerate(temp):\n data[i+1] = temp[i]\n\n for i in range(0, nums):\n rate = (data[i] - data[i + 1]) / data[i]\n if (data[i] <= data[i + 1]) | (0.036 > rate) | (0.188 < rate):\n data[i + 1] = int(data[i] * 0.9)\n\n return data\n\n\ndef process_fill_zero(hedge):\n temp = hedge\n if len(hedge) < 18:\n for i in range(0, (18-len(hedge))):\n temp = '0'+temp\n return temp\n\n\ndef predict_from_db(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n # 查找city和model_detail_slug编号\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id']\n # 计算查询字段编号和月编号\n if (use_time % 6) == 0:\n column_num = str(int(use_time / 6) - 1)\n month_num = 6\n else:\n column_num = 
str(int(use_time / 6))\n month_num = use_time % 6\n # 查询\n record = db_operate.query_valuate(model_detail_slug_id, city_id, column_num, use_time)\n # 查找对应值\n dealer_hedge = str(record.loc[0, 'b2c_year_'+column_num])\n dealer_hedge = process_fill_zero(dealer_hedge)\n dealer_hedge = dealer_hedge[(month_num-1)*3:month_num*3]\n dealer_hedge = int(dealer_hedge) / 1000\n cpersonal_hedge = str(record.loc[0, 'c2c_year_'+column_num])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge)\n cpersonal_hedge = cpersonal_hedge[(month_num-1)*3:month_num*3]\n cpersonal_hedge = int(cpersonal_hedge) / 1000\n return dealer_hedge, cpersonal_hedge\n\n\ndef predict_from_db_history(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n # 查找city和model_detail_slug编号\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id']\n # 计算查询字段编号和月编号\n if (use_time % 6) == 0:\n column_num = int(use_time / 6) - 1\n month_num = 6\n else:\n column_num = int(use_time / 6)\n month_num = use_time % 6\n # 查询\n dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(model_detail_slug_id, city_id, column_num, use_time)\n # 查找对应值\n result = []\n if len(dealer_hedge) == 1:\n dealer_hedge = process_fill_zero(dealer_hedge[0])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])\n for i in range(0, use_time):\n dealer_per = dealer_hedge[i*3:(i+1)*3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per)/1000, int(cpersonal_per)/1000, use_time])\n result.reverse()\n elif len(dealer_hedge) == 2:\n dealer_hedge = process_fill_zero(dealer_hedge[0])+process_fill_zero(dealer_hedge[1])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])+process_fill_zero(cpersonal_hedge[1])\n for i in range(month_num-1, month_num+6):\n dealer_per = dealer_hedge[i*3:(i+1)*3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per)/1000, int(cpersonal_per)/1000, 
use_time])\n result.reverse()\n return result\n\n\ndef predict_from_db_future(model_detail_slug, city, use_time, times):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n # 查找city和model_detail_slug编号\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id']\n # 计算查询字段编号和月编号\n if (use_time % 6) == 0:\n column_num = int(use_time / 6) - 1\n month_num = 6\n else:\n column_num = int(use_time / 6)\n month_num = use_time % 6\n # 查询\n record = db_operate.query_valuate_future(model_detail_slug_id, city_id)\n # 查找对应值\n result = []\n for i in range(0, times):\n dealer_hedge = str(record.loc[0, 'b2c_year_' + str(column_num+i*2)])\n dealer_hedge = process_fill_zero(dealer_hedge)\n dealer_hedge = dealer_hedge[(month_num - 1) * 3:month_num * 3]\n dealer_hedge = int(dealer_hedge) / 1000\n cpersonal_hedge = str(record.loc[0, 'c2c_year_' + str(column_num+i*2)])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge)\n cpersonal_hedge = cpersonal_hedge[(month_num - 1) * 3:month_num * 3]\n cpersonal_hedge = int(cpersonal_hedge) / 1000\n result.append([dealer_hedge, cpersonal_hedge, use_time+i*12])\n return result\n\n\ndef process_prices_relate(dealer_price, cpersonal_price):\n \"\"\"\n 人工处理三类价格的相关性\n \"\"\"\n buy = dealer_price\n private = cpersonal_price\n # 计算buy与private的比例关系\n private_buy_rate = (buy - private) / private\n # 人工处理预测不合理的三类价格\n if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):\n private = int(buy * (1 - 0.0875))\n\n sell = int(private * (1 - 0.0525))\n return buy, private, sell\n\n\ndef process_adjust_profit(model_detail_slug, popularity):\n \"\"\"\n 调整值调整\n \"\"\"\n index = str(model_detail_slug)+'_'+str(popularity)\n if index in model_detail_slug_popularity_index:\n rate = adjust_profit.loc[index, 'rate']\n else:\n rate = 0\n return rate\n\n\ndef check_params_value(city, model_detail_slug, use_time, mile, category):\n \"\"\"\n 校验参数\n \"\"\"\n # 校验city\n if city not in cities:\n raise 
ApiParamsValueError('city', city, 'Unknown city!')\n # 校验model\n if model_detail_slug not in models:\n raise ApiParamsValueError('model_detail_slug', model_detail_slug, 'Unknown model!')\n # 校验mile\n if not ((isinstance(mile, int)) | (isinstance(mile, float))):\n raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')\n elif mile < 0:\n raise ApiParamsValueError('mile', mile, 'Mile must be greater than zero!')\n # 校验use_time\n if not isinstance(use_time, int):\n raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')\n if category == 'valuate':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time, 'The use_time of Forecast must be in 1-240!')\n elif category == 'history':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time, 'The use_time of historical trend must be in 1-240!')\n elif category == 'future':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time, 'The use_time of future trend must be in 1-240!')\n\n\nclass Predict(object):\n\n def __init__(self):\n \"\"\"\n 加载各类匹配表和模型\n \"\"\"\n self.result = []\n self.valuate_model = []\n\n def add_process_intent(self, buy, private, sell, popularity, price_bn):\n \"\"\"\n 根据交易方式修正预测值\n \"\"\"\n # 组合结果\n self.result = result_map.copy()\n self.result.loc[(self.result['intent'] == 'buy'), 'predict_price'] = buy\n self.result.loc[(self.result['intent'] == 'private'), 'predict_price'] = private\n self.result.loc[(self.result['intent'] == 'sell'), 'predict_price'] = sell\n self.result['predict_price'] = self.result['predict_price'].fillna(buy)\n\n self.result['popularity'] = popularity\n self.result['profit_rate'] = self.result.apply(process_profit_rate, axis=1)\n self.result['buy_profit_rate'] = self.result.apply(process_buy_profit_rate, axis=1)\n self.result['predict_price'] = self.result['predict_price'] / self.result['buy_profit_rate']\n self.result['predict_price'] = 
self.result['profit_rate'] * self.result['predict_price']\n\n # 计算所有交易类型\n self.result = cal_intent_condition(self.result.predict_price.values, price_bn)\n\n def follow_process(self, use_time, mile, price_bn, dealer_hedge, cpersonal_hedge, province, model_slug, model_detail_slug):\n \"\"\"\n 后续跟进处理\n \"\"\"\n # 获取价格\n dealer_price, cpersonal_price = dealer_hedge * price_bn, cpersonal_hedge * price_bn\n # 处理mile\n dealer_price = process_mile(dealer_price, use_time, mile)\n cpersonal_price = process_mile(cpersonal_price, use_time, mile)\n # 处理价格之间的相关性\n buy, private, sell = process_prices_relate(dealer_price, cpersonal_price)\n # 获取流行度\n index = str(model_slug) + '_' + str(province)\n if index in province_popularity_index:\n popularity = province_popularity_map.loc[index, 'popularity']\n else:\n popularity = 'C'\n # 进行调整值最终调整\n rate = process_adjust_profit(model_detail_slug, popularity)\n buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (1 + rate)\n return buy, private, sell, popularity\n\n def predict(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 预测返回\n \"\"\"\n # 校验参数\n check_params_value(city, model_detail_slug, use_time, mile, category='valuate')\n\n # 查找款型对应的新车指导价,调整后的款型\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug']\n # 预测返回保值率\n dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug, city, use_time)\n buy, private, sell, popularity = self.follow_process(use_time, mile, price_bn, dealer_hedge, cpersonal_hedge, province, model_slug, model_detail_slug)\n # 根据交易方式修正预测值\n self.add_process_intent(buy, private, sell, popularity, price_bn)\n\n if ret_type == 'records':\n return 
self.result.to_dict('records')\n else:\n return self.result\n\n def predict_for_history(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n # 校验参数\n check_params_value(city, model_detail_slug, use_time, mile, category='valuate')\n\n # 查找款型对应的新车指导价,调整后的款型\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug']\n\n # 预测返回保值率\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_history(final_model_detail_slug, city, use_time)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per, mile, price_bn, dealer_hedge,\n cpersonal_hedge, province, model_slug,\n model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def predict_for_future(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2, times=3):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n # 校验参数\n check_params_value(city, model_detail_slug, use_time, mile, category='valuate')\n\n # 查找款型对应的新车指导价,调整后的款型\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug']\n\n # 预测返回保值率\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_future(final_model_detail_slug, city, use_time, times)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per, mile, price_bn, 
dealer_hedge,\n cpersonal_hedge, province, model_slug,\n model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def history_price_trend(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 计算历史价格趋势\n \"\"\"\n # 校验参数\n check_params_value(city, model_detail_slug, use_time, mile, category='history')\n # 计算时间\n times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']\n nums = 6\n if use_time <= 6:\n times_str = []\n nums = use_time-1\n for i in range(0, nums+1):\n times_str.append(str(-i))\n # 计算车商交易价,车商收购价的历史价格走势\n data_buy, data_private, data_sell = self.predict_for_history(city, model_detail_slug, use_time, mile)\n\n # 处理异常值\n data_buy = process_unreasonable_history_price(data_buy, nums)\n data_sell = process_unreasonable_history_price(data_sell, nums)\n data_private = process_unreasonable_history_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n def future_price_trend(self, city='深圳', model_detail_slug='model_25023_cs', use_time=365, mile=2, ret_type='records'):\n \"\"\"\n 计算未来价格趋势\n \"\"\"\n # 校验参数\n check_params_value(city, model_detail_slug, use_time, mile, category='future')\n # 计算时间\n times_str = ['0', '12', '24', '36']\n nums = 3\n if use_time > 204:\n times_str = []\n nums = int((240-use_time) / 12)\n for i in range(0, nums+1):\n times_str.append(str(i*12))\n # 计算个人交易价的未来价格趋势\n data_buy, data_private, data_sell = 
self.predict_for_future(city, model_detail_slug, use_time, mile, len(times_str))\n\n data_buy = process_unreasonable_future_price(data_buy, nums)\n data_sell = process_unreasonable_future_price(data_sell, nums)\n data_private = process_unreasonable_future_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n",
"step-ids": [
12,
15,
16,
18,
25
]
}
|
[
12,
15,
16,
18,
25
] |
import torch, torchvision
import torch.nn.functional as F
import transformers
from transformers import BertTokenizer, BertModel
from transformers.models.bert.modeling_bert import BertPreTrainingHeads
from utils import construct_bert_input, EvaluationDataset, save_json
from fashionbert_evaluator_parser import Evaluation_negpairs, get_all_paired_test_set
import argparse
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
# Run on the first CUDA device when available; otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class FashionbertEvaluator(transformers.BertPreTrainedModel):
    """FashionBERT wrapper used at evaluation time.

    Runs BERT over joint (text tokens + image patch) embeddings and scores
    whether a text/image pair is aligned, for image-to-text and
    text-to-image retrieval ranking.
    """

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        # Projects 2048-dim image-patch features into BERT's 768-dim space.
        self.im_to_embedding = torch.nn.Linear(2048, 768)
        self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # Heads for masked-token prediction and text/image alignment.
        self.cls = BertPreTrainingHeads(config)
        self.init_weights()

    def _alignment_confidence(self, embeds, att_mask, labels, is_paired):
        """Return the model's confidence (logit for class 1, "aligned") for one pair.

        Args:
            embeds: joint text+image embeddings, [1, 512, 768].
            att_mask: joint attention mask, [1, 512].
            labels: token ids of the text part, [1, 448].
            is_paired: ground-truth alignment flag (bool).
        """
        alignment_logits, _ = self.get_scores_and_metrics(
            embeds=embeds.to(device),
            attention_mask=att_mask.to(device),
            labels=labels.to(device),
            is_paired=torch.tensor(is_paired).to(device),
            only_alignment=True,
        )
        logits = alignment_logits.squeeze()
        # Index 1 is the logit for the "actually aligned" class.
        return logits[1].detach().item()

    def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n):
        """Score one text query against its positive image and the negative images.

        INPUTS:
            input_ids: [1, 448]
            embeds: [1, 512, 768] joint embedding of the positive pair
            att_mask: [1, 512]
            embeds_n: list of [1, 512, 768] joint embeddings (negatives)
            att_mask_n: list of matching attention masks

        Returns:
            List of (score, is_positive) tuples sorted by score, descending.
        """
        scores = [self._alignment_confidence(embeds, att_mask, input_ids, True)]
        labels = [True]
        for emb_neg, mask_neg in zip(embeds_n, att_mask_n):
            scores.append(self._alignment_confidence(emb_neg, mask_neg, input_ids, False))
            labels.append(False)
        return sorted(zip(scores, labels), key=lambda pair: pair[0], reverse=True)

    def img2text_scores(self, input_ids_p, embeds_p, att_mask_p, input_ids_n, embeds_n, att_mask_n):
        """Score one image query against its positive text and the negative texts.

        INPUTS:
            input_ids_p: [1, 448]
            embeds_p: [1, 512, 768] joint embedding of the positive pair
            att_mask_p: [1, 512]
            input_ids_n: list of [1, 448] negative texts
            embeds_n: list of [1, 512, 768] joint embeddings (negatives)
            att_mask_n: list of matching attention masks

        Returns:
            List of (score, is_positive) tuples sorted by score, descending.
        """
        scores = [self._alignment_confidence(embeds_p, att_mask_p, input_ids_p, True)]
        labels = [True]
        for ids_neg, emb_neg, mask_neg in zip(input_ids_n, embeds_n, att_mask_n):
            scores.append(self._alignment_confidence(emb_neg, mask_neg, ids_neg, False))
            labels.append(False)
        return sorted(zip(scores, labels), key=lambda pair: pair[0], reverse=True)

    def rank_at_K(self, dict_scores, img2text=True):
        """Compute Rank@K for K in (1, 5, 10).

        Rank@K is the fraction of queries whose positive candidate appears
        among the top-K scored candidates. Prints and returns the report.
        """
        header = '------ Image 2 Text ------\n' if img2text else '------ Text 2 Image ------\n'
        # BUG FIX: the original appended the header to the returned log only
        # for the image2text case; the text2image header was printed but lost.
        logs = header
        print(header)
        for K in (1, 5, 10):
            found = 0
            for candidates in dict_scores.values():
                # Candidates are sorted by score; a hit is a positive in the top K.
                for _score, label in candidates[:K]:
                    if label:
                        found += 1
                        break
            line = '------ Rank @ {} = {} ------\n'.format(K, (found / len(dict_scores.keys())))
            logs += line
            print(line)
        return logs

    def get_scores_and_metrics(
            self,
            embeds,           # text + image embedded, [batch, seq_len, hidden]
            attention_mask,   # text + image attention mask
            labels=None,      # text token ids, [batch, 448]
            is_paired=None,   # ground-truth alignment flag
            only_alignment=False,
    ):
        """Run BERT over the joint embedding and score predictions/alignment.

        Returns:
            (alignment_logits, is_paired) when only_alignment is True,
            otherwise (text_accuracy, alignment_accuracy).
        """
        embeds = embeds.to(device)
        attention_mask = attention_mask.to(device)
        outputs = self.bert(inputs_embeds=embeds,
                            attention_mask=attention_mask,
                            return_dict=True)
        sequence_output = outputs.last_hidden_state  # [batch, seq_len, hidden]
        # Pooler output: CLS hidden state -> linear -> tanh.
        pooler_output = outputs.pooler_output
        # Hidden states of the text part (first labels.shape[1] positions).
        text_output = sequence_output[:, :labels.shape[1], :]
        # prediction_scores: [batch, 448, vocab]; alignment_scores: [batch, 2].
        prediction_scores, alignment_scores = self.cls(text_output, pooler_output)
        if only_alignment:
            return alignment_scores, is_paired
        text_evaluator = {'text_pred_logits': prediction_scores,
                          'text_labels': labels}
        alignment_evaluator = {'alignment_logits': alignment_scores,
                               'alignment_labels': is_paired}
        return self.accuracy_scores(text_evaluator, alignment_evaluator)

    def accuracy_scores(self, text_evaluator, alignment_evaluator):
        """Accuracy of masked-token predictions and of alignment classification.

        Both inputs are dicts pairing logits with their labels.
        """
        # Text token predictions: argmax over the vocab dimension.
        text_logits = text_evaluator['text_pred_logits'].detach().cpu().numpy()
        text_labels = text_evaluator['text_labels'].cpu().numpy().flatten()
        text_preds = np.argmax(text_logits, axis=2).flatten()
        # Alignment prediction: argmax over the two alignment classes.
        alig_logits = alignment_evaluator['alignment_logits'].detach().cpu().numpy()
        alig_labels = np.asarray([alignment_evaluator['alignment_labels']])
        alig_preds = np.argmax(alig_logits, axis=1).flatten()
        text_acc = accuracy_score(text_labels, text_preds)
        alig_acc = accuracy_score(alig_labels, alig_preds)
        return text_acc, alig_acc
def image2text(patches, neg_patches, input_ids, is_paired, attention_mask,
               neg_input_ids, neg_attention_mask, evaluator, random_patches):
    """image2text retrieval: query = image; candidates = 1 positive text
    plus the negative texts in neg_input_ids.

    Returns:
        (sorted (score, label) candidates, token-prediction accuracy,
        alignment accuracy) — accuracies are computed on the positive pair only.
    """
    # Positive pair: the image with its own caption.
    embeds = construct_bert_input(patches, input_ids, evaluator, device=device,
                                  random_patches=random_patches)
    attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] - input_ids.shape[1]), value=1)

    # Negative pairs: same image, each of the sampled negative captions.
    all_embeds_neg = []
    all_att_mask = []
    all_neg_inputs = []
    for j in range(neg_input_ids.shape[1]):  # neg_input_ids: [1, num_neg, 448]
        neg_ids = neg_input_ids[:, j, :]
        neg_mask = neg_attention_mask[:, j, :]
        embeds_neg = construct_bert_input(patches, neg_ids, evaluator, device=device,
                                          random_patches=random_patches)
        all_embeds_neg.append(embeds_neg)
        all_att_mask.append(F.pad(neg_mask, (0, embeds_neg.shape[1] - neg_ids.shape[1]), value=1))
        all_neg_inputs.append(neg_ids.detach())

    all_scores_query = evaluator.img2text_scores(
        input_ids_p=input_ids,
        embeds_p=embeds,
        att_mask_p=attention_mask_mm,
        input_ids_n=all_neg_inputs,
        embeds_n=all_embeds_neg,
        att_mask_n=all_att_mask)
    # Accuracy metrics: only on the positive example.
    txt_acc, alig_acc = evaluator.get_scores_and_metrics(
        embeds,
        attention_mask_mm,
        labels=input_ids,
        is_paired=is_paired,
        only_alignment=False,
    )
    return all_scores_query, txt_acc, alig_acc
def text2image(patches, neg_patches, input_ids, is_paired, attention_mask,
               neg_input_ids, neg_attention_mask, evaluator, random_patches):
    """text2image retrieval: query = text; candidates = 1 positive image
    plus the negative images in neg_patches.

    Returns:
        (sorted (score, label) candidates, token-prediction accuracy,
        alignment accuracy) — accuracies are computed on the positive pair only.
    """
    # Positive pair: the text with its own image.
    embeds = construct_bert_input(patches, input_ids, evaluator, device=device,
                                  random_patches=random_patches)
    attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] - input_ids.shape[1]), value=1)

    # Negative pairs: same text, each of the sampled negative images.
    all_embeds_neg = []
    all_att_mask = []
    for p in range(neg_input_ids.shape[1]):  # neg_patches: [1, num_neg, 64, 2048]
        neg_patches_sample = neg_patches[:, p, :, :]
        embeds_neg = construct_bert_input(neg_patches_sample, input_ids, evaluator,
                                          device=device, random_patches=random_patches)
        all_embeds_neg.append(embeds_neg)
        all_att_mask.append(F.pad(attention_mask, (0, embeds_neg.shape[1] - input_ids.shape[1]), value=1))

    all_scores_query = evaluator.text2img_scores(
        input_ids=input_ids,
        embeds=embeds,
        att_mask=attention_mask_mm,
        embeds_n=all_embeds_neg,
        att_mask_n=all_att_mask)
    # Accuracy metrics: only on the positive example.
    txt_acc, alig_acc = evaluator.get_scores_and_metrics(
        embeds,
        attention_mask_mm,
        labels=input_ids,
        is_paired=is_paired,
        only_alignment=False,
    )
    return all_scores_query, txt_acc, alig_acc
def test(dataset, device, save_file_name, pretrained_model=None, random_patches=False):
    """Evaluate a FashionBERT checkpoint on image2text and text2image retrieval.

    Args:
        dataset: an Evaluation_negpairs dataset yielding one query with its
            positive and negative candidates per item.
        device: torch device to run on.
        save_file_name: path to write the textual results report (via save_json).
        pretrained_model: checkpoint path; falls back to 'bert-base-uncased'.
        random_patches: forwarded to construct_bert_input.
    """
    torch.cuda.empty_cache()
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=1,
        shuffle=False,
    )
    # BUG FIX: identity comparison with None ("!= None") replaced by "is not None".
    model_name = pretrained_model if pretrained_model is not None else 'bert-base-uncased'
    evaluator = FashionbertEvaluator.from_pretrained(model_name, return_dict=True)
    evaluator.to(device)
    evaluator.eval()

    query_dict_im2txt = {}
    query_dict_txt2im = {}
    running_acc_alignment_im2txt = 0.0
    running_acc_pred_im2txt = 0.0
    running_acc_alignment_txt2im = 0.0
    running_acc_pred_txt2im = 0.0

    with torch.no_grad():
        # Shapes per batch (batch_size == 1):
        #   input_ids: [1, 448]; neg_input_ids: [1, num_neg, 448]
        #   neg_patches: [1, num_neg, 64, 2048]
        for (patches, neg_patches, input_ids, attention_mask,
             neg_input_ids, neg_attention_mask, img_name) in tqdm(dataloader):
            is_paired = 1.

            # IMAGE 2 TEXT
            im2txt_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(
                patches, neg_patches, input_ids, is_paired, attention_mask,
                neg_input_ids, neg_attention_mask, evaluator, random_patches)
            running_acc_pred_im2txt += im2txt_pred_acc
            running_acc_alignment_im2txt += im2txt_alig_acc
            query_dict_im2txt[img_name[0]] = im2txt_scores  # for Rank@K

            # TEXT 2 IMAGE
            txt2im_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(
                patches, neg_patches, input_ids, is_paired, attention_mask,
                neg_input_ids, neg_attention_mask, evaluator, random_patches)
            running_acc_pred_txt2im += txt2im_pred_acc
            running_acc_alignment_txt2im += txt2im_alig_acc
            query_dict_txt2im[img_name[0]] = txt2im_scores  # for Rank@K

    n_queries = len(dataloader)
    print()
    # Assemble the report: Rank@K plus test-set average accuracies, per direction.
    logs = [
        '---- IMAGE 2 TEXT EVALUATIONS ---------------------\n',
        evaluator.rank_at_K(query_dict_im2txt, True),
        '---- Accuracy in token predictions: {} -----\n'.format(running_acc_pred_im2txt / n_queries),
        '---- Accuracy in text-image alignment: {} -----\n'.format(running_acc_alignment_im2txt / n_queries),
        '---- TEXT 2 IMAGE EVALUATIONS ---------------------\n',
        evaluator.rank_at_K(query_dict_txt2im, False),
        '---- Accuracy in token predictions: {} -----\n'.format(running_acc_pred_txt2im / n_queries),
        '---- Accuracy in text-image alignment: {} -----\n'.format(running_acc_alignment_txt2im / n_queries),
    ]
    for log in logs:
        print(log)
    save_json(save_file_name, ''.join(logs))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Evaluate FashionBert')
    parser.add_argument('--path_to_train_dataset', help='Absolute path to .pkl file used for training')
    parser.add_argument('--path_to_pretrained_model', help='Path to pretrained model', default=None)
    parser.add_argument('--save_test_set', help='Name to save test set .pkl', default='test_set.pkl')
    parser.add_argument('--save_results_name', help='Name to save file with results', default='results.json')
    # BUG FIX: with the original `default=False` and no `type=`, any supplied
    # value (including the string "False") parsed as a truthy string. Parse
    # common boolean spellings explicitly; omitting the flag still yields False.
    parser.add_argument('--random_patches',
                        help='using random_patches True or False',
                        type=lambda s: str(s).lower() in ('true', '1', 'yes'),
                        default=False)
    args = parser.parse_args()

    # 1) Build the evaluation sample set (see fashionbert_evaluator_parser).
    print('Processing the dataset...')
    dataset = EvaluationDataset(args.path_to_train_dataset)
    print('Done!')
    print('\nGetting aligned pairs...')
    get_all_paired_test_set(dataset, args.save_test_set, num_samples=1000)

    # 2) Evaluate on the aligned/negative pairs.
    print('Loading dataset...')
    dataset = Evaluation_negpairs(args.save_test_set)
    print('Starting evaluation...')
    test(dataset, device, args.save_results_name,
         pretrained_model=args.path_to_pretrained_model,
         random_patches=args.random_patches)
    print('Done!!!')
|
normal
|
{
"blob_id": "7a01bffa5d7f0d5ecff57c97478f2cf5e9a27538",
"index": 1210,
"step-1": "<mask token>\n\n\nclass FashionbertEvaluator(transformers.BertPreTrainedModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.bert = BertModel(config)\n self.im_to_embedding = torch.nn.Linear(2048, 768)\n self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size,\n eps=config.layer_norm_eps)\n self.cls = BertPreTrainingHeads(config)\n self.init_weights()\n\n def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n\n ):\n \"\"\"\n INPUTS:\n input_ids [1, 448]\n embeds: [1, 512, 768]\n att_mask: [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds.to(device),\n attention_mask=att_mask.to(device), labels=input_ids.to(device),\n is_paired=torch.tensor(True).to(device), only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids.to(device), is_paired=torch.tensor(False).to(\n device), only_alignment=True)\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids, 'score': score_n, 'label':\n False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,\n input_ids_n, embeds_n, att_mask_n):\n \"\"\"\n INPUTS:\n input_ids_p : [1, 448]\n embeds_p: [1, 512, 768]\n att_mask_p: [1, 448]\n input_ids_n: 
list with 100 of [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds_p.to(device),\n attention_mask=att_mask_p.to(device), labels=input_ids_p.to(\n device), is_paired=torch.tensor(True).to(device),\n only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids_p, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids_n[n].to(device), is_paired=torch.tensor(False).to\n (device), only_alignment=True)\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids_n[n], 'score': score_n,\n 'label': False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def rank_at_K(self, dict_scores, img2text=True):\n logs = ''\n if img2text:\n l1 = '------ Image 2 Text ------\\n'\n logs += l1\n print(l1)\n else:\n l2 = '------ Text 2 Image ------\\n'\n print(l2)\n Ks = [1, 5, 10]\n for K in Ks:\n found = 0\n for key, val in dict_scores.items():\n tmp_range = K if K < len(val) else len(val)\n for i in range(tmp_range):\n score, label = val[i]\n if label:\n found += 1\n break\n l3 = '------ Rank @ {} = {} ------\\n'.format(K, found / len(\n dict_scores.keys()))\n logs += l3\n print(l3)\n return logs\n\n def get_scores_and_metrics(self, embeds, attention_mask, labels=None,\n is_paired=None, only_alignment=False):\n batch_size = embeds.shape[0]\n seq_length = embeds.shape[1]\n 
hidden_dim = embeds.shape[2]\n embeds = embeds.to(device)\n attention_mask = attention_mask.to(device)\n outputs = self.bert(inputs_embeds=embeds, attention_mask=\n attention_mask, return_dict=True)\n sequence_output = outputs.last_hidden_state\n pooler_output = outputs.pooler_output\n text_output = sequence_output[:, :labels.shape[1], :]\n image_output = sequence_output[:, labels.shape[1]:, :]\n prediction_scores, alignment_scores = self.cls(text_output,\n pooler_output)\n if only_alignment:\n return alignment_scores, is_paired\n text_evaluator = {'text_pred_logits': prediction_scores,\n 'text_labels': labels}\n alignment_evaluator = {'alignment_logits': alignment_scores,\n 'alignment_labels': is_paired}\n text_acc, alig_acc = self.accuracy_scores(text_evaluator,\n alignment_evaluator)\n return text_acc, alig_acc\n\n def accuracy_scores(self, text_evaluator, alignment_evaluator):\n \"\"\"\n Text evaluator: dictionary with preds and labels (aligned)\n Image evaluator: dictionary with image output and image patches (aligned)\n \"\"\"\n text_pred_logits = text_evaluator['text_pred_logits']\n text_labels = text_evaluator['text_labels']\n text_preds_logits = text_pred_logits.detach().cpu().numpy()\n text_labels = text_labels.cpu().numpy().flatten()\n text_preds = np.argmax(text_preds_logits, axis=2).flatten()\n alig_pred_logits = alignment_evaluator['alignment_logits']\n alig_labels = alignment_evaluator['alignment_labels']\n alig_pred_logits = alig_pred_logits.detach().cpu().numpy()\n alig_labels = np.asarray([alig_labels])\n alig_preds = np.argmax(alig_pred_logits, axis=1).flatten()\n text_acc = accuracy_score(text_labels, text_preds)\n alig_acc = accuracy_score(alig_labels, alig_preds)\n return text_acc, alig_acc\n\n\ndef image2text(patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches):\n \"\"\"\n image2text retrieval:\n Query = Image\n Paired with: 1 positive text, 100 negative texts\n \"\"\"\n 
im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n embeds = construct_bert_input(patches, input_ids, evaluator, device=\n device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg = []\n all_att_mask = []\n all_neg_inputs = []\n for j in range(len_neg_inputs):\n neg_input_id_sample = neg_input_ids[:, j, :]\n neg_attention_mask_sample = neg_attention_mask[:, j, :]\n embeds_neg = construct_bert_input(patches, neg_input_id_sample,\n evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(neg_attention_mask_sample, (0, \n embeds_neg.shape[1] - neg_input_id_sample.shape[1]), value=1)\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_neg_inputs.append(neg_input_id_sample.detach())\n all_scores_query = evaluator.img2text_scores(input_ids_p=input_ids,\n embeds_p=embeds, att_mask_p=attention_mask_mm, input_ids_n=\n all_neg_inputs, embeds_n=all_embeds_neg, att_mask_n=all_att_mask)\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,\n attention_mask_mm, labels=input_ids, is_paired=is_paired,\n only_alignment=False)\n return all_scores_query, txt_acc, alig_acc\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FashionbertEvaluator(transformers.BertPreTrainedModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.bert = BertModel(config)\n self.im_to_embedding = torch.nn.Linear(2048, 768)\n self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size,\n eps=config.layer_norm_eps)\n self.cls = BertPreTrainingHeads(config)\n self.init_weights()\n\n def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n\n ):\n \"\"\"\n INPUTS:\n input_ids [1, 448]\n embeds: [1, 512, 768]\n att_mask: [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds.to(device),\n attention_mask=att_mask.to(device), labels=input_ids.to(device),\n is_paired=torch.tensor(True).to(device), only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids.to(device), is_paired=torch.tensor(False).to(\n device), only_alignment=True)\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids, 'score': score_n, 'label':\n False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,\n input_ids_n, embeds_n, att_mask_n):\n \"\"\"\n INPUTS:\n input_ids_p : [1, 448]\n embeds_p: [1, 512, 768]\n att_mask_p: [1, 448]\n input_ids_n: 
list with 100 of [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds_p.to(device),\n attention_mask=att_mask_p.to(device), labels=input_ids_p.to(\n device), is_paired=torch.tensor(True).to(device),\n only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids_p, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids_n[n].to(device), is_paired=torch.tensor(False).to\n (device), only_alignment=True)\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids_n[n], 'score': score_n,\n 'label': False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def rank_at_K(self, dict_scores, img2text=True):\n logs = ''\n if img2text:\n l1 = '------ Image 2 Text ------\\n'\n logs += l1\n print(l1)\n else:\n l2 = '------ Text 2 Image ------\\n'\n print(l2)\n Ks = [1, 5, 10]\n for K in Ks:\n found = 0\n for key, val in dict_scores.items():\n tmp_range = K if K < len(val) else len(val)\n for i in range(tmp_range):\n score, label = val[i]\n if label:\n found += 1\n break\n l3 = '------ Rank @ {} = {} ------\\n'.format(K, found / len(\n dict_scores.keys()))\n logs += l3\n print(l3)\n return logs\n\n def get_scores_and_metrics(self, embeds, attention_mask, labels=None,\n is_paired=None, only_alignment=False):\n batch_size = embeds.shape[0]\n seq_length = embeds.shape[1]\n 
hidden_dim = embeds.shape[2]\n embeds = embeds.to(device)\n attention_mask = attention_mask.to(device)\n outputs = self.bert(inputs_embeds=embeds, attention_mask=\n attention_mask, return_dict=True)\n sequence_output = outputs.last_hidden_state\n pooler_output = outputs.pooler_output\n text_output = sequence_output[:, :labels.shape[1], :]\n image_output = sequence_output[:, labels.shape[1]:, :]\n prediction_scores, alignment_scores = self.cls(text_output,\n pooler_output)\n if only_alignment:\n return alignment_scores, is_paired\n text_evaluator = {'text_pred_logits': prediction_scores,\n 'text_labels': labels}\n alignment_evaluator = {'alignment_logits': alignment_scores,\n 'alignment_labels': is_paired}\n text_acc, alig_acc = self.accuracy_scores(text_evaluator,\n alignment_evaluator)\n return text_acc, alig_acc\n\n def accuracy_scores(self, text_evaluator, alignment_evaluator):\n \"\"\"\n Text evaluator: dictionary with preds and labels (aligned)\n Image evaluator: dictionary with image output and image patches (aligned)\n \"\"\"\n text_pred_logits = text_evaluator['text_pred_logits']\n text_labels = text_evaluator['text_labels']\n text_preds_logits = text_pred_logits.detach().cpu().numpy()\n text_labels = text_labels.cpu().numpy().flatten()\n text_preds = np.argmax(text_preds_logits, axis=2).flatten()\n alig_pred_logits = alignment_evaluator['alignment_logits']\n alig_labels = alignment_evaluator['alignment_labels']\n alig_pred_logits = alig_pred_logits.detach().cpu().numpy()\n alig_labels = np.asarray([alig_labels])\n alig_preds = np.argmax(alig_pred_logits, axis=1).flatten()\n text_acc = accuracy_score(text_labels, text_preds)\n alig_acc = accuracy_score(alig_labels, alig_preds)\n return text_acc, alig_acc\n\n\ndef image2text(patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches):\n \"\"\"\n image2text retrieval:\n Query = Image\n Paired with: 1 positive text, 100 negative texts\n \"\"\"\n 
im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n embeds = construct_bert_input(patches, input_ids, evaluator, device=\n device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg = []\n all_att_mask = []\n all_neg_inputs = []\n for j in range(len_neg_inputs):\n neg_input_id_sample = neg_input_ids[:, j, :]\n neg_attention_mask_sample = neg_attention_mask[:, j, :]\n embeds_neg = construct_bert_input(patches, neg_input_id_sample,\n evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(neg_attention_mask_sample, (0, \n embeds_neg.shape[1] - neg_input_id_sample.shape[1]), value=1)\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_neg_inputs.append(neg_input_id_sample.detach())\n all_scores_query = evaluator.img2text_scores(input_ids_p=input_ids,\n embeds_p=embeds, att_mask_p=attention_mask_mm, input_ids_n=\n all_neg_inputs, embeds_n=all_embeds_neg, att_mask_n=all_att_mask)\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,\n attention_mask_mm, labels=input_ids, is_paired=is_paired,\n only_alignment=False)\n return all_scores_query, txt_acc, alig_acc\n\n\ndef text2image(patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches):\n \"\"\"\n text2image retrieval:\n Query = Text\n Paired with: 1 positive image, 100 negative images\n \"\"\"\n im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n embeds = construct_bert_input(patches, input_ids, evaluator, device=\n device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg = []\n all_att_mask = []\n for p in range(len_neg_inputs):\n neg_patches_sample = neg_patches[:, p, :, :]\n embeds_neg = 
construct_bert_input(neg_patches_sample, input_ids,\n evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(attention_mask, (0, embeds_neg.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_scores_query = evaluator.text2img_scores(input_ids=input_ids,\n embeds=embeds, att_mask=attention_mask_mm, embeds_n=all_embeds_neg,\n att_mask_n=all_att_mask)\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,\n attention_mask_mm, labels=input_ids, is_paired=is_paired,\n only_alignment=False)\n return all_scores_query, txt_acc, alig_acc\n\n\ndef test(dataset, device, save_file_name, pretrained_model=None,\n random_patches=False):\n torch.cuda.empty_cache()\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle\n =False)\n if pretrained_model != None:\n evaluator = FashionbertEvaluator.from_pretrained(pretrained_model,\n return_dict=True)\n else:\n evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased',\n return_dict=True)\n evaluator.to(device)\n evaluator.eval()\n query_dict_im2txt = {}\n query_dict_txt2im = {}\n running_acc_alignment_im2txt = 0.0\n running_acc_pred_im2txt = 0.0\n running_acc_alignment_txt2im = 0.0\n running_acc_pred_txt2im = 0.0\n with torch.no_grad():\n for i, (patches, neg_patches, input_ids, attention_mask,\n neg_input_ids, neg_attention_mask, img_name) in enumerate(tqdm(\n dataloader)):\n is_paired = 1.0\n im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(\n patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches)\n running_acc_pred_im2txt += im2txt_pred_acc\n running_acc_alignment_im2txt += im2txt_alig_acc\n query_dict_im2txt[img_name[0]] = im2txt_query_scores\n txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(\n patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, 
neg_attention_mask, evaluator, random_patches)\n running_acc_pred_txt2im += txt2im_pred_acc\n running_acc_alignment_txt2im += txt2im_alig_acc\n query_dict_txt2im[img_name[0]] = txt2im_query_scores\n im2txt_test_set_accuracy_pred = running_acc_pred_im2txt / len(dataloader)\n im2txt_test_set_accuracy_alig = running_acc_alignment_im2txt / len(\n dataloader)\n txt2im_test_set_accuracy_pred = running_acc_pred_txt2im / len(dataloader)\n txt2im_test_set_accuracy_alig = running_acc_alignment_txt2im / len(\n dataloader)\n print()\n results = ''\n log1 = '---- IMAGE 2 TEXT EVALUATIONS ---------------------\\n'\n log2 = evaluator.rank_at_K(query_dict_im2txt, True)\n log3 = '---- Accuracy in token predictions: {} -----\\n'.format(\n im2txt_test_set_accuracy_pred)\n log4 = '---- Accuracy in text-image alignment: {} -----\\n'.format(\n im2txt_test_set_accuracy_alig)\n print(log1)\n print(log2)\n print(log3)\n print(log4)\n print()\n log5 = '---- TEXT 2 IMAGE EVALUATIONS ---------------------\\n'\n log6 = evaluator.rank_at_K(query_dict_txt2im, False)\n log7 = '---- Accuracy in token predictions: {} -----\\n'.format(\n txt2im_test_set_accuracy_pred)\n log8 = '---- Accuracy in text-image alignment: {} -----\\n'.format(\n txt2im_test_set_accuracy_alig)\n print(log5)\n print(log6)\n print(log7)\n print(log8)\n results += log1\n results += log2\n results += log3\n results += log4\n results += log5\n results += log6\n results += log7\n results += log8\n save_json(save_file_name, results)\n\n\n<mask token>\n",
"step-3": "<mask token>\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\nclass FashionbertEvaluator(transformers.BertPreTrainedModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.bert = BertModel(config)\n self.im_to_embedding = torch.nn.Linear(2048, 768)\n self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size,\n eps=config.layer_norm_eps)\n self.cls = BertPreTrainingHeads(config)\n self.init_weights()\n\n def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n\n ):\n \"\"\"\n INPUTS:\n input_ids [1, 448]\n embeds: [1, 512, 768]\n att_mask: [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds.to(device),\n attention_mask=att_mask.to(device), labels=input_ids.to(device),\n is_paired=torch.tensor(True).to(device), only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids.to(device), is_paired=torch.tensor(False).to(\n device), only_alignment=True)\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids, 'score': score_n, 'label':\n False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,\n input_ids_n, embeds_n, att_mask_n):\n \"\"\"\n INPUTS:\n input_ids_p : [1, 
448]\n embeds_p: [1, 512, 768]\n att_mask_p: [1, 448]\n input_ids_n: list with 100 of [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds_p.to(device),\n attention_mask=att_mask_p.to(device), labels=input_ids_p.to(\n device), is_paired=torch.tensor(True).to(device),\n only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids_p, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids_n[n].to(device), is_paired=torch.tensor(False).to\n (device), only_alignment=True)\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids_n[n], 'score': score_n,\n 'label': False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def rank_at_K(self, dict_scores, img2text=True):\n logs = ''\n if img2text:\n l1 = '------ Image 2 Text ------\\n'\n logs += l1\n print(l1)\n else:\n l2 = '------ Text 2 Image ------\\n'\n print(l2)\n Ks = [1, 5, 10]\n for K in Ks:\n found = 0\n for key, val in dict_scores.items():\n tmp_range = K if K < len(val) else len(val)\n for i in range(tmp_range):\n score, label = val[i]\n if label:\n found += 1\n break\n l3 = '------ Rank @ {} = {} ------\\n'.format(K, found / len(\n dict_scores.keys()))\n logs += l3\n print(l3)\n return logs\n\n def get_scores_and_metrics(self, embeds, attention_mask, labels=None,\n is_paired=None, only_alignment=False):\n 
batch_size = embeds.shape[0]\n seq_length = embeds.shape[1]\n hidden_dim = embeds.shape[2]\n embeds = embeds.to(device)\n attention_mask = attention_mask.to(device)\n outputs = self.bert(inputs_embeds=embeds, attention_mask=\n attention_mask, return_dict=True)\n sequence_output = outputs.last_hidden_state\n pooler_output = outputs.pooler_output\n text_output = sequence_output[:, :labels.shape[1], :]\n image_output = sequence_output[:, labels.shape[1]:, :]\n prediction_scores, alignment_scores = self.cls(text_output,\n pooler_output)\n if only_alignment:\n return alignment_scores, is_paired\n text_evaluator = {'text_pred_logits': prediction_scores,\n 'text_labels': labels}\n alignment_evaluator = {'alignment_logits': alignment_scores,\n 'alignment_labels': is_paired}\n text_acc, alig_acc = self.accuracy_scores(text_evaluator,\n alignment_evaluator)\n return text_acc, alig_acc\n\n def accuracy_scores(self, text_evaluator, alignment_evaluator):\n \"\"\"\n Text evaluator: dictionary with preds and labels (aligned)\n Image evaluator: dictionary with image output and image patches (aligned)\n \"\"\"\n text_pred_logits = text_evaluator['text_pred_logits']\n text_labels = text_evaluator['text_labels']\n text_preds_logits = text_pred_logits.detach().cpu().numpy()\n text_labels = text_labels.cpu().numpy().flatten()\n text_preds = np.argmax(text_preds_logits, axis=2).flatten()\n alig_pred_logits = alignment_evaluator['alignment_logits']\n alig_labels = alignment_evaluator['alignment_labels']\n alig_pred_logits = alig_pred_logits.detach().cpu().numpy()\n alig_labels = np.asarray([alig_labels])\n alig_preds = np.argmax(alig_pred_logits, axis=1).flatten()\n text_acc = accuracy_score(text_labels, text_preds)\n alig_acc = accuracy_score(alig_labels, alig_preds)\n return text_acc, alig_acc\n\n\ndef image2text(patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches):\n \"\"\"\n image2text retrieval:\n Query = Image\n 
Paired with: 1 positive text, 100 negative texts\n \"\"\"\n im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n embeds = construct_bert_input(patches, input_ids, evaluator, device=\n device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg = []\n all_att_mask = []\n all_neg_inputs = []\n for j in range(len_neg_inputs):\n neg_input_id_sample = neg_input_ids[:, j, :]\n neg_attention_mask_sample = neg_attention_mask[:, j, :]\n embeds_neg = construct_bert_input(patches, neg_input_id_sample,\n evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(neg_attention_mask_sample, (0, \n embeds_neg.shape[1] - neg_input_id_sample.shape[1]), value=1)\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_neg_inputs.append(neg_input_id_sample.detach())\n all_scores_query = evaluator.img2text_scores(input_ids_p=input_ids,\n embeds_p=embeds, att_mask_p=attention_mask_mm, input_ids_n=\n all_neg_inputs, embeds_n=all_embeds_neg, att_mask_n=all_att_mask)\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,\n attention_mask_mm, labels=input_ids, is_paired=is_paired,\n only_alignment=False)\n return all_scores_query, txt_acc, alig_acc\n\n\ndef text2image(patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches):\n \"\"\"\n text2image retrieval:\n Query = Text\n Paired with: 1 positive image, 100 negative images\n \"\"\"\n im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n embeds = construct_bert_input(patches, input_ids, evaluator, device=\n device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg = []\n all_att_mask = []\n for p in range(len_neg_inputs):\n 
neg_patches_sample = neg_patches[:, p, :, :]\n embeds_neg = construct_bert_input(neg_patches_sample, input_ids,\n evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(attention_mask, (0, embeds_neg.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_scores_query = evaluator.text2img_scores(input_ids=input_ids,\n embeds=embeds, att_mask=attention_mask_mm, embeds_n=all_embeds_neg,\n att_mask_n=all_att_mask)\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,\n attention_mask_mm, labels=input_ids, is_paired=is_paired,\n only_alignment=False)\n return all_scores_query, txt_acc, alig_acc\n\n\ndef test(dataset, device, save_file_name, pretrained_model=None,\n random_patches=False):\n torch.cuda.empty_cache()\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle\n =False)\n if pretrained_model != None:\n evaluator = FashionbertEvaluator.from_pretrained(pretrained_model,\n return_dict=True)\n else:\n evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased',\n return_dict=True)\n evaluator.to(device)\n evaluator.eval()\n query_dict_im2txt = {}\n query_dict_txt2im = {}\n running_acc_alignment_im2txt = 0.0\n running_acc_pred_im2txt = 0.0\n running_acc_alignment_txt2im = 0.0\n running_acc_pred_txt2im = 0.0\n with torch.no_grad():\n for i, (patches, neg_patches, input_ids, attention_mask,\n neg_input_ids, neg_attention_mask, img_name) in enumerate(tqdm(\n dataloader)):\n is_paired = 1.0\n im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(\n patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches)\n running_acc_pred_im2txt += im2txt_pred_acc\n running_acc_alignment_im2txt += im2txt_alig_acc\n query_dict_im2txt[img_name[0]] = im2txt_query_scores\n txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(\n patches, neg_patches, input_ids, 
is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches)\n running_acc_pred_txt2im += txt2im_pred_acc\n running_acc_alignment_txt2im += txt2im_alig_acc\n query_dict_txt2im[img_name[0]] = txt2im_query_scores\n im2txt_test_set_accuracy_pred = running_acc_pred_im2txt / len(dataloader)\n im2txt_test_set_accuracy_alig = running_acc_alignment_im2txt / len(\n dataloader)\n txt2im_test_set_accuracy_pred = running_acc_pred_txt2im / len(dataloader)\n txt2im_test_set_accuracy_alig = running_acc_alignment_txt2im / len(\n dataloader)\n print()\n results = ''\n log1 = '---- IMAGE 2 TEXT EVALUATIONS ---------------------\\n'\n log2 = evaluator.rank_at_K(query_dict_im2txt, True)\n log3 = '---- Accuracy in token predictions: {} -----\\n'.format(\n im2txt_test_set_accuracy_pred)\n log4 = '---- Accuracy in text-image alignment: {} -----\\n'.format(\n im2txt_test_set_accuracy_alig)\n print(log1)\n print(log2)\n print(log3)\n print(log4)\n print()\n log5 = '---- TEXT 2 IMAGE EVALUATIONS ---------------------\\n'\n log6 = evaluator.rank_at_K(query_dict_txt2im, False)\n log7 = '---- Accuracy in token predictions: {} -----\\n'.format(\n txt2im_test_set_accuracy_pred)\n log8 = '---- Accuracy in text-image alignment: {} -----\\n'.format(\n txt2im_test_set_accuracy_alig)\n print(log5)\n print(log6)\n print(log7)\n print(log8)\n results += log1\n results += log2\n results += log3\n results += log4\n results += log5\n results += log6\n results += log7\n results += log8\n save_json(save_file_name, results)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Evaluate FashionBert')\n parser.add_argument('--path_to_train_dataset', help=\n 'Absolute path to .pkl file used for training')\n parser.add_argument('--path_to_pretrained_model', help=\n 'Path to pretrained model', default=None)\n parser.add_argument('--save_test_set', help=\n 'Name to save test set .pkl', default='test_set.pkl')\n parser.add_argument('--save_results_name', 
help=\n 'Name to save file with results', default='results.json')\n parser.add_argument('--random_patches', help=\n 'using random_patches True or False', default=False)\n args = parser.parse_args()\n print('Processing the dataset...')\n dataset = EvaluationDataset(args.path_to_train_dataset)\n print('Done!')\n print('\\nGetting aligned pairs...')\n get_all_paired_test_set(dataset, args.save_test_set, num_samples=1000)\n print('Loading dataset...')\n dataset = Evaluation_negpairs(args.save_test_set)\n print('Starting evaluation...')\n test(dataset, device, args.save_results_name, pretrained_model=args.\n path_to_pretrained_model, random_patches=args.random_patches)\n print('Done!!!')\n",
"step-4": "import torch, torchvision\nimport torch.nn.functional as F\nimport transformers\nfrom transformers import BertTokenizer, BertModel\nfrom transformers.models.bert.modeling_bert import BertPreTrainingHeads\nfrom utils import construct_bert_input, EvaluationDataset, save_json\nfrom fashionbert_evaluator_parser import Evaluation_negpairs, get_all_paired_test_set\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\nclass FashionbertEvaluator(transformers.BertPreTrainedModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.bert = BertModel(config)\n self.im_to_embedding = torch.nn.Linear(2048, 768)\n self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size,\n eps=config.layer_norm_eps)\n self.cls = BertPreTrainingHeads(config)\n self.init_weights()\n\n def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n\n ):\n \"\"\"\n INPUTS:\n input_ids [1, 448]\n embeds: [1, 512, 768]\n att_mask: [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds.to(device),\n attention_mask=att_mask.to(device), labels=input_ids.to(device),\n is_paired=torch.tensor(True).to(device), only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids.to(device), is_paired=torch.tensor(False).to(\n device), only_alignment=True)\n score_n = 
score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids, 'score': score_n, 'label':\n False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,\n input_ids_n, embeds_n, att_mask_n):\n \"\"\"\n INPUTS:\n input_ids_p : [1, 448]\n embeds_p: [1, 512, 768]\n att_mask_p: [1, 448]\n input_ids_n: list with 100 of [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds_p.to(device),\n attention_mask=att_mask_p.to(device), labels=input_ids_p.to(\n device), is_paired=torch.tensor(True).to(device),\n only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids_p, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids_n[n].to(device), is_paired=torch.tensor(False).to\n (device), only_alignment=True)\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids_n[n], 'score': score_n,\n 'label': False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def rank_at_K(self, dict_scores, img2text=True):\n logs = ''\n if img2text:\n l1 = '------ Image 2 Text ------\\n'\n logs += l1\n print(l1)\n else:\n l2 = '------ 
Text 2 Image ------\\n'\n print(l2)\n Ks = [1, 5, 10]\n for K in Ks:\n found = 0\n for key, val in dict_scores.items():\n tmp_range = K if K < len(val) else len(val)\n for i in range(tmp_range):\n score, label = val[i]\n if label:\n found += 1\n break\n l3 = '------ Rank @ {} = {} ------\\n'.format(K, found / len(\n dict_scores.keys()))\n logs += l3\n print(l3)\n return logs\n\n def get_scores_and_metrics(self, embeds, attention_mask, labels=None,\n is_paired=None, only_alignment=False):\n batch_size = embeds.shape[0]\n seq_length = embeds.shape[1]\n hidden_dim = embeds.shape[2]\n embeds = embeds.to(device)\n attention_mask = attention_mask.to(device)\n outputs = self.bert(inputs_embeds=embeds, attention_mask=\n attention_mask, return_dict=True)\n sequence_output = outputs.last_hidden_state\n pooler_output = outputs.pooler_output\n text_output = sequence_output[:, :labels.shape[1], :]\n image_output = sequence_output[:, labels.shape[1]:, :]\n prediction_scores, alignment_scores = self.cls(text_output,\n pooler_output)\n if only_alignment:\n return alignment_scores, is_paired\n text_evaluator = {'text_pred_logits': prediction_scores,\n 'text_labels': labels}\n alignment_evaluator = {'alignment_logits': alignment_scores,\n 'alignment_labels': is_paired}\n text_acc, alig_acc = self.accuracy_scores(text_evaluator,\n alignment_evaluator)\n return text_acc, alig_acc\n\n def accuracy_scores(self, text_evaluator, alignment_evaluator):\n \"\"\"\n Text evaluator: dictionary with preds and labels (aligned)\n Image evaluator: dictionary with image output and image patches (aligned)\n \"\"\"\n text_pred_logits = text_evaluator['text_pred_logits']\n text_labels = text_evaluator['text_labels']\n text_preds_logits = text_pred_logits.detach().cpu().numpy()\n text_labels = text_labels.cpu().numpy().flatten()\n text_preds = np.argmax(text_preds_logits, axis=2).flatten()\n alig_pred_logits = alignment_evaluator['alignment_logits']\n alig_labels = 
alignment_evaluator['alignment_labels']\n alig_pred_logits = alig_pred_logits.detach().cpu().numpy()\n alig_labels = np.asarray([alig_labels])\n alig_preds = np.argmax(alig_pred_logits, axis=1).flatten()\n text_acc = accuracy_score(text_labels, text_preds)\n alig_acc = accuracy_score(alig_labels, alig_preds)\n return text_acc, alig_acc\n\n\ndef image2text(patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches):\n \"\"\"\n image2text retrieval:\n Query = Image\n Paired with: 1 positive text, 100 negative texts\n \"\"\"\n im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n embeds = construct_bert_input(patches, input_ids, evaluator, device=\n device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg = []\n all_att_mask = []\n all_neg_inputs = []\n for j in range(len_neg_inputs):\n neg_input_id_sample = neg_input_ids[:, j, :]\n neg_attention_mask_sample = neg_attention_mask[:, j, :]\n embeds_neg = construct_bert_input(patches, neg_input_id_sample,\n evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(neg_attention_mask_sample, (0, \n embeds_neg.shape[1] - neg_input_id_sample.shape[1]), value=1)\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_neg_inputs.append(neg_input_id_sample.detach())\n all_scores_query = evaluator.img2text_scores(input_ids_p=input_ids,\n embeds_p=embeds, att_mask_p=attention_mask_mm, input_ids_n=\n all_neg_inputs, embeds_n=all_embeds_neg, att_mask_n=all_att_mask)\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,\n attention_mask_mm, labels=input_ids, is_paired=is_paired,\n only_alignment=False)\n return all_scores_query, txt_acc, alig_acc\n\n\ndef text2image(patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, 
evaluator, random_patches):\n \"\"\"\n text2image retrieval:\n Query = Text\n Paired with: 1 positive image, 100 negative images\n \"\"\"\n im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n embeds = construct_bert_input(patches, input_ids, evaluator, device=\n device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg = []\n all_att_mask = []\n for p in range(len_neg_inputs):\n neg_patches_sample = neg_patches[:, p, :, :]\n embeds_neg = construct_bert_input(neg_patches_sample, input_ids,\n evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(attention_mask, (0, embeds_neg.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_scores_query = evaluator.text2img_scores(input_ids=input_ids,\n embeds=embeds, att_mask=attention_mask_mm, embeds_n=all_embeds_neg,\n att_mask_n=all_att_mask)\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,\n attention_mask_mm, labels=input_ids, is_paired=is_paired,\n only_alignment=False)\n return all_scores_query, txt_acc, alig_acc\n\n\ndef test(dataset, device, save_file_name, pretrained_model=None,\n random_patches=False):\n torch.cuda.empty_cache()\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle\n =False)\n if pretrained_model != None:\n evaluator = FashionbertEvaluator.from_pretrained(pretrained_model,\n return_dict=True)\n else:\n evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased',\n return_dict=True)\n evaluator.to(device)\n evaluator.eval()\n query_dict_im2txt = {}\n query_dict_txt2im = {}\n running_acc_alignment_im2txt = 0.0\n running_acc_pred_im2txt = 0.0\n running_acc_alignment_txt2im = 0.0\n running_acc_pred_txt2im = 0.0\n with torch.no_grad():\n for i, (patches, neg_patches, input_ids, attention_mask,\n neg_input_ids, 
neg_attention_mask, img_name) in enumerate(tqdm(\n dataloader)):\n is_paired = 1.0\n im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(\n patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches)\n running_acc_pred_im2txt += im2txt_pred_acc\n running_acc_alignment_im2txt += im2txt_alig_acc\n query_dict_im2txt[img_name[0]] = im2txt_query_scores\n txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(\n patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches)\n running_acc_pred_txt2im += txt2im_pred_acc\n running_acc_alignment_txt2im += txt2im_alig_acc\n query_dict_txt2im[img_name[0]] = txt2im_query_scores\n im2txt_test_set_accuracy_pred = running_acc_pred_im2txt / len(dataloader)\n im2txt_test_set_accuracy_alig = running_acc_alignment_im2txt / len(\n dataloader)\n txt2im_test_set_accuracy_pred = running_acc_pred_txt2im / len(dataloader)\n txt2im_test_set_accuracy_alig = running_acc_alignment_txt2im / len(\n dataloader)\n print()\n results = ''\n log1 = '---- IMAGE 2 TEXT EVALUATIONS ---------------------\\n'\n log2 = evaluator.rank_at_K(query_dict_im2txt, True)\n log3 = '---- Accuracy in token predictions: {} -----\\n'.format(\n im2txt_test_set_accuracy_pred)\n log4 = '---- Accuracy in text-image alignment: {} -----\\n'.format(\n im2txt_test_set_accuracy_alig)\n print(log1)\n print(log2)\n print(log3)\n print(log4)\n print()\n log5 = '---- TEXT 2 IMAGE EVALUATIONS ---------------------\\n'\n log6 = evaluator.rank_at_K(query_dict_txt2im, False)\n log7 = '---- Accuracy in token predictions: {} -----\\n'.format(\n txt2im_test_set_accuracy_pred)\n log8 = '---- Accuracy in text-image alignment: {} -----\\n'.format(\n txt2im_test_set_accuracy_alig)\n print(log5)\n print(log6)\n print(log7)\n print(log8)\n results += log1\n results += log2\n results += log3\n results += log4\n results += log5\n results += log6\n 
results += log7\n results += log8\n save_json(save_file_name, results)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Evaluate FashionBert')\n parser.add_argument('--path_to_train_dataset', help=\n 'Absolute path to .pkl file used for training')\n parser.add_argument('--path_to_pretrained_model', help=\n 'Path to pretrained model', default=None)\n parser.add_argument('--save_test_set', help=\n 'Name to save test set .pkl', default='test_set.pkl')\n parser.add_argument('--save_results_name', help=\n 'Name to save file with results', default='results.json')\n parser.add_argument('--random_patches', help=\n 'using random_patches True or False', default=False)\n args = parser.parse_args()\n print('Processing the dataset...')\n dataset = EvaluationDataset(args.path_to_train_dataset)\n print('Done!')\n print('\\nGetting aligned pairs...')\n get_all_paired_test_set(dataset, args.save_test_set, num_samples=1000)\n print('Loading dataset...')\n dataset = Evaluation_negpairs(args.save_test_set)\n print('Starting evaluation...')\n test(dataset, device, args.save_results_name, pretrained_model=args.\n path_to_pretrained_model, random_patches=args.random_patches)\n print('Done!!!')\n",
"step-5": "import torch, torchvision\nimport torch.nn.functional as F\nimport transformers\nfrom transformers import BertTokenizer, BertModel\nfrom transformers.models.bert.modeling_bert import BertPreTrainingHeads\nfrom utils import construct_bert_input, EvaluationDataset, save_json\nfrom fashionbert_evaluator_parser import Evaluation_negpairs, get_all_paired_test_set\n\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass FashionbertEvaluator(transformers.BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.bert = BertModel(config)\n\n self.im_to_embedding = torch.nn.Linear(2048, 768)\n self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n self.cls = BertPreTrainingHeads(config)\n\n self.init_weights()\n\n def text2img_scores(self,\n input_ids,\n embeds,\n att_mask,\n embeds_n, # list\n att_mask_n, # list\n ):\n \"\"\"\n INPUTS:\n input_ids [1, 448]\n embeds: [1, 512, 768]\n att_mask: [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n # Score for positive\n query_dict_scores = []\n query_scores = []\n query_labels = []\n\n score_pos = self.get_scores_and_metrics(\n embeds=embeds.to(device),\n attention_mask=att_mask.to(device),\n labels=input_ids.to(device),\n is_paired=torch.tensor(True).to(device),\n only_alignment=True,\n )\n\n # label = score_pos[1]\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item() # confidence that is actually positive\n score_pos_dict = {'text': input_ids,\n 'score': score_p,\n 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n\n # Scores for negative\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(\n 
embeds=embeds_n[n].to(device),\n attention_mask=att_mask_n[n].to(device),\n labels=input_ids.to(device),\n is_paired=torch.tensor(False).to(device),\n only_alignment=True,\n )\n\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item() # confidence that is actually positive\n score_neg_dict = {'text': input_ids,\n 'score': score_n,\n 'label': False}\n\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key=lambda x: x[0], reverse=True)]\n return S\n\n def img2text_scores(self, input_ids_p, embeds_p, att_mask_p, input_ids_n, embeds_n, att_mask_n):\n \"\"\"\n INPUTS:\n input_ids_p : [1, 448]\n embeds_p: [1, 512, 768]\n att_mask_p: [1, 448]\n input_ids_n: list with 100 of [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n # Score for positive\n query_dict_scores = []\n query_scores = []\n query_labels = []\n\n score_pos = self.get_scores_and_metrics(\n embeds=embeds_p.to(device),\n attention_mask=att_mask_p.to(device),\n labels=input_ids_p.to(device),\n is_paired=torch.tensor(True).to(device),\n only_alignment=True,\n )\n\n # label = score_pos[1]\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item() # confidence that is actually positive\n score_pos_dict = {'text': input_ids_p,\n 'score': score_p,\n 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n\n # Scores for negative\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(\n embeds=embeds_n[n].to(device),\n attention_mask=att_mask_n[n].to(device),\n labels=input_ids_n[n].to(device),\n is_paired=torch.tensor(False).to(device),\n only_alignment=True,\n )\n\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item() # confidence that is actually positive\n score_neg_dict = {'text': input_ids_n[n],\n 'score': 
score_n,\n 'label': False}\n\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n\n # print(evaluator.tokenizer.convert_ids_to_tokens(ids))\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key=lambda x: x[0], reverse=True)]\n\n return S\n\n def rank_at_K(self, dict_scores, img2text=True):\n logs = ''\n\n if img2text:\n l1 = '------ Image 2 Text ------\\n'\n logs += l1\n print(l1)\n else:\n l2 = '------ Text 2 Image ------\\n'\n print(l2)\n\n Ks = [1, 5, 10]\n for K in Ks:\n found = 0\n for key, val in dict_scores.items():\n tmp_range = K if K < len(val) else len(val)\n for i in range(tmp_range):\n score, label = val[i]\n if label:\n found += 1\n break\n l3 = '------ Rank @ {} = {} ------\\n'.format(K, (found / len(dict_scores.keys())))\n logs += l3\n print(l3)\n\n return logs\n\n def get_scores_and_metrics(\n self,\n embeds, # text + image embedded\n attention_mask, # text + image attention mask\n labels=None, # [batch, 448]\n is_paired=None, # [batch]\n only_alignment=False,\n ):\n\n batch_size = embeds.shape[0]\n seq_length = embeds.shape[1]\n hidden_dim = embeds.shape[2]\n\n embeds = embeds.to(device)\n attention_mask = attention_mask.to(device)\n\n outputs = self.bert(inputs_embeds=embeds,\n attention_mask=attention_mask,\n return_dict=True)\n\n sequence_output = outputs.last_hidden_state # [batch, seq_length, hidden_size]\n pooler_output = outputs.pooler_output # [batch_size, hidden_size] last layer of hidden-state of first token (CLS) + linear layer + tanh\n\n # hidden states corresponding to the text part\n text_output = sequence_output[:, :labels.shape[1], :] # [batch, 448, 768]\n # hidden states corresponding to the image part\n image_output = sequence_output[:, labels.shape[1]:, :] # [batch, 64, 768]\n\n ### FOR TEXT\n # Predict the masked text tokens and alignment scores (whether image and text match)\n prediction_scores, alignment_scores = self.cls(text_output, pooler_output)\n # 
prediction score is [batch, 448, vocab_size = 30522]\n # aligment score is [batch, 2] 2 with logits corresponding to 1 and 0\n\n if only_alignment:\n return alignment_scores, is_paired\n\n text_evaluator = {'text_pred_logits': prediction_scores,\n 'text_labels': labels}\n\n alignment_evaluator = {'alignment_logits': alignment_scores,\n 'alignment_labels': is_paired}\n\n text_acc, alig_acc = self.accuracy_scores(text_evaluator, alignment_evaluator)\n return text_acc, alig_acc\n\n def accuracy_scores(self, text_evaluator, alignment_evaluator):\n \"\"\"\n Text evaluator: dictionary with preds and labels (aligned)\n Image evaluator: dictionary with image output and image patches (aligned)\n \"\"\"\n # Text\n text_pred_logits = text_evaluator['text_pred_logits'] # [num_aligned, 448, vocab_size]\n text_labels = text_evaluator['text_labels'] # [num_aligned, 448]\n\n text_preds_logits = text_pred_logits.detach().cpu().numpy()\n text_labels = text_labels.cpu().numpy().flatten()\n text_preds = np.argmax(text_preds_logits, axis=2).flatten() # [num_algined, 448]\n\n # Alignment\n alig_pred_logits = alignment_evaluator['alignment_logits'] # [1, 2]\n alig_labels = alignment_evaluator['alignment_labels'] # [2]\n\n alig_pred_logits = alig_pred_logits.detach().cpu().numpy()\n alig_labels = np.asarray([alig_labels])\n # alig_labels = alig_labels.double().cpu().numpy().flatten()\n alig_preds = np.argmax(alig_pred_logits, axis=1).flatten() # [1, 2]\n\n text_acc = accuracy_score(text_labels, text_preds)\n alig_acc = accuracy_score(alig_labels, alig_preds)\n\n return text_acc, alig_acc\n\n\ndef image2text(patches, neg_patches, input_ids, is_paired, attention_mask, neg_input_ids, neg_attention_mask,\n evaluator, random_patches):\n \"\"\"\n image2text retrieval:\n Query = Image\n Paired with: 1 positive text, 100 negative texts\n \"\"\"\n im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n\n embeds = construct_bert_input(patches, input_ids, 
evaluator, device=device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] - input_ids.shape[1]), value=1)\n\n # NEGATIVE SAMPLE # [batch, 100, 448]\n all_embeds_neg = []\n all_att_mask = []\n all_neg_inputs = []\n\n for j in range(len_neg_inputs):\n neg_input_id_sample = neg_input_ids[:, j, :] # [1, 448]\n neg_attention_mask_sample = neg_attention_mask[:, j, :]\n\n embeds_neg = construct_bert_input(patches, neg_input_id_sample, evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(neg_attention_mask_sample, (0, embeds_neg.shape[1] - neg_input_id_sample.shape[1]),\n value=1)\n\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_neg_inputs.append(neg_input_id_sample.detach())\n\n # Now I have all joint embeddings for 1 positive sample and 100 neg samples\n all_scores_query = evaluator.img2text_scores(\n input_ids_p=input_ids,\n embeds_p=embeds,\n att_mask_p=attention_mask_mm,\n input_ids_n=all_neg_inputs,\n embeds_n=all_embeds_neg,\n att_mask_n=all_att_mask)\n\n # Accuracy: only in positive example\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(\n embeds, # text + image embedded\n attention_mask_mm,\n labels=input_ids, # [batch, 448]\n is_paired=is_paired, # [batch]\n only_alignment=False,\n )\n\n return all_scores_query, txt_acc, alig_acc\n\n\ndef text2image(patches, neg_patches, input_ids, is_paired, attention_mask, neg_input_ids, neg_attention_mask,\n evaluator, random_patches):\n \"\"\"\n text2image retrieval:\n Query = Text\n Paired with: 1 positive image, 100 negative images\n \"\"\"\n im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n\n # before constructing bert, att mask is 448 long\n # POSITIVE IMAGE\n embeds = construct_bert_input(patches, input_ids, evaluator, device=device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] - input_ids.shape[1]), 
value=1) # [1, 512]\n\n # NEGATIVE SAMPLES\n all_embeds_neg = []\n all_att_mask = []\n\n for p in range(len_neg_inputs):\n neg_patches_sample = neg_patches[:, p, :, :]\n embeds_neg = construct_bert_input(neg_patches_sample, input_ids, evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(attention_mask, (0, embeds_neg.shape[1] - input_ids.shape[1]), value=1)\n\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n\n # Now I have all joint embeddings for 1 positive sample and 100 neg samples\n all_scores_query = evaluator.text2img_scores(\n input_ids=input_ids,\n embeds=embeds,\n att_mask=attention_mask_mm,\n embeds_n=all_embeds_neg, # list\n att_mask_n=all_att_mask) # list\n\n # Accuracy: only in positive example\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(\n embeds, # text + image embedded\n attention_mask_mm, # [batch,\n labels=input_ids, # [batch, 448]\n is_paired=is_paired, # [batch]\n only_alignment=False,\n )\n\n return all_scores_query, txt_acc, alig_acc\n\n\ndef test(dataset, device, save_file_name, pretrained_model=None, random_patches=False):\n torch.cuda.empty_cache()\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=1,\n shuffle=False,\n )\n\n if pretrained_model != None:\n evaluator = FashionbertEvaluator.from_pretrained(pretrained_model, return_dict=True)\n else:\n evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased', return_dict=True)\n\n evaluator.to(device)\n evaluator.eval()\n\n query_dict_im2txt = {}\n query_dict_txt2im = {}\n running_acc_alignment_im2txt = 0.0\n running_acc_pred_im2txt = 0.0\n running_acc_alignment_txt2im = 0.0\n running_acc_pred_txt2im = 0.0\n\n with torch.no_grad():\n for i, (\n patches, neg_patches, input_ids, attention_mask, neg_input_ids, neg_attention_mask, img_name) in enumerate(\n tqdm(dataloader)):\n # ****** Shapes ********\n # input_ids shape: [1, 448]\n # neg_input_ids shape: [1, NUM_SAMPLES=100, 448]\n # 
neg_patches: [1, NUM_SAMPLES=100, 64, 2048]\n\n # IMAGE 2 TEXT\n\n is_paired = 1.\n # print('im2text..')\n im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(patches, neg_patches, input_ids,\n is_paired, attention_mask,\n neg_input_ids, neg_attention_mask,\n evaluator, random_patches)\n\n # print('done')\n\n # Accuracies\n running_acc_pred_im2txt += im2txt_pred_acc\n running_acc_alignment_im2txt += im2txt_alig_acc\n\n # For Rank @ K\n query_dict_im2txt[img_name[0]] = im2txt_query_scores\n\n # TEXT 2 IMAGE\n # print('txt2img..')\n txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(patches, neg_patches, input_ids,\n is_paired, attention_mask,\n neg_input_ids, neg_attention_mask,\n evaluator, random_patches)\n\n # print('done')\n\n # Accuracies\n running_acc_pred_txt2im += txt2im_pred_acc\n running_acc_alignment_txt2im += txt2im_alig_acc\n\n # For Rank @ K\n query_dict_txt2im[img_name[0]] = txt2im_query_scores\n\n im2txt_test_set_accuracy_pred = (running_acc_pred_im2txt / len(dataloader))\n im2txt_test_set_accuracy_alig = (running_acc_alignment_im2txt / len(dataloader))\n txt2im_test_set_accuracy_pred = (running_acc_pred_txt2im / len(dataloader))\n txt2im_test_set_accuracy_alig = (running_acc_alignment_txt2im / len(dataloader))\n\n print()\n results = ''\n log1 = '---- IMAGE 2 TEXT EVALUATIONS ---------------------\\n'\n log2 = evaluator.rank_at_K(query_dict_im2txt, True)\n log3 = '---- Accuracy in token predictions: {} -----\\n'.format(im2txt_test_set_accuracy_pred)\n log4 = '---- Accuracy in text-image alignment: {} -----\\n'.format(im2txt_test_set_accuracy_alig)\n print(log1)\n print(log2)\n print(log3)\n print(log4)\n print()\n log5 = '---- TEXT 2 IMAGE EVALUATIONS ---------------------\\n'\n log6 = evaluator.rank_at_K(query_dict_txt2im, False)\n log7 = '---- Accuracy in token predictions: {} -----\\n'.format(txt2im_test_set_accuracy_pred)\n log8 = '---- Accuracy in text-image alignment: {} 
-----\\n'.format(txt2im_test_set_accuracy_alig)\n print(log5)\n print(log6)\n print(log7)\n print(log8)\n\n results += log1\n results += log2\n results += log3\n results += log4\n results += log5\n results += log6\n results += log7\n results += log8\n\n save_json(save_file_name, results)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Evaluate FashionBert')\n parser.add_argument('--path_to_train_dataset', help='Absolute path to .pkl file used for training')\n parser.add_argument('--path_to_pretrained_model', help='Path to pretrained model', default=None)\n parser.add_argument('--save_test_set', help='Name to save test set .pkl', default='test_set.pkl')\n parser.add_argument('--save_results_name', help='Name to save file with results', default='results.json')\n parser.add_argument('--random_patches', help='using random_patches True or False', default=False)\n args = parser.parse_args()\n\n # 1) Builds the 1000 sample dataset. This corresponds to the fashionibert_evaluator_parser file\n print('Processing the dataset...')\n dataset = EvaluationDataset(args.path_to_train_dataset)\n # savefile_path = '../../../__fashionbert_trained/fashionbert_vanilla_adaptive/evaluation_set_fashionbert_vanilla.pkl'\n print('Done!')\n print('\\nGetting aligned pairs...')\n get_all_paired_test_set(dataset, args.save_test_set, num_samples=1000)\n # print('Done!')\n\n # 2) Evaluate-\n\n # eval_set_path = '../../../__fashionbert_trained/fashionbert_vanilla_adaptive/evaluation_set_fashionbert_vanilla.pkl'\n # path_to_trained_model = '../../../__fashionbert_trained/fashionbert_vanilla_adaptive/'\n # path_to_save_json = '../../../__fashionbert_trained/fashionbert_vanilla_adaptive/results.json'\n print('Loading dataset...')\n dataset = Evaluation_negpairs(args.save_test_set)\n print('Starting evaluation...')\n # test(dataset, device, args.num_subsamples, args.save_file_name, args.path_to_pretrained_model)\n test(dataset, device, args.save_results_name, 
pretrained_model=args.path_to_pretrained_model, random_patches=args.random_patches)\n print('Done!!!')\n\n",
"step-ids": [
8,
10,
12,
13,
14
]
}
|
[
8,
10,
12,
13,
14
] |
# -*- coding: utf-8 -*-
import scrapy
import os
from topdb.items import BiqugeItem
class NovelsSpider(scrapy.Spider):
name = 'novels'
allowed_domains = ['xbiquge.la']
start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']
def parse(self, response):
# 小说分类
path = '/Users/qx/Documents/小说/new/'
all=response.xpath(".//div[@class='novellist']")
for oneitem in all:
classname=oneitem.xpath('./h2/text()').extract_first()
if classname=='奇幻小说、玄幻小说大全列表':
classname='xuanhuan'
if classname=='历史小说、军事小说、穿越小说大全列表':
classname='chuanyue'
if classname=='武侠小说、仙侠小说、修真小说大全列表':
classname='xiuzhen'
if classname=='言情小说、都市小说大全列表':
classname='dushi'
if classname=='异灵小说、科幻小说大全列表':
classname='kehuan'
if classname=='游戏小说、竞技小说、网游小说大全列表':
classname='wangyou'
urls=oneitem.xpath('./ul/li/a/@href').extract()
names=oneitem.xpath('./ul/li/a/text()').extract()
for i in range(len(urls)):
url=urls[i]
name=names[i]
yield scrapy.Request(url, meta={'name': name, 'classname': classname}, callback=self.url_parse)
def url_parse(self, response):
# 小说章节列表
print('小说章节')
path = '/Users/qx/Documents/小说/new/'
name = response.meta['name']
classname = response.meta['classname']
author = response.xpath("//div[@id ='info']/p/text()").extract_first()
if author:
author=author.split(':',1)[1]
print(name+'-'+author)
listurls = response.xpath("//div[@id ='list']/dl/dd/a/@href").extract()
chapternames = response.xpath("//div[@id ='list']/dl/dd/a/text()").extract()
for i in range(len(listurls)):
url = "http://www.xbiquge.la" + listurls[i]
chaptername=chapternames[i]
oldname=path+ classname+'/'+name+ '-作者:' + author
newname=path+ classname+'/'+name
if (os.path.exists(oldname)):
os.rename(oldname,newname)
if (not os.path.exists(newname)):
os.makedirs(newname)
if(not os.path.exists(newname+'/'+ str(i) + ".txt")):
yield scrapy.Request(url, meta={'chaptername':chaptername,'tag':classname,'name':name,'author':author,'index':i}, callback=self.detail_parse)
def detail_parse(self, response):
# 章节详细内容
tag = response.meta['tag']
name = response.meta['name']
author = response.meta['author']
chaptername = response.meta['chaptername']
index = response.meta['index']
item = BiqugeItem()
novel = response.xpath("//div[@id='content']/text()").extract()
item['novel'] = "\n".join(novel).replace(" ", " ")
item['name'] = name
item['tag'] = tag
item['author'] = author
item['chapter'] = chaptername
item['index'] = index
# print(item['classname'])
# print(item['name'])
# print(item['title'])
# print('\n')
yield item
# 这里是爬取整个网站且按照分类进行爬取 但是重点是 爬取太慢scrapy 是异步操作 还需要了解一下多线程的问题 这样速度能更快些
|
normal
|
{
"blob_id": "af668751074df6f182c7121821587270734ea5af",
"index": 1075,
"step-1": "<mask token>\n\n\nclass NovelsSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n path = '/Users/qx/Documents/小说/new/'\n all = response.xpath(\".//div[@class='novellist']\")\n for oneitem in all:\n classname = oneitem.xpath('./h2/text()').extract_first()\n if classname == '奇幻小说、玄幻小说大全列表':\n classname = 'xuanhuan'\n if classname == '历史小说、军事小说、穿越小说大全列表':\n classname = 'chuanyue'\n if classname == '武侠小说、仙侠小说、修真小说大全列表':\n classname = 'xiuzhen'\n if classname == '言情小说、都市小说大全列表':\n classname = 'dushi'\n if classname == '异灵小说、科幻小说大全列表':\n classname = 'kehuan'\n if classname == '游戏小说、竞技小说、网游小说大全列表':\n classname = 'wangyou'\n urls = oneitem.xpath('./ul/li/a/@href').extract()\n names = oneitem.xpath('./ul/li/a/text()').extract()\n for i in range(len(urls)):\n url = urls[i]\n name = names[i]\n yield scrapy.Request(url, meta={'name': name, 'classname':\n classname}, callback=self.url_parse)\n\n def url_parse(self, response):\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n name = response.meta['name']\n classname = response.meta['classname']\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n if author:\n author = author.split(':', 1)[1]\n print(name + '-' + author)\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\"\n ).extract()\n for i in range(len(listurls)):\n url = 'http://www.xbiquge.la' + listurls[i]\n chaptername = chapternames[i]\n oldname = path + classname + '/' + name + '-作者:' + author\n newname = path + classname + '/' + name\n if os.path.exists(oldname):\n os.rename(oldname, newname)\n if not os.path.exists(newname):\n os.makedirs(newname)\n if not os.path.exists(newname + '/' + str(i) + '.txt'):\n yield scrapy.Request(url, meta={'chaptername': chaptername,\n 'tag': classname, 'name': name, 'author': author,\n 'index': i}, callback=self.detail_parse)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass NovelsSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n path = '/Users/qx/Documents/小说/new/'\n all = response.xpath(\".//div[@class='novellist']\")\n for oneitem in all:\n classname = oneitem.xpath('./h2/text()').extract_first()\n if classname == '奇幻小说、玄幻小说大全列表':\n classname = 'xuanhuan'\n if classname == '历史小说、军事小说、穿越小说大全列表':\n classname = 'chuanyue'\n if classname == '武侠小说、仙侠小说、修真小说大全列表':\n classname = 'xiuzhen'\n if classname == '言情小说、都市小说大全列表':\n classname = 'dushi'\n if classname == '异灵小说、科幻小说大全列表':\n classname = 'kehuan'\n if classname == '游戏小说、竞技小说、网游小说大全列表':\n classname = 'wangyou'\n urls = oneitem.xpath('./ul/li/a/@href').extract()\n names = oneitem.xpath('./ul/li/a/text()').extract()\n for i in range(len(urls)):\n url = urls[i]\n name = names[i]\n yield scrapy.Request(url, meta={'name': name, 'classname':\n classname}, callback=self.url_parse)\n\n def url_parse(self, response):\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n name = response.meta['name']\n classname = response.meta['classname']\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n if author:\n author = author.split(':', 1)[1]\n print(name + '-' + author)\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\"\n ).extract()\n for i in range(len(listurls)):\n url = 'http://www.xbiquge.la' + listurls[i]\n chaptername = chapternames[i]\n oldname = path + classname + '/' + name + '-作者:' + author\n newname = path + classname + '/' + name\n if os.path.exists(oldname):\n os.rename(oldname, newname)\n if not os.path.exists(newname):\n os.makedirs(newname)\n if not os.path.exists(newname + '/' + str(i) + '.txt'):\n yield scrapy.Request(url, meta={'chaptername': chaptername,\n 'tag': classname, 'name': name, 'author': author,\n 'index': i}, callback=self.detail_parse)\n\n def detail_parse(self, 
response):\n tag = response.meta['tag']\n name = response.meta['name']\n author = response.meta['author']\n chaptername = response.meta['chaptername']\n index = response.meta['index']\n item = BiqugeItem()\n novel = response.xpath(\"//div[@id='content']/text()\").extract()\n item['novel'] = '\\n'.join(novel).replace(' ', ' ')\n item['name'] = name\n item['tag'] = tag\n item['author'] = author\n item['chapter'] = chaptername\n item['index'] = index\n yield item\n",
"step-3": "<mask token>\n\n\nclass NovelsSpider(scrapy.Spider):\n name = 'novels'\n allowed_domains = ['xbiquge.la']\n start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']\n\n def parse(self, response):\n path = '/Users/qx/Documents/小说/new/'\n all = response.xpath(\".//div[@class='novellist']\")\n for oneitem in all:\n classname = oneitem.xpath('./h2/text()').extract_first()\n if classname == '奇幻小说、玄幻小说大全列表':\n classname = 'xuanhuan'\n if classname == '历史小说、军事小说、穿越小说大全列表':\n classname = 'chuanyue'\n if classname == '武侠小说、仙侠小说、修真小说大全列表':\n classname = 'xiuzhen'\n if classname == '言情小说、都市小说大全列表':\n classname = 'dushi'\n if classname == '异灵小说、科幻小说大全列表':\n classname = 'kehuan'\n if classname == '游戏小说、竞技小说、网游小说大全列表':\n classname = 'wangyou'\n urls = oneitem.xpath('./ul/li/a/@href').extract()\n names = oneitem.xpath('./ul/li/a/text()').extract()\n for i in range(len(urls)):\n url = urls[i]\n name = names[i]\n yield scrapy.Request(url, meta={'name': name, 'classname':\n classname}, callback=self.url_parse)\n\n def url_parse(self, response):\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n name = response.meta['name']\n classname = response.meta['classname']\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n if author:\n author = author.split(':', 1)[1]\n print(name + '-' + author)\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\"\n ).extract()\n for i in range(len(listurls)):\n url = 'http://www.xbiquge.la' + listurls[i]\n chaptername = chapternames[i]\n oldname = path + classname + '/' + name + '-作者:' + author\n newname = path + classname + '/' + name\n if os.path.exists(oldname):\n os.rename(oldname, newname)\n if not os.path.exists(newname):\n os.makedirs(newname)\n if not os.path.exists(newname + '/' + str(i) + '.txt'):\n yield scrapy.Request(url, meta={'chaptername': chaptername,\n 'tag': classname, 'name': name, 'author': author,\n 
'index': i}, callback=self.detail_parse)\n\n def detail_parse(self, response):\n tag = response.meta['tag']\n name = response.meta['name']\n author = response.meta['author']\n chaptername = response.meta['chaptername']\n index = response.meta['index']\n item = BiqugeItem()\n novel = response.xpath(\"//div[@id='content']/text()\").extract()\n item['novel'] = '\\n'.join(novel).replace(' ', ' ')\n item['name'] = name\n item['tag'] = tag\n item['author'] = author\n item['chapter'] = chaptername\n item['index'] = index\n yield item\n",
"step-4": "import scrapy\nimport os\nfrom topdb.items import BiqugeItem\n\n\nclass NovelsSpider(scrapy.Spider):\n name = 'novels'\n allowed_domains = ['xbiquge.la']\n start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']\n\n def parse(self, response):\n path = '/Users/qx/Documents/小说/new/'\n all = response.xpath(\".//div[@class='novellist']\")\n for oneitem in all:\n classname = oneitem.xpath('./h2/text()').extract_first()\n if classname == '奇幻小说、玄幻小说大全列表':\n classname = 'xuanhuan'\n if classname == '历史小说、军事小说、穿越小说大全列表':\n classname = 'chuanyue'\n if classname == '武侠小说、仙侠小说、修真小说大全列表':\n classname = 'xiuzhen'\n if classname == '言情小说、都市小说大全列表':\n classname = 'dushi'\n if classname == '异灵小说、科幻小说大全列表':\n classname = 'kehuan'\n if classname == '游戏小说、竞技小说、网游小说大全列表':\n classname = 'wangyou'\n urls = oneitem.xpath('./ul/li/a/@href').extract()\n names = oneitem.xpath('./ul/li/a/text()').extract()\n for i in range(len(urls)):\n url = urls[i]\n name = names[i]\n yield scrapy.Request(url, meta={'name': name, 'classname':\n classname}, callback=self.url_parse)\n\n def url_parse(self, response):\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n name = response.meta['name']\n classname = response.meta['classname']\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n if author:\n author = author.split(':', 1)[1]\n print(name + '-' + author)\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\"\n ).extract()\n for i in range(len(listurls)):\n url = 'http://www.xbiquge.la' + listurls[i]\n chaptername = chapternames[i]\n oldname = path + classname + '/' + name + '-作者:' + author\n newname = path + classname + '/' + name\n if os.path.exists(oldname):\n os.rename(oldname, newname)\n if not os.path.exists(newname):\n os.makedirs(newname)\n if not os.path.exists(newname + '/' + str(i) + '.txt'):\n yield scrapy.Request(url, meta={'chaptername': chaptername,\n 'tag': 
classname, 'name': name, 'author': author,\n 'index': i}, callback=self.detail_parse)\n\n def detail_parse(self, response):\n tag = response.meta['tag']\n name = response.meta['name']\n author = response.meta['author']\n chaptername = response.meta['chaptername']\n index = response.meta['index']\n item = BiqugeItem()\n novel = response.xpath(\"//div[@id='content']/text()\").extract()\n item['novel'] = '\\n'.join(novel).replace(' ', ' ')\n item['name'] = name\n item['tag'] = tag\n item['author'] = author\n item['chapter'] = chaptername\n item['index'] = index\n yield item\n",
"step-5": "# -*- coding: utf-8 -*-\nimport scrapy\n\nimport os\nfrom topdb.items import BiqugeItem\n\nclass NovelsSpider(scrapy.Spider):\n name = 'novels'\n allowed_domains = ['xbiquge.la']\n start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']\n\n def parse(self, response):\n # 小说分类\n path = '/Users/qx/Documents/小说/new/'\n\n all=response.xpath(\".//div[@class='novellist']\")\n\n for oneitem in all:\n\n classname=oneitem.xpath('./h2/text()').extract_first()\n if classname=='奇幻小说、玄幻小说大全列表':\n classname='xuanhuan'\n if classname=='历史小说、军事小说、穿越小说大全列表':\n classname='chuanyue'\n if classname=='武侠小说、仙侠小说、修真小说大全列表':\n classname='xiuzhen'\n if classname=='言情小说、都市小说大全列表':\n classname='dushi'\n if classname=='异灵小说、科幻小说大全列表':\n classname='kehuan'\n if classname=='游戏小说、竞技小说、网游小说大全列表':\n classname='wangyou'\n\n urls=oneitem.xpath('./ul/li/a/@href').extract()\n\n names=oneitem.xpath('./ul/li/a/text()').extract()\n\n for i in range(len(urls)):\n url=urls[i]\n name=names[i]\n yield scrapy.Request(url, meta={'name': name, 'classname': classname}, callback=self.url_parse)\n\n\n def url_parse(self, response):\n # 小说章节列表\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n\n name = response.meta['name']\n classname = response.meta['classname']\n\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n\n if author:\n author=author.split(':',1)[1]\n\n print(name+'-'+author)\n\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\").extract()\n\n for i in range(len(listurls)):\n url = \"http://www.xbiquge.la\" + listurls[i]\n chaptername=chapternames[i]\n\n oldname=path+ classname+'/'+name+ '-作者:' + author\n newname=path+ classname+'/'+name\n\n if (os.path.exists(oldname)):\n os.rename(oldname,newname)\n\n if (not os.path.exists(newname)):\n os.makedirs(newname)\n\n if(not os.path.exists(newname+'/'+ str(i) + \".txt\")):\n yield scrapy.Request(url, 
meta={'chaptername':chaptername,'tag':classname,'name':name,'author':author,'index':i}, callback=self.detail_parse)\n\n def detail_parse(self, response):\n # 章节详细内容\n\n tag = response.meta['tag']\n name = response.meta['name']\n author = response.meta['author']\n chaptername = response.meta['chaptername']\n index = response.meta['index']\n\n item = BiqugeItem()\n\n novel = response.xpath(\"//div[@id='content']/text()\").extract()\n item['novel'] = \"\\n\".join(novel).replace(\" \", \" \")\n item['name'] = name\n item['tag'] = tag\n item['author'] = author\n item['chapter'] = chaptername\n item['index'] = index\n\n # print(item['classname'])\n # print(item['name'])\n # print(item['title'])\n # print('\\n')\n yield item\n\n # 这里是爬取整个网站且按照分类进行爬取 但是重点是 爬取太慢scrapy 是异步操作 还需要了解一下多线程的问题 这样速度能更快些\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def join_game(request):
if request.method != 'POST':
return HttpResponseRedirect('/game')
form_data = json.loads(request.body.decode('utf-8'))
form = JoinForm(form_data)
if form.is_valid():
code = int(form.cleaned_data['code'])
input_name = form.cleaned_data['name']
else:
return JsonResponse(form.errors.as_json(), safe=False, status=400)
game = get_object_or_404(Game, code=code)
if game.joined_players < game.number_of_players:
game.joined_players = game.joined_players + 1
game.save()
new_player = Player(name=input_name, game_id=game, player_number=
game.joined_players)
new_player.save()
request.session['player_id'] = new_player.pk
if new_player.player_number == game.number_of_players:
game.deal_cards_to_players()
return JsonResponse(game.pk, safe=False)
def game(request, game_id):
err_str = ''
this_game = get_object_or_404(Game, pk=game_id)
print(request.session.keys())
if this_game.has_been_won:
return redirect(create_new_game)
players = Player.objects.filter(game_id=game_id)
if 'player_id' not in request.session:
err_str = 'Unauthenticated user'
this_player = get_object_or_404(Player, pk=request.session['player_id'])
if this_player not in players:
err_str = 'La partita richiesta non esiste o si è già conclusa.'
if err_str != '':
return render(request, 'error.html', {'error': err_str}, status=403)
return render(request, 'gametest.html', {'game_id': this_game.pk,
'number_of_players': this_game.number_of_players})
<|reserved_special_token_0|>
def restart_game(request, game_id):
this_game = get_object_or_404(Game, pk=game_id)
if not this_game.has_been_won:
return redirect(create_new_game)
players = Player.objects.filter(game_id=game_id)
if 'player_id' not in request.session:
return redirect(create_new_game)
this_player = get_object_or_404(Player, pk=request.session['player_id'])
if this_player not in players:
return redirect(create_new_game)
this_game.reset()
this_game.deal_cards_to_players()
return JsonResponse({'status': 'ok'})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_joined_players(request, game_id):
game = get_object_or_404(Game, pk=game_id)
return HttpResponse(str(game.joined_players))
<|reserved_special_token_0|>
def join_game(request):
if request.method != 'POST':
return HttpResponseRedirect('/game')
form_data = json.loads(request.body.decode('utf-8'))
form = JoinForm(form_data)
if form.is_valid():
code = int(form.cleaned_data['code'])
input_name = form.cleaned_data['name']
else:
return JsonResponse(form.errors.as_json(), safe=False, status=400)
game = get_object_or_404(Game, code=code)
if game.joined_players < game.number_of_players:
game.joined_players = game.joined_players + 1
game.save()
new_player = Player(name=input_name, game_id=game, player_number=
game.joined_players)
new_player.save()
request.session['player_id'] = new_player.pk
if new_player.player_number == game.number_of_players:
game.deal_cards_to_players()
return JsonResponse(game.pk, safe=False)
def game(request, game_id):
err_str = ''
this_game = get_object_or_404(Game, pk=game_id)
print(request.session.keys())
if this_game.has_been_won:
return redirect(create_new_game)
players = Player.objects.filter(game_id=game_id)
if 'player_id' not in request.session:
err_str = 'Unauthenticated user'
this_player = get_object_or_404(Player, pk=request.session['player_id'])
if this_player not in players:
err_str = 'La partita richiesta non esiste o si è già conclusa.'
if err_str != '':
return render(request, 'error.html', {'error': err_str}, status=403)
return render(request, 'gametest.html', {'game_id': this_game.pk,
'number_of_players': this_game.number_of_players})
def feedback_create(request):
if request.method != 'POST':
return HttpResponseRedirect('/game')
form_data = json.loads(request.body.decode('utf-8'))
form = FeedbackForm(form_data)
if form.is_valid():
sender_name = form.cleaned_data['sender_name']
email = form.cleaned_data['email']
message = form.cleaned_data['message']
else:
return JsonResponse(form.errors.as_json(), safe=False, status=400)
feedback = Feedback(sender_name=sender_name, email=email, message=message)
feedback.save()
return JsonResponse('[]', status=200, safe=False)
def restart_game(request, game_id):
this_game = get_object_or_404(Game, pk=game_id)
if not this_game.has_been_won:
return redirect(create_new_game)
players = Player.objects.filter(game_id=game_id)
if 'player_id' not in request.session:
return redirect(create_new_game)
this_player = get_object_or_404(Player, pk=request.session['player_id'])
if this_player not in players:
return redirect(create_new_game)
this_game.reset()
this_game.deal_cards_to_players()
return JsonResponse({'status': 'ok'})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_joined_players(request, game_id):
game = get_object_or_404(Game, pk=game_id)
return HttpResponse(str(game.joined_players))
def create_new_game(request):
if request.method == 'POST':
form_data = json.loads(request.body.decode('utf-8'))
form = GameForm(form_data)
if form.is_valid():
number_of_players = form.cleaned_data['number_of_players']
new_game = Game(number_of_players=int(number_of_players))
new_game.instantiate()
new_game.save()
new_player = Player(name=form.cleaned_data['creator_name'],
game_id=new_game)
new_player.save()
request.session['player_id'] = new_player.pk
return JsonResponse({'code': new_game.code, 'game_id': new_game
.pk, 'number_of_players': number_of_players})
else:
return JsonResponse(form.errors.as_json(), safe=False, status=400)
else:
if 'player_id' not in request.session:
request.session['player_id'] = 0
create_form = GameForm(initial={'number_of_players': '2'})
join_form = JoinForm()
feedback_form = FeedbackForm()
return render(request, 'newhome.html', {'create_form': create_form,
'join_form': join_form, 'feedback_form': feedback_form})
def join_game(request):
if request.method != 'POST':
return HttpResponseRedirect('/game')
form_data = json.loads(request.body.decode('utf-8'))
form = JoinForm(form_data)
if form.is_valid():
code = int(form.cleaned_data['code'])
input_name = form.cleaned_data['name']
else:
return JsonResponse(form.errors.as_json(), safe=False, status=400)
game = get_object_or_404(Game, code=code)
if game.joined_players < game.number_of_players:
game.joined_players = game.joined_players + 1
game.save()
new_player = Player(name=input_name, game_id=game, player_number=
game.joined_players)
new_player.save()
request.session['player_id'] = new_player.pk
if new_player.player_number == game.number_of_players:
game.deal_cards_to_players()
return JsonResponse(game.pk, safe=False)
def game(request, game_id):
err_str = ''
this_game = get_object_or_404(Game, pk=game_id)
print(request.session.keys())
if this_game.has_been_won:
return redirect(create_new_game)
players = Player.objects.filter(game_id=game_id)
if 'player_id' not in request.session:
err_str = 'Unauthenticated user'
this_player = get_object_or_404(Player, pk=request.session['player_id'])
if this_player not in players:
err_str = 'La partita richiesta non esiste o si è già conclusa.'
if err_str != '':
return render(request, 'error.html', {'error': err_str}, status=403)
return render(request, 'gametest.html', {'game_id': this_game.pk,
'number_of_players': this_game.number_of_players})
def feedback_create(request):
if request.method != 'POST':
return HttpResponseRedirect('/game')
form_data = json.loads(request.body.decode('utf-8'))
form = FeedbackForm(form_data)
if form.is_valid():
sender_name = form.cleaned_data['sender_name']
email = form.cleaned_data['email']
message = form.cleaned_data['message']
else:
return JsonResponse(form.errors.as_json(), safe=False, status=400)
feedback = Feedback(sender_name=sender_name, email=email, message=message)
feedback.save()
return JsonResponse('[]', status=200, safe=False)
def restart_game(request, game_id):
this_game = get_object_or_404(Game, pk=game_id)
if not this_game.has_been_won:
return redirect(create_new_game)
players = Player.objects.filter(game_id=game_id)
if 'player_id' not in request.session:
return redirect(create_new_game)
this_player = get_object_or_404(Player, pk=request.session['player_id'])
if this_player not in players:
return redirect(create_new_game)
this_game.reset()
this_game.deal_cards_to_players()
return JsonResponse({'status': 'ok'})
<|reserved_special_token_1|>
from django.shortcuts import render, redirect
from .models import Game, Player, CardsInHand, Feedback
from django.db.models import Q
from .forms import GameForm, JoinForm, FeedbackForm
from django.shortcuts import get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.views.generic import CreateView
import json
def get_joined_players(request, game_id):
game = get_object_or_404(Game, pk=game_id)
return HttpResponse(str(game.joined_players))
def create_new_game(request):
if request.method == 'POST':
form_data = json.loads(request.body.decode('utf-8'))
form = GameForm(form_data)
if form.is_valid():
number_of_players = form.cleaned_data['number_of_players']
new_game = Game(number_of_players=int(number_of_players))
new_game.instantiate()
new_game.save()
new_player = Player(name=form.cleaned_data['creator_name'],
game_id=new_game)
new_player.save()
request.session['player_id'] = new_player.pk
return JsonResponse({'code': new_game.code, 'game_id': new_game
.pk, 'number_of_players': number_of_players})
else:
return JsonResponse(form.errors.as_json(), safe=False, status=400)
else:
if 'player_id' not in request.session:
request.session['player_id'] = 0
create_form = GameForm(initial={'number_of_players': '2'})
join_form = JoinForm()
feedback_form = FeedbackForm()
return render(request, 'newhome.html', {'create_form': create_form,
'join_form': join_form, 'feedback_form': feedback_form})
def join_game(request):
if request.method != 'POST':
return HttpResponseRedirect('/game')
form_data = json.loads(request.body.decode('utf-8'))
form = JoinForm(form_data)
if form.is_valid():
code = int(form.cleaned_data['code'])
input_name = form.cleaned_data['name']
else:
return JsonResponse(form.errors.as_json(), safe=False, status=400)
game = get_object_or_404(Game, code=code)
if game.joined_players < game.number_of_players:
game.joined_players = game.joined_players + 1
game.save()
new_player = Player(name=input_name, game_id=game, player_number=
game.joined_players)
new_player.save()
request.session['player_id'] = new_player.pk
if new_player.player_number == game.number_of_players:
game.deal_cards_to_players()
return JsonResponse(game.pk, safe=False)
def game(request, game_id):
err_str = ''
this_game = get_object_or_404(Game, pk=game_id)
print(request.session.keys())
if this_game.has_been_won:
return redirect(create_new_game)
players = Player.objects.filter(game_id=game_id)
if 'player_id' not in request.session:
err_str = 'Unauthenticated user'
this_player = get_object_or_404(Player, pk=request.session['player_id'])
if this_player not in players:
err_str = 'La partita richiesta non esiste o si è già conclusa.'
if err_str != '':
return render(request, 'error.html', {'error': err_str}, status=403)
return render(request, 'gametest.html', {'game_id': this_game.pk,
'number_of_players': this_game.number_of_players})
def feedback_create(request):
if request.method != 'POST':
return HttpResponseRedirect('/game')
form_data = json.loads(request.body.decode('utf-8'))
form = FeedbackForm(form_data)
if form.is_valid():
sender_name = form.cleaned_data['sender_name']
email = form.cleaned_data['email']
message = form.cleaned_data['message']
else:
return JsonResponse(form.errors.as_json(), safe=False, status=400)
feedback = Feedback(sender_name=sender_name, email=email, message=message)
feedback.save()
return JsonResponse('[]', status=200, safe=False)
def restart_game(request, game_id):
this_game = get_object_or_404(Game, pk=game_id)
if not this_game.has_been_won:
return redirect(create_new_game)
players = Player.objects.filter(game_id=game_id)
if 'player_id' not in request.session:
return redirect(create_new_game)
this_player = get_object_or_404(Player, pk=request.session['player_id'])
if this_player not in players:
return redirect(create_new_game)
this_game.reset()
this_game.deal_cards_to_players()
return JsonResponse({'status': 'ok'})
<|reserved_special_token_1|>
from django.shortcuts import render, redirect
from .models import Game, Player, CardsInHand, Feedback
from django.db.models import Q
from .forms import GameForm, JoinForm, FeedbackForm
from django.shortcuts import get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.views.generic import CreateView
import json
# from django.contrib.auth.decorators import login_required
def get_joined_players(request, game_id):
game = get_object_or_404(Game, pk=game_id)
return HttpResponse(str(game.joined_players))
def create_new_game(request):
if request.method == "POST":
form_data = json.loads(request.body.decode('utf-8'))
form = GameForm(form_data)
if form.is_valid():
number_of_players = form.cleaned_data["number_of_players"]
new_game = Game(number_of_players=int(number_of_players))
new_game.instantiate() # initializes new game
new_game.save() # save new game to db
# create first player
new_player = Player(name=form.cleaned_data["creator_name"], game_id=new_game)
new_player.save()
# create new session to allow the user to play the game
request.session['player_id'] = new_player.pk
return JsonResponse({
"code": new_game.code,
"game_id": new_game.pk,
"number_of_players": number_of_players,
})
# return render(request, "game_created.html", {
# "form": form,
# "game_code": new_game.code,
# "n_players": number_of_players,
# "game_id": new_game.pk,
# "your_name": new_player.name,
# })
else:
return JsonResponse(form.errors.as_json(), safe=False, status=400)
else:
# set a dummy player id in player's session. this is needed to make channels session persistence work (for matchmaking)
if('player_id' not in request.session):
request.session['player_id'] = 0
create_form = GameForm(initial={'number_of_players': '2'})
join_form = JoinForm()
feedback_form = FeedbackForm()
return render(
request,
"newhome.html",
{
"create_form": create_form,
"join_form": join_form,
"feedback_form": feedback_form,
}
)
def join_game(request):
if request.method != "POST":
return HttpResponseRedirect("/game")
form_data = json.loads(request.body.decode('utf-8'))
form = JoinForm(form_data)
if form.is_valid():
code = int(form.cleaned_data['code'])
input_name = form.cleaned_data['name']
else:
return JsonResponse(form.errors.as_json(), safe=False, status=400)
game = get_object_or_404(Game, code=code)
if(game.joined_players < game.number_of_players):
# increment the number of players who joined this game
game.joined_players = game.joined_players + 1
game.save()
# create player and append it to this game
new_player = Player(name=input_name, game_id=game, player_number=game.joined_players)
new_player.save()
# create new session to allow user to play
request.session['player_id'] = new_player.pk
if(new_player.player_number == game.number_of_players):
# last player joined: deal cards to all players; game can now being
game.deal_cards_to_players()
return JsonResponse(game.pk, safe=False)
def game(request, game_id):
err_str = ''
this_game = get_object_or_404(Game, pk=game_id)
print(request.session.keys())
# if game is over, redirect to home
if this_game.has_been_won:
return redirect(create_new_game)
# get players who joined this game
players = Player.objects.filter(game_id=game_id)
if('player_id' not in request.session): # check if user has a session variable player_id
err_str = "Unauthenticated user"
this_player = get_object_or_404(Player, pk=request.session['player_id'])
if(this_player not in players): # check if this player has joined the game
err_str = "La partita richiesta non esiste o si è già conclusa."
if err_str != '':
return render(
request,
'error.html',
{
'error': err_str,
},
status=403
)
return render(request, 'gametest.html', {
'game_id': this_game.pk,
'number_of_players': this_game.number_of_players,
})
def feedback_create(request):
if request.method != "POST":
return HttpResponseRedirect("/game")
form_data = json.loads(request.body.decode('utf-8'))
form = FeedbackForm(form_data)
if form.is_valid():
sender_name = form.cleaned_data['sender_name']
email = form.cleaned_data['email']
message = form.cleaned_data['message']
else:
return JsonResponse(form.errors.as_json(), safe=False, status=400)
feedback = Feedback(sender_name=sender_name, email=email, message=message)
feedback.save()
return JsonResponse("[]", status=200, safe=False)
def restart_game(request, game_id):
this_game = get_object_or_404(Game, pk=game_id)
# if game isn't over, redirect to home
if not this_game.has_been_won:
return redirect(create_new_game)
# get players who joined this game
players = Player.objects.filter(game_id=game_id)
if('player_id' not in request.session): # check if user has a session variable player_id
return redirect(create_new_game)
this_player = get_object_or_404(Player, pk=request.session['player_id'])
if(this_player not in players): # check if this player has joined the game
return redirect(create_new_game)
this_game.reset()
this_game.deal_cards_to_players()
return JsonResponse({'status': 'ok'})
|
flexible
|
{
"blob_id": "d650f578ea30772489625ee26f3e4bf04131964b",
"index": 6140,
"step-1": "<mask token>\n\n\ndef join_game(request):\n if request.method != 'POST':\n return HttpResponseRedirect('/game')\n form_data = json.loads(request.body.decode('utf-8'))\n form = JoinForm(form_data)\n if form.is_valid():\n code = int(form.cleaned_data['code'])\n input_name = form.cleaned_data['name']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n game = get_object_or_404(Game, code=code)\n if game.joined_players < game.number_of_players:\n game.joined_players = game.joined_players + 1\n game.save()\n new_player = Player(name=input_name, game_id=game, player_number=\n game.joined_players)\n new_player.save()\n request.session['player_id'] = new_player.pk\n if new_player.player_number == game.number_of_players:\n game.deal_cards_to_players()\n return JsonResponse(game.pk, safe=False)\n\n\ndef game(request, game_id):\n err_str = ''\n this_game = get_object_or_404(Game, pk=game_id)\n print(request.session.keys())\n if this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n err_str = 'Unauthenticated user'\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if this_player not in players:\n err_str = 'La partita richiesta non esiste o si è già conclusa.'\n if err_str != '':\n return render(request, 'error.html', {'error': err_str}, status=403)\n return render(request, 'gametest.html', {'game_id': this_game.pk,\n 'number_of_players': this_game.number_of_players})\n\n\n<mask token>\n\n\ndef restart_game(request, game_id):\n this_game = get_object_or_404(Game, pk=game_id)\n if not this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n return redirect(create_new_game)\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if this_player not in players:\n return redirect(create_new_game)\n 
this_game.reset()\n this_game.deal_cards_to_players()\n return JsonResponse({'status': 'ok'})\n",
"step-2": "<mask token>\n\n\ndef get_joined_players(request, game_id):\n game = get_object_or_404(Game, pk=game_id)\n return HttpResponse(str(game.joined_players))\n\n\n<mask token>\n\n\ndef join_game(request):\n if request.method != 'POST':\n return HttpResponseRedirect('/game')\n form_data = json.loads(request.body.decode('utf-8'))\n form = JoinForm(form_data)\n if form.is_valid():\n code = int(form.cleaned_data['code'])\n input_name = form.cleaned_data['name']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n game = get_object_or_404(Game, code=code)\n if game.joined_players < game.number_of_players:\n game.joined_players = game.joined_players + 1\n game.save()\n new_player = Player(name=input_name, game_id=game, player_number=\n game.joined_players)\n new_player.save()\n request.session['player_id'] = new_player.pk\n if new_player.player_number == game.number_of_players:\n game.deal_cards_to_players()\n return JsonResponse(game.pk, safe=False)\n\n\ndef game(request, game_id):\n err_str = ''\n this_game = get_object_or_404(Game, pk=game_id)\n print(request.session.keys())\n if this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n err_str = 'Unauthenticated user'\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if this_player not in players:\n err_str = 'La partita richiesta non esiste o si è già conclusa.'\n if err_str != '':\n return render(request, 'error.html', {'error': err_str}, status=403)\n return render(request, 'gametest.html', {'game_id': this_game.pk,\n 'number_of_players': this_game.number_of_players})\n\n\ndef feedback_create(request):\n if request.method != 'POST':\n return HttpResponseRedirect('/game')\n form_data = json.loads(request.body.decode('utf-8'))\n form = FeedbackForm(form_data)\n if form.is_valid():\n sender_name = form.cleaned_data['sender_name']\n email = 
form.cleaned_data['email']\n message = form.cleaned_data['message']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n feedback = Feedback(sender_name=sender_name, email=email, message=message)\n feedback.save()\n return JsonResponse('[]', status=200, safe=False)\n\n\ndef restart_game(request, game_id):\n this_game = get_object_or_404(Game, pk=game_id)\n if not this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n return redirect(create_new_game)\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if this_player not in players:\n return redirect(create_new_game)\n this_game.reset()\n this_game.deal_cards_to_players()\n return JsonResponse({'status': 'ok'})\n",
"step-3": "<mask token>\n\n\ndef get_joined_players(request, game_id):\n game = get_object_or_404(Game, pk=game_id)\n return HttpResponse(str(game.joined_players))\n\n\ndef create_new_game(request):\n if request.method == 'POST':\n form_data = json.loads(request.body.decode('utf-8'))\n form = GameForm(form_data)\n if form.is_valid():\n number_of_players = form.cleaned_data['number_of_players']\n new_game = Game(number_of_players=int(number_of_players))\n new_game.instantiate()\n new_game.save()\n new_player = Player(name=form.cleaned_data['creator_name'],\n game_id=new_game)\n new_player.save()\n request.session['player_id'] = new_player.pk\n return JsonResponse({'code': new_game.code, 'game_id': new_game\n .pk, 'number_of_players': number_of_players})\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n else:\n if 'player_id' not in request.session:\n request.session['player_id'] = 0\n create_form = GameForm(initial={'number_of_players': '2'})\n join_form = JoinForm()\n feedback_form = FeedbackForm()\n return render(request, 'newhome.html', {'create_form': create_form,\n 'join_form': join_form, 'feedback_form': feedback_form})\n\n\ndef join_game(request):\n if request.method != 'POST':\n return HttpResponseRedirect('/game')\n form_data = json.loads(request.body.decode('utf-8'))\n form = JoinForm(form_data)\n if form.is_valid():\n code = int(form.cleaned_data['code'])\n input_name = form.cleaned_data['name']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n game = get_object_or_404(Game, code=code)\n if game.joined_players < game.number_of_players:\n game.joined_players = game.joined_players + 1\n game.save()\n new_player = Player(name=input_name, game_id=game, player_number=\n game.joined_players)\n new_player.save()\n request.session['player_id'] = new_player.pk\n if new_player.player_number == game.number_of_players:\n game.deal_cards_to_players()\n return JsonResponse(game.pk, safe=False)\n\n\ndef 
game(request, game_id):\n err_str = ''\n this_game = get_object_or_404(Game, pk=game_id)\n print(request.session.keys())\n if this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n err_str = 'Unauthenticated user'\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if this_player not in players:\n err_str = 'La partita richiesta non esiste o si è già conclusa.'\n if err_str != '':\n return render(request, 'error.html', {'error': err_str}, status=403)\n return render(request, 'gametest.html', {'game_id': this_game.pk,\n 'number_of_players': this_game.number_of_players})\n\n\ndef feedback_create(request):\n if request.method != 'POST':\n return HttpResponseRedirect('/game')\n form_data = json.loads(request.body.decode('utf-8'))\n form = FeedbackForm(form_data)\n if form.is_valid():\n sender_name = form.cleaned_data['sender_name']\n email = form.cleaned_data['email']\n message = form.cleaned_data['message']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n feedback = Feedback(sender_name=sender_name, email=email, message=message)\n feedback.save()\n return JsonResponse('[]', status=200, safe=False)\n\n\ndef restart_game(request, game_id):\n this_game = get_object_or_404(Game, pk=game_id)\n if not this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n return redirect(create_new_game)\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if this_player not in players:\n return redirect(create_new_game)\n this_game.reset()\n this_game.deal_cards_to_players()\n return JsonResponse({'status': 'ok'})\n",
"step-4": "from django.shortcuts import render, redirect\nfrom .models import Game, Player, CardsInHand, Feedback\nfrom django.db.models import Q\nfrom .forms import GameForm, JoinForm, FeedbackForm\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.views.generic import CreateView\nimport json\n\n\ndef get_joined_players(request, game_id):\n game = get_object_or_404(Game, pk=game_id)\n return HttpResponse(str(game.joined_players))\n\n\ndef create_new_game(request):\n if request.method == 'POST':\n form_data = json.loads(request.body.decode('utf-8'))\n form = GameForm(form_data)\n if form.is_valid():\n number_of_players = form.cleaned_data['number_of_players']\n new_game = Game(number_of_players=int(number_of_players))\n new_game.instantiate()\n new_game.save()\n new_player = Player(name=form.cleaned_data['creator_name'],\n game_id=new_game)\n new_player.save()\n request.session['player_id'] = new_player.pk\n return JsonResponse({'code': new_game.code, 'game_id': new_game\n .pk, 'number_of_players': number_of_players})\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n else:\n if 'player_id' not in request.session:\n request.session['player_id'] = 0\n create_form = GameForm(initial={'number_of_players': '2'})\n join_form = JoinForm()\n feedback_form = FeedbackForm()\n return render(request, 'newhome.html', {'create_form': create_form,\n 'join_form': join_form, 'feedback_form': feedback_form})\n\n\ndef join_game(request):\n if request.method != 'POST':\n return HttpResponseRedirect('/game')\n form_data = json.loads(request.body.decode('utf-8'))\n form = JoinForm(form_data)\n if form.is_valid():\n code = int(form.cleaned_data['code'])\n input_name = form.cleaned_data['name']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n game = get_object_or_404(Game, code=code)\n if game.joined_players < game.number_of_players:\n 
game.joined_players = game.joined_players + 1\n game.save()\n new_player = Player(name=input_name, game_id=game, player_number=\n game.joined_players)\n new_player.save()\n request.session['player_id'] = new_player.pk\n if new_player.player_number == game.number_of_players:\n game.deal_cards_to_players()\n return JsonResponse(game.pk, safe=False)\n\n\ndef game(request, game_id):\n err_str = ''\n this_game = get_object_or_404(Game, pk=game_id)\n print(request.session.keys())\n if this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n err_str = 'Unauthenticated user'\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if this_player not in players:\n err_str = 'La partita richiesta non esiste o si è già conclusa.'\n if err_str != '':\n return render(request, 'error.html', {'error': err_str}, status=403)\n return render(request, 'gametest.html', {'game_id': this_game.pk,\n 'number_of_players': this_game.number_of_players})\n\n\ndef feedback_create(request):\n if request.method != 'POST':\n return HttpResponseRedirect('/game')\n form_data = json.loads(request.body.decode('utf-8'))\n form = FeedbackForm(form_data)\n if form.is_valid():\n sender_name = form.cleaned_data['sender_name']\n email = form.cleaned_data['email']\n message = form.cleaned_data['message']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n feedback = Feedback(sender_name=sender_name, email=email, message=message)\n feedback.save()\n return JsonResponse('[]', status=200, safe=False)\n\n\ndef restart_game(request, game_id):\n this_game = get_object_or_404(Game, pk=game_id)\n if not this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n return redirect(create_new_game)\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if 
this_player not in players:\n return redirect(create_new_game)\n this_game.reset()\n this_game.deal_cards_to_players()\n return JsonResponse({'status': 'ok'})\n",
"step-5": "from django.shortcuts import render, redirect\nfrom .models import Game, Player, CardsInHand, Feedback\nfrom django.db.models import Q\nfrom .forms import GameForm, JoinForm, FeedbackForm\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.views.generic import CreateView\nimport json\n# from django.contrib.auth.decorators import login_required\n\ndef get_joined_players(request, game_id):\n game = get_object_or_404(Game, pk=game_id)\n return HttpResponse(str(game.joined_players))\n\ndef create_new_game(request):\n if request.method == \"POST\":\n form_data = json.loads(request.body.decode('utf-8'))\n form = GameForm(form_data)\n\n if form.is_valid():\n number_of_players = form.cleaned_data[\"number_of_players\"]\n\n new_game = Game(number_of_players=int(number_of_players))\n new_game.instantiate() # initializes new game\n new_game.save() # save new game to db\n\n # create first player\n new_player = Player(name=form.cleaned_data[\"creator_name\"], game_id=new_game)\n new_player.save()\n\n # create new session to allow the user to play the game\n request.session['player_id'] = new_player.pk\n\n return JsonResponse({\n \"code\": new_game.code,\n \"game_id\": new_game.pk,\n \"number_of_players\": number_of_players,\n })\n # return render(request, \"game_created.html\", {\n # \"form\": form,\n # \"game_code\": new_game.code,\n # \"n_players\": number_of_players,\n # \"game_id\": new_game.pk,\n # \"your_name\": new_player.name,\n # })\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n else:\n # set a dummy player id in player's session. 
this is needed to make channels session persistence work (for matchmaking)\n if('player_id' not in request.session):\n request.session['player_id'] = 0\n\n create_form = GameForm(initial={'number_of_players': '2'})\n join_form = JoinForm()\n feedback_form = FeedbackForm()\n return render(\n request,\n \"newhome.html\",\n {\n \"create_form\": create_form,\n \"join_form\": join_form,\n \"feedback_form\": feedback_form,\n }\n )\n\ndef join_game(request):\n if request.method != \"POST\":\n return HttpResponseRedirect(\"/game\")\n\n form_data = json.loads(request.body.decode('utf-8'))\n form = JoinForm(form_data)\n if form.is_valid():\n code = int(form.cleaned_data['code'])\n input_name = form.cleaned_data['name']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n\n game = get_object_or_404(Game, code=code)\n if(game.joined_players < game.number_of_players):\n # increment the number of players who joined this game\n game.joined_players = game.joined_players + 1\n game.save()\n # create player and append it to this game\n new_player = Player(name=input_name, game_id=game, player_number=game.joined_players)\n new_player.save()\n\n # create new session to allow user to play\n request.session['player_id'] = new_player.pk\n\n if(new_player.player_number == game.number_of_players):\n # last player joined: deal cards to all players; game can now being\n game.deal_cards_to_players()\n\n return JsonResponse(game.pk, safe=False)\n\ndef game(request, game_id):\n err_str = ''\n this_game = get_object_or_404(Game, pk=game_id)\n print(request.session.keys())\n\n # if game is over, redirect to home\n if this_game.has_been_won:\n return redirect(create_new_game)\n\n # get players who joined this game\n players = Player.objects.filter(game_id=game_id)\n\n if('player_id' not in request.session): # check if user has a session variable player_id\n err_str = \"Unauthenticated user\"\n\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n 
if(this_player not in players): # check if this player has joined the game\n err_str = \"La partita richiesta non esiste o si è già conclusa.\"\n\n if err_str != '':\n return render(\n request,\n 'error.html',\n {\n 'error': err_str,\n },\n status=403\n )\n\n return render(request, 'gametest.html', {\n 'game_id': this_game.pk,\n 'number_of_players': this_game.number_of_players,\n })\n\ndef feedback_create(request):\n if request.method != \"POST\":\n return HttpResponseRedirect(\"/game\")\n\n form_data = json.loads(request.body.decode('utf-8'))\n form = FeedbackForm(form_data)\n if form.is_valid():\n sender_name = form.cleaned_data['sender_name']\n email = form.cleaned_data['email']\n message = form.cleaned_data['message']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n\n feedback = Feedback(sender_name=sender_name, email=email, message=message)\n feedback.save()\n return JsonResponse(\"[]\", status=200, safe=False)\n\ndef restart_game(request, game_id):\n this_game = get_object_or_404(Game, pk=game_id)\n\n # if game isn't over, redirect to home\n if not this_game.has_been_won:\n return redirect(create_new_game)\n\n # get players who joined this game\n players = Player.objects.filter(game_id=game_id)\n\n if('player_id' not in request.session): # check if user has a session variable player_id\n return redirect(create_new_game)\n\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if(this_player not in players): # check if this player has joined the game\n return redirect(create_new_game)\n\n this_game.reset()\n this_game.deal_cards_to_players()\n\n return JsonResponse({'status': 'ok'})\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def basicRegex(strings):
if not isinstance(strings, list):
return []
ans = []
for string in strings:
pattern = re.compile(BASICPATTERN % string.strip())
ans.append(pattern)
return ans
<|reserved_special_token_1|>
<|reserved_special_token_0|>
BASICPATTERN = '[!/](%s)\\s{,1}(.*)'
def basicRegex(strings):
if not isinstance(strings, list):
return []
ans = []
for string in strings:
pattern = re.compile(BASICPATTERN % string.strip())
ans.append(pattern)
return ans
<|reserved_special_token_1|>
import re
BASICPATTERN = '[!/](%s)\\s{,1}(.*)'
def basicRegex(strings):
if not isinstance(strings, list):
return []
ans = []
for string in strings:
pattern = re.compile(BASICPATTERN % string.strip())
ans.append(pattern)
return ans
<|reserved_special_token_1|>
import re
BASICPATTERN = '[!/](%s)\s{,1}(.*)' # example "/animefind baka" -> (animefind, baka)
# returns compiled BASICPATTERN for each given string
def basicRegex(strings):
if not isinstance(strings,list):
return []
ans = []
for string in strings:
pattern = re.compile(BASICPATTERN % string.strip())
ans.append(pattern)
return ans
|
flexible
|
{
"blob_id": "1a28aea824752d18cbd462693f8f8980dba4974e",
"index": 9387,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef basicRegex(strings):\n if not isinstance(strings, list):\n return []\n ans = []\n for string in strings:\n pattern = re.compile(BASICPATTERN % string.strip())\n ans.append(pattern)\n return ans\n",
"step-3": "<mask token>\nBASICPATTERN = '[!/](%s)\\\\s{,1}(.*)'\n\n\ndef basicRegex(strings):\n if not isinstance(strings, list):\n return []\n ans = []\n for string in strings:\n pattern = re.compile(BASICPATTERN % string.strip())\n ans.append(pattern)\n return ans\n",
"step-4": "import re\nBASICPATTERN = '[!/](%s)\\\\s{,1}(.*)'\n\n\ndef basicRegex(strings):\n if not isinstance(strings, list):\n return []\n ans = []\n for string in strings:\n pattern = re.compile(BASICPATTERN % string.strip())\n ans.append(pattern)\n return ans\n",
"step-5": "import re\n\nBASICPATTERN = '[!/](%s)\\s{,1}(.*)' # example \"/animefind baka\" -> (animefind, baka)\n\n\n# returns compiled BASICPATTERN for each given string\ndef basicRegex(strings):\n if not isinstance(strings,list):\n return []\n ans = []\n for string in strings:\n pattern = re.compile(BASICPATTERN % string.strip())\n ans.append(pattern)\n return ans\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
try:
from zcrmsdk.src.com.zoho.crm.api.dc.data_center import DataCenter
except Exception as e:
from .data_center import DataCenter
class EUDataCenter(DataCenter):
"""
This class represents the properties of Zoho CRM in EU Domain.
"""
@classmethod
def PRODUCTION(cls):
"""
This method represents the Zoho CRM Production environment in EU domain
:return: An instance of Environments
"""
return DataCenter.Environment("https://www.zohoapis.eu", cls().get_iam_url(), cls().get_file_upload_url())
@classmethod
def SANDBOX(cls):
"""
This method represents the Zoho CRM Sandbox environment in EU domain
:return: An instance of Environment
"""
return DataCenter.Environment("https://sandbox.zohoapis.eu", cls().get_iam_url(), cls().get_file_upload_url())
@classmethod
def DEVELOPER(cls):
"""
This method represents the Zoho CRM Developer environment in EU domain
:return: An instance of Environment
"""
return DataCenter.Environment("https://developer.zohoapis.eu", cls().get_iam_url(), cls().get_file_upload_url())
def get_iam_url(self):
return "https://accounts.zoho.eu/oauth/v2/token"
def get_file_upload_url(self):
return "https://content.zohoapis.eu"
|
normal
|
{
"blob_id": "27c364ccf4a6703f74c95ebb386f8ced38b1eafd",
"index": 4960,
"step-1": "<mask token>\n\n\nclass EUDataCenter(DataCenter):\n <mask token>\n\n @classmethod\n def PRODUCTION(cls):\n \"\"\"\n This method represents the Zoho CRM Production environment in EU domain\n :return: An instance of Environments\n \"\"\"\n return DataCenter.Environment('https://www.zohoapis.eu', cls().\n get_iam_url(), cls().get_file_upload_url())\n\n @classmethod\n def SANDBOX(cls):\n \"\"\"\n This method represents the Zoho CRM Sandbox environment in EU domain\n :return: An instance of Environment\n \"\"\"\n return DataCenter.Environment('https://sandbox.zohoapis.eu', cls().\n get_iam_url(), cls().get_file_upload_url())\n\n @classmethod\n def DEVELOPER(cls):\n \"\"\"\n This method represents the Zoho CRM Developer environment in EU domain\n :return: An instance of Environment\n \"\"\"\n return DataCenter.Environment('https://developer.zohoapis.eu', cls(\n ).get_iam_url(), cls().get_file_upload_url())\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass EUDataCenter(DataCenter):\n <mask token>\n\n @classmethod\n def PRODUCTION(cls):\n \"\"\"\n This method represents the Zoho CRM Production environment in EU domain\n :return: An instance of Environments\n \"\"\"\n return DataCenter.Environment('https://www.zohoapis.eu', cls().\n get_iam_url(), cls().get_file_upload_url())\n\n @classmethod\n def SANDBOX(cls):\n \"\"\"\n This method represents the Zoho CRM Sandbox environment in EU domain\n :return: An instance of Environment\n \"\"\"\n return DataCenter.Environment('https://sandbox.zohoapis.eu', cls().\n get_iam_url(), cls().get_file_upload_url())\n\n @classmethod\n def DEVELOPER(cls):\n \"\"\"\n This method represents the Zoho CRM Developer environment in EU domain\n :return: An instance of Environment\n \"\"\"\n return DataCenter.Environment('https://developer.zohoapis.eu', cls(\n ).get_iam_url(), cls().get_file_upload_url())\n\n def get_iam_url(self):\n return 'https://accounts.zoho.eu/oauth/v2/token'\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass EUDataCenter(DataCenter):\n \"\"\"\n This class represents the properties of Zoho CRM in EU Domain.\n \"\"\"\n\n @classmethod\n def PRODUCTION(cls):\n \"\"\"\n This method represents the Zoho CRM Production environment in EU domain\n :return: An instance of Environments\n \"\"\"\n return DataCenter.Environment('https://www.zohoapis.eu', cls().\n get_iam_url(), cls().get_file_upload_url())\n\n @classmethod\n def SANDBOX(cls):\n \"\"\"\n This method represents the Zoho CRM Sandbox environment in EU domain\n :return: An instance of Environment\n \"\"\"\n return DataCenter.Environment('https://sandbox.zohoapis.eu', cls().\n get_iam_url(), cls().get_file_upload_url())\n\n @classmethod\n def DEVELOPER(cls):\n \"\"\"\n This method represents the Zoho CRM Developer environment in EU domain\n :return: An instance of Environment\n \"\"\"\n return DataCenter.Environment('https://developer.zohoapis.eu', cls(\n ).get_iam_url(), cls().get_file_upload_url())\n\n def get_iam_url(self):\n return 'https://accounts.zoho.eu/oauth/v2/token'\n\n def get_file_upload_url(self):\n return 'https://content.zohoapis.eu'\n",
"step-4": "try:\n from zcrmsdk.src.com.zoho.crm.api.dc.data_center import DataCenter\nexcept Exception as e:\n from .data_center import DataCenter\n\n\nclass EUDataCenter(DataCenter):\n \"\"\"\n This class represents the properties of Zoho CRM in EU Domain.\n \"\"\"\n\n @classmethod\n def PRODUCTION(cls):\n \"\"\"\n This method represents the Zoho CRM Production environment in EU domain\n :return: An instance of Environments\n \"\"\"\n return DataCenter.Environment('https://www.zohoapis.eu', cls().\n get_iam_url(), cls().get_file_upload_url())\n\n @classmethod\n def SANDBOX(cls):\n \"\"\"\n This method represents the Zoho CRM Sandbox environment in EU domain\n :return: An instance of Environment\n \"\"\"\n return DataCenter.Environment('https://sandbox.zohoapis.eu', cls().\n get_iam_url(), cls().get_file_upload_url())\n\n @classmethod\n def DEVELOPER(cls):\n \"\"\"\n This method represents the Zoho CRM Developer environment in EU domain\n :return: An instance of Environment\n \"\"\"\n return DataCenter.Environment('https://developer.zohoapis.eu', cls(\n ).get_iam_url(), cls().get_file_upload_url())\n\n def get_iam_url(self):\n return 'https://accounts.zoho.eu/oauth/v2/token'\n\n def get_file_upload_url(self):\n return 'https://content.zohoapis.eu'\n",
"step-5": "try:\n from zcrmsdk.src.com.zoho.crm.api.dc.data_center import DataCenter\nexcept Exception as e:\n from .data_center import DataCenter\n\n\nclass EUDataCenter(DataCenter):\n\n \"\"\"\n This class represents the properties of Zoho CRM in EU Domain.\n \"\"\"\n\n @classmethod\n def PRODUCTION(cls):\n\n \"\"\"\n This method represents the Zoho CRM Production environment in EU domain\n :return: An instance of Environments\n \"\"\"\n\n return DataCenter.Environment(\"https://www.zohoapis.eu\", cls().get_iam_url(), cls().get_file_upload_url())\n\n @classmethod\n def SANDBOX(cls):\n\n \"\"\"\n This method represents the Zoho CRM Sandbox environment in EU domain\n :return: An instance of Environment\n \"\"\"\n\n return DataCenter.Environment(\"https://sandbox.zohoapis.eu\", cls().get_iam_url(), cls().get_file_upload_url())\n\n @classmethod\n def DEVELOPER(cls):\n\n \"\"\"\n This method represents the Zoho CRM Developer environment in EU domain\n :return: An instance of Environment\n \"\"\"\n\n return DataCenter.Environment(\"https://developer.zohoapis.eu\", cls().get_iam_url(), cls().get_file_upload_url())\n\n def get_iam_url(self):\n return \"https://accounts.zoho.eu/oauth/v2/token\"\n\n def get_file_upload_url(self):\n return \"https://content.zohoapis.eu\"\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
##
#Author: Stephen
##
import socket
import select
import sys, os
from contextlib import contextmanager
hostip = 'localhost'
hostport = 8089
def connect(hostip=hostip,hostport=hostport):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
IP_address = hostip
Port = hostport
server.connect((IP_address, Port))
return server
def terminal_mode():
server = connect()
server.send(bytes('Connected via Terminal. Hello!\n','UTF-8'))
while True:
# maintains a list of possible input streams
sockets_list = [sys.stdin, server]
""" There are two possible input situations. Either the
user wants to give manual input to send to other people,
or the server is sending a message to be printed on the
screen. Select returns from sockets_list, the stream that
is reader for input. So for example, if the server wants
to send a message, then the if condition will hold true
below.If the user wants to send a message, the else
condition will evaluate as true"""
read_sockets,write_socket, error_socket = select.select(sockets_list,[],[])
for socks in read_sockets:
if socks == server:
message = socks.recv(2048)
sys.stdout.write("[Server]: "+message.decode("UTF-8"))
sys.stdout.write("\n\n[You]: ")
sys.stdout.flush()
else:
message = sys.stdin.readline()
if message == 'exit':
return
else:
server.send(bytes(message, 'UTF-8'))
print('Connection Closed.')
server.close()
def send_command(message):
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect((hostip, hostport))
clientsocket.recv(2048)#supress welcome message
clientsocket.send(bytes(message, 'UTF-8'))
response = clientsocket.recv(2048)
clientsocket.close()
#print(response.decode("UTF-8"))
return response.decode("UTF-8")
def WIN_read_socket(server):
sockets_list = [server] #ONLY THIS IS DIFFERENT
read_sockets,write_socket, error_socket = select.select(sockets_list,[],[])
while True:
for socks in read_sockets:
if socks == server:
# maintains a list of possible input streams
message = socks.recv(2048)
sys.stdout.write("[Server]: "+message.decode("UTF-8"))
sys.stdout.write("\n\n[You]: ")
sys.stdout.flush()
print('Connection Closed.')
server.close()
def WIN_write_socket(server):
while True:
message = sys.stdin.readline()
if message == 'exit':
return
else:
server.send(bytes(message, 'UTF-8'))
try:
if str(sys.argv[1]) == 'terminal':
if str(sys.argv[2]) == 'windows':
from threading import *
server = connect()
server.send(bytes('Connected via Terminal. Hello!\n','UTF-8'))
Thread(target=WIN_read_socket, args=(server,)).start()
Thread(target=WIN_write_socket, args=(server,)).start()
else:
terminal_mode()
except:
pass
|
normal
|
{
"blob_id": "5cdf8cd4bfebb9aab2e8f421047fc1ba3190d566",
"index": 3451,
"step-1": "<mask token>\n\n\ndef connect(hostip=hostip, hostport=hostport):\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n IP_address = hostip\n Port = hostport\n server.connect((IP_address, Port))\n return server\n\n\ndef terminal_mode():\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n', 'UTF-8'))\n while True:\n sockets_list = [sys.stdin, server]\n \"\"\" There are two possible input situations. Either the\n user wants to give manual input to send to other people,\n or the server is sending a message to be printed on the\n screen. Select returns from sockets_list, the stream that\n is reader for input. So for example, if the server wants\n to send a message, then the if condition will hold true\n below.If the user wants to send a message, the else\n condition will evaluate as true\"\"\"\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n else:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n print('Connection Closed.')\n server.close()\n\n\ndef send_command(message):\n clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clientsocket.connect((hostip, hostport))\n clientsocket.recv(2048)\n clientsocket.send(bytes(message, 'UTF-8'))\n response = clientsocket.recv(2048)\n clientsocket.close()\n return response.decode('UTF-8')\n\n\ndef WIN_read_socket(server):\n sockets_list = [server]\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n while True:\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n print('Connection Closed.')\n 
server.close()\n\n\ndef WIN_write_socket(server):\n while True:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef connect(hostip=hostip, hostport=hostport):\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n IP_address = hostip\n Port = hostport\n server.connect((IP_address, Port))\n return server\n\n\ndef terminal_mode():\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n', 'UTF-8'))\n while True:\n sockets_list = [sys.stdin, server]\n \"\"\" There are two possible input situations. Either the\n user wants to give manual input to send to other people,\n or the server is sending a message to be printed on the\n screen. Select returns from sockets_list, the stream that\n is reader for input. So for example, if the server wants\n to send a message, then the if condition will hold true\n below.If the user wants to send a message, the else\n condition will evaluate as true\"\"\"\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n else:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n print('Connection Closed.')\n server.close()\n\n\ndef send_command(message):\n clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clientsocket.connect((hostip, hostport))\n clientsocket.recv(2048)\n clientsocket.send(bytes(message, 'UTF-8'))\n response = clientsocket.recv(2048)\n clientsocket.close()\n return response.decode('UTF-8')\n\n\ndef WIN_read_socket(server):\n sockets_list = [server]\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n while True:\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n print('Connection Closed.')\n 
server.close()\n\n\ndef WIN_write_socket(server):\n while True:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n\n\ntry:\n if str(sys.argv[1]) == 'terminal':\n if str(sys.argv[2]) == 'windows':\n from threading import *\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n', 'UTF-8'))\n Thread(target=WIN_read_socket, args=(server,)).start()\n Thread(target=WIN_write_socket, args=(server,)).start()\n else:\n terminal_mode()\nexcept:\n pass\n",
"step-3": "<mask token>\nhostip = 'localhost'\nhostport = 8089\n\n\ndef connect(hostip=hostip, hostport=hostport):\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n IP_address = hostip\n Port = hostport\n server.connect((IP_address, Port))\n return server\n\n\ndef terminal_mode():\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n', 'UTF-8'))\n while True:\n sockets_list = [sys.stdin, server]\n \"\"\" There are two possible input situations. Either the\n user wants to give manual input to send to other people,\n or the server is sending a message to be printed on the\n screen. Select returns from sockets_list, the stream that\n is reader for input. So for example, if the server wants\n to send a message, then the if condition will hold true\n below.If the user wants to send a message, the else\n condition will evaluate as true\"\"\"\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n else:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n print('Connection Closed.')\n server.close()\n\n\ndef send_command(message):\n clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clientsocket.connect((hostip, hostport))\n clientsocket.recv(2048)\n clientsocket.send(bytes(message, 'UTF-8'))\n response = clientsocket.recv(2048)\n clientsocket.close()\n return response.decode('UTF-8')\n\n\ndef WIN_read_socket(server):\n sockets_list = [server]\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n while True:\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n 
print('Connection Closed.')\n server.close()\n\n\ndef WIN_write_socket(server):\n while True:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n\n\ntry:\n if str(sys.argv[1]) == 'terminal':\n if str(sys.argv[2]) == 'windows':\n from threading import *\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n', 'UTF-8'))\n Thread(target=WIN_read_socket, args=(server,)).start()\n Thread(target=WIN_write_socket, args=(server,)).start()\n else:\n terminal_mode()\nexcept:\n pass\n",
"step-4": "import socket\nimport select\nimport sys, os\nfrom contextlib import contextmanager\nhostip = 'localhost'\nhostport = 8089\n\n\ndef connect(hostip=hostip, hostport=hostport):\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n IP_address = hostip\n Port = hostport\n server.connect((IP_address, Port))\n return server\n\n\ndef terminal_mode():\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n', 'UTF-8'))\n while True:\n sockets_list = [sys.stdin, server]\n \"\"\" There are two possible input situations. Either the\n user wants to give manual input to send to other people,\n or the server is sending a message to be printed on the\n screen. Select returns from sockets_list, the stream that\n is reader for input. So for example, if the server wants\n to send a message, then the if condition will hold true\n below.If the user wants to send a message, the else\n condition will evaluate as true\"\"\"\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n else:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n print('Connection Closed.')\n server.close()\n\n\ndef send_command(message):\n clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clientsocket.connect((hostip, hostport))\n clientsocket.recv(2048)\n clientsocket.send(bytes(message, 'UTF-8'))\n response = clientsocket.recv(2048)\n clientsocket.close()\n return response.decode('UTF-8')\n\n\ndef WIN_read_socket(server):\n sockets_list = [server]\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n while True:\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + 
message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n print('Connection Closed.')\n server.close()\n\n\ndef WIN_write_socket(server):\n while True:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n\n\ntry:\n if str(sys.argv[1]) == 'terminal':\n if str(sys.argv[2]) == 'windows':\n from threading import *\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n', 'UTF-8'))\n Thread(target=WIN_read_socket, args=(server,)).start()\n Thread(target=WIN_write_socket, args=(server,)).start()\n else:\n terminal_mode()\nexcept:\n pass\n",
"step-5": "##\n#Author: Stephen\n##\nimport socket\nimport select\nimport sys, os\nfrom contextlib import contextmanager\n\nhostip = 'localhost'\nhostport = 8089\n\ndef connect(hostip=hostip,hostport=hostport):\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n IP_address = hostip\n Port = hostport\n server.connect((IP_address, Port))\n return server\n\ndef terminal_mode():\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n','UTF-8'))\n\n while True:\n # maintains a list of possible input streams\n sockets_list = [sys.stdin, server]\n \"\"\" There are two possible input situations. Either the\n user wants to give manual input to send to other people,\n or the server is sending a message to be printed on the\n screen. Select returns from sockets_list, the stream that\n is reader for input. So for example, if the server wants\n to send a message, then the if condition will hold true\n below.If the user wants to send a message, the else\n condition will evaluate as true\"\"\"\n read_sockets,write_socket, error_socket = select.select(sockets_list,[],[])\n\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write(\"[Server]: \"+message.decode(\"UTF-8\"))\n sys.stdout.write(\"\\n\\n[You]: \")\n sys.stdout.flush()\n else:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n\n print('Connection Closed.')\n server.close()\n\ndef send_command(message):\n clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clientsocket.connect((hostip, hostport))\n clientsocket.recv(2048)#supress welcome message\n clientsocket.send(bytes(message, 'UTF-8'))\n response = clientsocket.recv(2048)\n clientsocket.close()\n #print(response.decode(\"UTF-8\"))\n return response.decode(\"UTF-8\")\n\ndef WIN_read_socket(server):\n sockets_list = [server] #ONLY THIS IS DIFFERENT\n read_sockets,write_socket, error_socket = 
select.select(sockets_list,[],[])\n while True:\n for socks in read_sockets:\n if socks == server:\n # maintains a list of possible input streams\n message = socks.recv(2048)\n sys.stdout.write(\"[Server]: \"+message.decode(\"UTF-8\"))\n sys.stdout.write(\"\\n\\n[You]: \")\n sys.stdout.flush()\n print('Connection Closed.')\n server.close()\n \ndef WIN_write_socket(server):\n while True:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n \ntry:\n if str(sys.argv[1]) == 'terminal':\n if str(sys.argv[2]) == 'windows':\n from threading import *\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n','UTF-8'))\n Thread(target=WIN_read_socket, args=(server,)).start() \n Thread(target=WIN_write_socket, args=(server,)).start() \n else:\n terminal_mode()\nexcept:\n pass\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
class TrieNode:
def __init__(self):
self.children = [None for i in range(26)]
self.isEndOfWord = 0
class Trie:
def __init__(self):
self.root = self.getNode()
def getNode(self):
return TrieNode()
def insert(self, key):
root = self.root
length = len(key)
for level in range(length):
index = ord(key[level])-ord('a')
if root.children[index]==None:
root.children[index] = self.getNode()
root = root.children[index]
root.isEndOfWord = 1
def search(self, key):
root = self.root
for level,c in enumerate(key):
if root.children[ord(c)-ord('a')]==None:
return False
root = root.children[ord(c)-ord('a')]
return root!=None and root.isEndOfWord==1
keys = ["the","a","there","anaswe","any", "by","their"]
output = ["Not present in trie", "Present in tire"]
# Trie object
t = Trie()
# Construct trie
for key in keys:
print 'inserting key, ', key
t.insert(key)
print("{} ---- {}".format("the",output[t.search("the")]))
print("{} ---- {}".format("these",output[t.search("these")]))
|
normal
|
{
"blob_id": "5c7c90717f2e98c26675fec6390b4ea9797d6a4e",
"index": 2240,
"step-1": "class TrieNode:\n\tdef __init__(self):\n\t\tself.children = [None for i in range(26)]\n\t\tself.isEndOfWord = 0\nclass Trie:\n\tdef __init__(self):\n\t\tself.root = self.getNode()\n\tdef getNode(self):\n\t\treturn TrieNode()\n\tdef insert(self, key):\n\t\troot = self.root\n\t\tlength = len(key)\n\t\tfor level in range(length):\n\t\t\tindex = ord(key[level])-ord('a')\n\t\t\tif root.children[index]==None:\n\t\t\t\troot.children[index] = self.getNode()\n\t\t\troot = root.children[index]\n\t\troot.isEndOfWord = 1\n\tdef search(self, key):\n\t\troot = self.root\n\t\tfor level,c in enumerate(key):\n\t\t\t if root.children[ord(c)-ord('a')]==None:\n\t\t\t\treturn False\n\t\t\t root = root.children[ord(c)-ord('a')]\n\t\treturn root!=None and root.isEndOfWord==1\nkeys = [\"the\",\"a\",\"there\",\"anaswe\",\"any\", \"by\",\"their\"] \noutput = [\"Not present in trie\", \"Present in tire\"] \n \n# Trie object \nt = Trie() \n \n# Construct trie \nfor key in keys: \n print 'inserting key, ', key\n t.insert(key) \t\nprint(\"{} ---- {}\".format(\"the\",output[t.search(\"the\")])) \t\t\nprint(\"{} ---- {}\".format(\"these\",output[t.search(\"these\")]))\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
import graphviz
import fa_util
class Graph:
def draw(self, directory, filename, rules, start_state, accept_states):
g = graphviz.Digraph(format="svg", graph_attr={'rankdir': 'LR'})
self.add_start_edge(g, start_state)
edges = {}
for rule in rules:
from_state = self.state_to_str(self.get_state(rule))
to_state = self.state_to_str(self.get_next_state(rule))
self.add_graph_node(g, self.get_state(rule), from_state, accept_states)
self.add_graph_node(g, self.get_next_state(rule), to_state, accept_states)
label = self.make_label(rule)
edge_labels = edges.get((from_state, to_state))
if edge_labels == None:
edges[(from_state, to_state)] = [label]
else:
edge_labels.append(label)
self.add_edges(g, edges)
g.render(filename=filename, directory=directory, format="png", view=True)
# Supposed to be extended
def make_label(self, rule):
return "ε" if rule._character == None else rule._character
# Supposed to be extended
def format_labels(self, labels):
return ','.join(labels)
# Supposed to be extended
def get_state(self, rule):
return rule._state
# Supposed to be extended
def get_next_state(self, rule):
return rule._next_state
# Supposed to be extended
def add_start_edge(self, graph, start_state):
dummy_node = fa_util.random_str(8)
graph.node(dummy_node, style="invis", shape="point")
graph.edge(dummy_node, self.state_to_str(start_state), style="bold")
def add_graph_node(self, graph, state, state_str, accept_states):
attr = {'root': 'true', 'shape': 'circle'}
if state in accept_states:
attr['shape'] = 'doublecircle'
graph.node(state_str, **attr)
def add_edges(self, graph, edges):
for (_from, to), labels in edges.items():
graph.edge(_from, to, self.format_labels(labels))
def state_to_str(self, state):
if isinstance(state, str):
return state
try:
iter(state)
### state is iterable ###
if len(state) == 0:
return 'Ø'
# converting list object directly to set object break the order of elements in string
list_str = str([self.state_to_str(e) for e in sorted(state)])
return list_str.replace('[', '{').replace(']', '}')
except TypeError:
### state is not iterable ###
return str(state)
|
normal
|
{
"blob_id": "c0e94a0d20397ebbbdddf726307b19b6c5c85ae6",
"index": 9082,
"step-1": "<mask token>\n\n\nclass Graph:\n\n def draw(self, directory, filename, rules, start_state, accept_states):\n g = graphviz.Digraph(format='svg', graph_attr={'rankdir': 'LR'})\n self.add_start_edge(g, start_state)\n edges = {}\n for rule in rules:\n from_state = self.state_to_str(self.get_state(rule))\n to_state = self.state_to_str(self.get_next_state(rule))\n self.add_graph_node(g, self.get_state(rule), from_state,\n accept_states)\n self.add_graph_node(g, self.get_next_state(rule), to_state,\n accept_states)\n label = self.make_label(rule)\n edge_labels = edges.get((from_state, to_state))\n if edge_labels == None:\n edges[from_state, to_state] = [label]\n else:\n edge_labels.append(label)\n self.add_edges(g, edges)\n g.render(filename=filename, directory=directory, format='png', view\n =True)\n\n def make_label(self, rule):\n return 'ε' if rule._character == None else rule._character\n <mask token>\n <mask token>\n\n def get_next_state(self, rule):\n return rule._next_state\n\n def add_start_edge(self, graph, start_state):\n dummy_node = fa_util.random_str(8)\n graph.node(dummy_node, style='invis', shape='point')\n graph.edge(dummy_node, self.state_to_str(start_state), style='bold')\n\n def add_graph_node(self, graph, state, state_str, accept_states):\n attr = {'root': 'true', 'shape': 'circle'}\n if state in accept_states:\n attr['shape'] = 'doublecircle'\n graph.node(state_str, **attr)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Graph:\n\n def draw(self, directory, filename, rules, start_state, accept_states):\n g = graphviz.Digraph(format='svg', graph_attr={'rankdir': 'LR'})\n self.add_start_edge(g, start_state)\n edges = {}\n for rule in rules:\n from_state = self.state_to_str(self.get_state(rule))\n to_state = self.state_to_str(self.get_next_state(rule))\n self.add_graph_node(g, self.get_state(rule), from_state,\n accept_states)\n self.add_graph_node(g, self.get_next_state(rule), to_state,\n accept_states)\n label = self.make_label(rule)\n edge_labels = edges.get((from_state, to_state))\n if edge_labels == None:\n edges[from_state, to_state] = [label]\n else:\n edge_labels.append(label)\n self.add_edges(g, edges)\n g.render(filename=filename, directory=directory, format='png', view\n =True)\n\n def make_label(self, rule):\n return 'ε' if rule._character == None else rule._character\n <mask token>\n <mask token>\n\n def get_next_state(self, rule):\n return rule._next_state\n\n def add_start_edge(self, graph, start_state):\n dummy_node = fa_util.random_str(8)\n graph.node(dummy_node, style='invis', shape='point')\n graph.edge(dummy_node, self.state_to_str(start_state), style='bold')\n\n def add_graph_node(self, graph, state, state_str, accept_states):\n attr = {'root': 'true', 'shape': 'circle'}\n if state in accept_states:\n attr['shape'] = 'doublecircle'\n graph.node(state_str, **attr)\n <mask token>\n\n def state_to_str(self, state):\n if isinstance(state, str):\n return state\n try:\n iter(state)\n if len(state) == 0:\n return 'Ø'\n list_str = str([self.state_to_str(e) for e in sorted(state)])\n return list_str.replace('[', '{').replace(']', '}')\n except TypeError:\n return str(state)\n",
"step-3": "<mask token>\n\n\nclass Graph:\n\n def draw(self, directory, filename, rules, start_state, accept_states):\n g = graphviz.Digraph(format='svg', graph_attr={'rankdir': 'LR'})\n self.add_start_edge(g, start_state)\n edges = {}\n for rule in rules:\n from_state = self.state_to_str(self.get_state(rule))\n to_state = self.state_to_str(self.get_next_state(rule))\n self.add_graph_node(g, self.get_state(rule), from_state,\n accept_states)\n self.add_graph_node(g, self.get_next_state(rule), to_state,\n accept_states)\n label = self.make_label(rule)\n edge_labels = edges.get((from_state, to_state))\n if edge_labels == None:\n edges[from_state, to_state] = [label]\n else:\n edge_labels.append(label)\n self.add_edges(g, edges)\n g.render(filename=filename, directory=directory, format='png', view\n =True)\n\n def make_label(self, rule):\n return 'ε' if rule._character == None else rule._character\n\n def format_labels(self, labels):\n return ','.join(labels)\n <mask token>\n\n def get_next_state(self, rule):\n return rule._next_state\n\n def add_start_edge(self, graph, start_state):\n dummy_node = fa_util.random_str(8)\n graph.node(dummy_node, style='invis', shape='point')\n graph.edge(dummy_node, self.state_to_str(start_state), style='bold')\n\n def add_graph_node(self, graph, state, state_str, accept_states):\n attr = {'root': 'true', 'shape': 'circle'}\n if state in accept_states:\n attr['shape'] = 'doublecircle'\n graph.node(state_str, **attr)\n\n def add_edges(self, graph, edges):\n for (_from, to), labels in edges.items():\n graph.edge(_from, to, self.format_labels(labels))\n\n def state_to_str(self, state):\n if isinstance(state, str):\n return state\n try:\n iter(state)\n if len(state) == 0:\n return 'Ø'\n list_str = str([self.state_to_str(e) for e in sorted(state)])\n return list_str.replace('[', '{').replace(']', '}')\n except TypeError:\n return str(state)\n",
"step-4": "<mask token>\n\n\nclass Graph:\n\n def draw(self, directory, filename, rules, start_state, accept_states):\n g = graphviz.Digraph(format='svg', graph_attr={'rankdir': 'LR'})\n self.add_start_edge(g, start_state)\n edges = {}\n for rule in rules:\n from_state = self.state_to_str(self.get_state(rule))\n to_state = self.state_to_str(self.get_next_state(rule))\n self.add_graph_node(g, self.get_state(rule), from_state,\n accept_states)\n self.add_graph_node(g, self.get_next_state(rule), to_state,\n accept_states)\n label = self.make_label(rule)\n edge_labels = edges.get((from_state, to_state))\n if edge_labels == None:\n edges[from_state, to_state] = [label]\n else:\n edge_labels.append(label)\n self.add_edges(g, edges)\n g.render(filename=filename, directory=directory, format='png', view\n =True)\n\n def make_label(self, rule):\n return 'ε' if rule._character == None else rule._character\n\n def format_labels(self, labels):\n return ','.join(labels)\n\n def get_state(self, rule):\n return rule._state\n\n def get_next_state(self, rule):\n return rule._next_state\n\n def add_start_edge(self, graph, start_state):\n dummy_node = fa_util.random_str(8)\n graph.node(dummy_node, style='invis', shape='point')\n graph.edge(dummy_node, self.state_to_str(start_state), style='bold')\n\n def add_graph_node(self, graph, state, state_str, accept_states):\n attr = {'root': 'true', 'shape': 'circle'}\n if state in accept_states:\n attr['shape'] = 'doublecircle'\n graph.node(state_str, **attr)\n\n def add_edges(self, graph, edges):\n for (_from, to), labels in edges.items():\n graph.edge(_from, to, self.format_labels(labels))\n\n def state_to_str(self, state):\n if isinstance(state, str):\n return state\n try:\n iter(state)\n if len(state) == 0:\n return 'Ø'\n list_str = str([self.state_to_str(e) for e in sorted(state)])\n return list_str.replace('[', '{').replace(']', '}')\n except TypeError:\n return str(state)\n",
"step-5": "# -*- coding: utf-8 -*-\nimport graphviz\nimport fa_util\n\n\nclass Graph:\n\n def draw(self, directory, filename, rules, start_state, accept_states):\n g = graphviz.Digraph(format=\"svg\", graph_attr={'rankdir': 'LR'})\n self.add_start_edge(g, start_state)\n\n edges = {}\n for rule in rules:\n from_state = self.state_to_str(self.get_state(rule))\n to_state = self.state_to_str(self.get_next_state(rule))\n\n self.add_graph_node(g, self.get_state(rule), from_state, accept_states)\n self.add_graph_node(g, self.get_next_state(rule), to_state, accept_states)\n\n label = self.make_label(rule)\n edge_labels = edges.get((from_state, to_state))\n if edge_labels == None:\n edges[(from_state, to_state)] = [label]\n else:\n edge_labels.append(label)\n self.add_edges(g, edges)\n\n g.render(filename=filename, directory=directory, format=\"png\", view=True)\n\n # Supposed to be extended\n def make_label(self, rule):\n return \"ε\" if rule._character == None else rule._character\n\n # Supposed to be extended\n def format_labels(self, labels):\n return ','.join(labels)\n\n # Supposed to be extended\n def get_state(self, rule):\n return rule._state\n\n # Supposed to be extended\n def get_next_state(self, rule):\n return rule._next_state\n\n # Supposed to be extended\n def add_start_edge(self, graph, start_state):\n dummy_node = fa_util.random_str(8)\n graph.node(dummy_node, style=\"invis\", shape=\"point\")\n graph.edge(dummy_node, self.state_to_str(start_state), style=\"bold\")\n\n def add_graph_node(self, graph, state, state_str, accept_states):\n attr = {'root': 'true', 'shape': 'circle'}\n if state in accept_states:\n attr['shape'] = 'doublecircle'\n graph.node(state_str, **attr)\n\n def add_edges(self, graph, edges):\n for (_from, to), labels in edges.items():\n graph.edge(_from, to, self.format_labels(labels))\n\n def state_to_str(self, state):\n if isinstance(state, str):\n return state\n\n try:\n iter(state)\n ### state is iterable ###\n if len(state) == 0:\n 
return 'Ø'\n\n # converting list object directly to set object break the order of elements in string\n list_str = str([self.state_to_str(e) for e in sorted(state)])\n return list_str.replace('[', '{').replace(']', '}')\n except TypeError:\n ### state is not iterable ###\n return str(state)\n",
"step-ids": [
6,
7,
9,
10,
12
]
}
|
[
6,
7,
9,
10,
12
] |
<|reserved_special_token_0|>
class TestNukeBoxDB(unittest.TestCase):
<|reserved_special_token_0|>
def setUp(self):
"""
B{Test} Data
- 2 User dict obj.
- contains basic data required by the MongoDB collection "Users"
- indexes exist on "mac_id" and "files" entries
- user entries contain a "set" of File elements which reference
the files (by obj id) that they have uploaded
- 2 File dict obj.
- contains basic data required for a File entry in the DB
- indexes exist on "track id"
"""
self.user_1 = {'name': 'Terry', 'mac_id': '12341234'}
self.user_2 = {'name': 'Eric', 'mac_id': '43211234'}
self.file_1 = {'filetype': '.mp3', 'artist': 'Foals', 'path':
'temp_dir', 'track': 'Birch Tree', 'size': '10000', 'art':
'http://foals_art.jpeg'}
self.file_2 = {'filetype': '.mp3', 'artist': 'Foals', 'path':
'temp_dir', 'track': 'What Went Down', 'size': '10000', 'art':
'http://foals_art.jpeg'}
def tearDown(self):
"""
B{Teardown} Test Data
- Deletes instance Data created in Setup
"""
for i in (self.file_1, self.file_2, self.user_1, self.user_2):
del i
def test_Ensure_Indexes(self):
"""
B{Test 01}
Tests the DB method used to create the required indexes
- ensureInexes returns a list of booleans
- each item is True on success
"""
nbdb = NukeBoxDB(Debug=True)
_a, _b, _c = nbdb.ensureIndexes()
self.assertTrue(_a and _b and _c)
nbdb = None
<|reserved_special_token_0|>
def test_Create_User_Files(self):
"""
B{Test 03}
Tests File entry creation in th DB
- createFile first tries to retrieve an existing entry, updating it
if a match is found
- the method then uses the current "mac_id" instance variable to
retrieve the current user and updates their set of files
"""
nbdb = NukeBoxDB()
user_2_result = nbdb.createUser(self.user_2)
file_1_result = nbdb.createFile(self.file_1)
file_2_result = nbdb.createFile(self.file_2)
self.assertEquals(file_1_result, nbdb.getTrack(self.file_1['track'])[1]
)
self.assertEquals(file_2_result, nbdb.getTrack(self.file_2['track'])[1]
)
del user_2_result
nbdb = None
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestNukeBoxDB(unittest.TestCase):
<|reserved_special_token_0|>
def setUp(self):
"""
B{Test} Data
- 2 User dict obj.
- contains basic data required by the MongoDB collection "Users"
- indexes exist on "mac_id" and "files" entries
- user entries contain a "set" of File elements which reference
the files (by obj id) that they have uploaded
- 2 File dict obj.
- contains basic data required for a File entry in the DB
- indexes exist on "track id"
"""
self.user_1 = {'name': 'Terry', 'mac_id': '12341234'}
self.user_2 = {'name': 'Eric', 'mac_id': '43211234'}
self.file_1 = {'filetype': '.mp3', 'artist': 'Foals', 'path':
'temp_dir', 'track': 'Birch Tree', 'size': '10000', 'art':
'http://foals_art.jpeg'}
self.file_2 = {'filetype': '.mp3', 'artist': 'Foals', 'path':
'temp_dir', 'track': 'What Went Down', 'size': '10000', 'art':
'http://foals_art.jpeg'}
def tearDown(self):
"""
B{Teardown} Test Data
- Deletes instance Data created in Setup
"""
for i in (self.file_1, self.file_2, self.user_1, self.user_2):
del i
def test_Ensure_Indexes(self):
"""
B{Test 01}
Tests the DB method used to create the required indexes
- ensureInexes returns a list of booleans
- each item is True on success
"""
nbdb = NukeBoxDB(Debug=True)
_a, _b, _c = nbdb.ensureIndexes()
self.assertTrue(_a and _b and _c)
nbdb = None
<|reserved_special_token_0|>
def test_Create_User_Files(self):
"""
B{Test 03}
Tests File entry creation in th DB
- createFile first tries to retrieve an existing entry, updating it
if a match is found
- the method then uses the current "mac_id" instance variable to
retrieve the current user and updates their set of files
"""
nbdb = NukeBoxDB()
user_2_result = nbdb.createUser(self.user_2)
file_1_result = nbdb.createFile(self.file_1)
file_2_result = nbdb.createFile(self.file_2)
self.assertEquals(file_1_result, nbdb.getTrack(self.file_1['track'])[1]
)
self.assertEquals(file_2_result, nbdb.getTrack(self.file_2['track'])[1]
)
del user_2_result
nbdb = None
def test_Get_Valid_Track(self):
"""
"""
nbdb = NukeBoxDB()
track_value, track_id = nbdb.getTrack(self.file_1['track'])
self.assertTrue(track_value)
self.assertIsNotNone(track_id)
nbdb = None
def test_Get_Invalid_Track(self):
"""
"""
nbdb = NukeBoxDB()
track_value, track_id = nbdb.getTrack(
'Some Track That Should Not Exists in DB')
self.assertFalse(track_value)
self.assertIsNone(track_id)
nbdb = None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestNukeBoxDB(unittest.TestCase):
"""
"""
def setUp(self):
"""
B{Test} Data
- 2 User dict obj.
- contains basic data required by the MongoDB collection "Users"
- indexes exist on "mac_id" and "files" entries
- user entries contain a "set" of File elements which reference
the files (by obj id) that they have uploaded
- 2 File dict obj.
- contains basic data required for a File entry in the DB
- indexes exist on "track id"
"""
self.user_1 = {'name': 'Terry', 'mac_id': '12341234'}
self.user_2 = {'name': 'Eric', 'mac_id': '43211234'}
self.file_1 = {'filetype': '.mp3', 'artist': 'Foals', 'path':
'temp_dir', 'track': 'Birch Tree', 'size': '10000', 'art':
'http://foals_art.jpeg'}
self.file_2 = {'filetype': '.mp3', 'artist': 'Foals', 'path':
'temp_dir', 'track': 'What Went Down', 'size': '10000', 'art':
'http://foals_art.jpeg'}
def tearDown(self):
"""
B{Teardown} Test Data
- Deletes instance Data created in Setup
"""
for i in (self.file_1, self.file_2, self.user_1, self.user_2):
del i
def test_Ensure_Indexes(self):
"""
B{Test 01}
Tests the DB method used to create the required indexes
- ensureInexes returns a list of booleans
- each item is True on success
"""
nbdb = NukeBoxDB(Debug=True)
_a, _b, _c = nbdb.ensureIndexes()
self.assertTrue(_a and _b and _c)
nbdb = None
def test_Create_User(self):
"""
B{Test 02}
Tests User entry creation in the DB
- createUser first checks if an matching entry already exists,
updating the existing entry if it does
- either way it returns the entries object id
"""
nbdb = NukeBoxDB()
user_1_result = nbdb.createUser(self.user_1)
self.assertTrue(user_1_result)
nbdb = None
def test_Create_User_Files(self):
"""
B{Test 03}
Tests File entry creation in th DB
- createFile first tries to retrieve an existing entry, updating it
if a match is found
- the method then uses the current "mac_id" instance variable to
retrieve the current user and updates their set of files
"""
nbdb = NukeBoxDB()
user_2_result = nbdb.createUser(self.user_2)
file_1_result = nbdb.createFile(self.file_1)
file_2_result = nbdb.createFile(self.file_2)
self.assertEquals(file_1_result, nbdb.getTrack(self.file_1['track'])[1]
)
self.assertEquals(file_2_result, nbdb.getTrack(self.file_2['track'])[1]
)
del user_2_result
nbdb = None
def test_Get_Valid_Track(self):
"""
"""
nbdb = NukeBoxDB()
track_value, track_id = nbdb.getTrack(self.file_1['track'])
self.assertTrue(track_value)
self.assertIsNotNone(track_id)
nbdb = None
def test_Get_Invalid_Track(self):
"""
"""
nbdb = NukeBoxDB()
track_value, track_id = nbdb.getTrack(
'Some Track That Should Not Exists in DB')
self.assertFalse(track_value)
self.assertIsNone(track_id)
nbdb = None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
import unittest
from nukebox2000.MongoBox import NukeBoxDB
class TestNukeBoxDB(unittest.TestCase):
"""
"""
def setUp(self):
"""
B{Test} Data
- 2 User dict obj.
- contains basic data required by the MongoDB collection "Users"
- indexes exist on "mac_id" and "files" entries
- user entries contain a "set" of File elements which reference
the files (by obj id) that they have uploaded
- 2 File dict obj.
- contains basic data required for a File entry in the DB
- indexes exist on "track id"
"""
self.user_1 = {'name': 'Terry', 'mac_id': '12341234'}
self.user_2 = {'name': 'Eric', 'mac_id': '43211234'}
self.file_1 = {'filetype': '.mp3', 'artist': 'Foals', 'path':
'temp_dir', 'track': 'Birch Tree', 'size': '10000', 'art':
'http://foals_art.jpeg'}
self.file_2 = {'filetype': '.mp3', 'artist': 'Foals', 'path':
'temp_dir', 'track': 'What Went Down', 'size': '10000', 'art':
'http://foals_art.jpeg'}
def tearDown(self):
"""
B{Teardown} Test Data
- Deletes instance Data created in Setup
"""
for i in (self.file_1, self.file_2, self.user_1, self.user_2):
del i
def test_Ensure_Indexes(self):
"""
B{Test 01}
Tests the DB method used to create the required indexes
- ensureInexes returns a list of booleans
- each item is True on success
"""
nbdb = NukeBoxDB(Debug=True)
_a, _b, _c = nbdb.ensureIndexes()
self.assertTrue(_a and _b and _c)
nbdb = None
def test_Create_User(self):
"""
B{Test 02}
Tests User entry creation in the DB
- createUser first checks if an matching entry already exists,
updating the existing entry if it does
- either way it returns the entries object id
"""
nbdb = NukeBoxDB()
user_1_result = nbdb.createUser(self.user_1)
self.assertTrue(user_1_result)
nbdb = None
def test_Create_User_Files(self):
"""
B{Test 03}
Tests File entry creation in th DB
- createFile first tries to retrieve an existing entry, updating it
if a match is found
- the method then uses the current "mac_id" instance variable to
retrieve the current user and updates their set of files
"""
nbdb = NukeBoxDB()
user_2_result = nbdb.createUser(self.user_2)
file_1_result = nbdb.createFile(self.file_1)
file_2_result = nbdb.createFile(self.file_2)
self.assertEquals(file_1_result, nbdb.getTrack(self.file_1['track'])[1]
)
self.assertEquals(file_2_result, nbdb.getTrack(self.file_2['track'])[1]
)
del user_2_result
nbdb = None
def test_Get_Valid_Track(self):
"""
"""
nbdb = NukeBoxDB()
track_value, track_id = nbdb.getTrack(self.file_1['track'])
self.assertTrue(track_value)
self.assertIsNotNone(track_id)
nbdb = None
def test_Get_Invalid_Track(self):
"""
"""
nbdb = NukeBoxDB()
track_value, track_id = nbdb.getTrack(
'Some Track That Should Not Exists in DB')
self.assertFalse(track_value)
self.assertIsNone(track_id)
nbdb = None
if __name__ == '__main__':
sys.exit(unittest.main())
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_nukeboxQueue
----------------------------------
Tests for `nukebox2000` module.
"""
import sys
import unittest
from nukebox2000.MongoBox import NukeBoxDB
class TestNukeBoxDB(unittest.TestCase):

    '''
    Integration tests for the NukeBoxDB MongoDB wrapper.

    NOTE(review): several tests depend on documents created by earlier tests
    (e.g. test_Get_Valid_Track expects the track inserted by
    test_Create_User_Files), so the suite is order-dependent and requires a
    running MongoDB instance.
    '''

    def setUp(self):
        '''
        B{Test} Data

        - 2 User dict obj.
            - contains basic data required by the MongoDB collection "Users"
            - indexes exist on "mac_id" and "files" entries
            - user entries contain a "set" of File elements which reference
              the files (by obj id) that they have uploaded

        - 2 File dict obj.
            - contains basic data required for a File entry in the DB
            - indexes exist on "track id"
        '''

        self.user_1 = {
            'name': 'Terry',
            'mac_id': '12341234'
        }

        self.user_2 = {
            'name': 'Eric',
            'mac_id': '43211234'
        }

        self.file_1 = {
            'filetype': '.mp3',
            'artist': 'Foals',
            'path': 'temp_dir',
            'track': 'Birch Tree',
            'size': '10000',
            'art': 'http://foals_art.jpeg'
        }

        self.file_2 = {
            'filetype': '.mp3',
            'artist': 'Foals',
            'path': 'temp_dir',
            'track': 'What Went Down',
            'size': '10000',
            'art': 'http://foals_art.jpeg'
        }

    def tearDown(self):
        '''
        B{Teardown} Test Data

        - Deletes instance Data created in Setup
        '''

        # The previous 'for i in (...): del i' only unbound the loop variable
        # each iteration and left the instance attributes alive; delete the
        # attributes themselves.
        for attr in ('file_1', 'file_2', 'user_1', 'user_2'):
            delattr(self, attr)

    def test_Ensure_Indexes(self):
        '''
        B{Test 01}

        Tests the DB method used to create the required indexes

        - ensureInexes returns a list of booleans
        - each item is True on success
        '''

        nbdb = NukeBoxDB(Debug=True)

        _a, _b, _c = nbdb.ensureIndexes()

        self.assertTrue(_a and _b and _c)

        nbdb = None

    def test_Create_User(self):
        '''
        B{Test 02}

        Tests User entry creation in the DB

        - createUser first checks if an matching entry already exists,
          updating the existing entry if it does
        - either way it returns the entries object id
        '''

        nbdb = NukeBoxDB()

        user_1_result = nbdb.createUser(self.user_1)

        self.assertTrue(user_1_result)

        nbdb = None

    def test_Create_User_Files(self):
        '''
        B{Test 03}

        Tests File entry creation in th DB

        - createFile first tries to retrieve an existing entry, updating it
          if a match is found
        - the method then uses the current "mac_id" instance variable to
          retrieve the current user and updates their set of files
        '''

        nbdb = NukeBoxDB()

        user_2_result = nbdb.createUser(self.user_2)
        file_1_result = nbdb.createFile(self.file_1)
        file_2_result = nbdb.createFile(self.file_2)

        # assertEquals is a deprecated alias removed in Python 3.12;
        # assertEqual is the supported spelling.
        self.assertEqual(
            file_1_result, nbdb.getTrack(self.file_1['track'])[1]
        )

        self.assertEqual(
            file_2_result, nbdb.getTrack(self.file_2['track'])[1]
        )

        del user_2_result
        nbdb = None

    def test_Get_Valid_Track(self):
        '''
        B{Test 04}

        Looks up a track previously inserted by test_Create_User_Files and
        expects a truthy track value plus a non-None object id.
        '''

        nbdb = NukeBoxDB()

        track_value, track_id = nbdb.getTrack(self.file_1['track'])

        self.assertTrue(track_value)
        self.assertIsNotNone(track_id)

        nbdb = None

    def test_Get_Invalid_Track(self):
        '''
        B{Test 05}

        Looks up a track that should not exist in the DB and expects a falsy
        track value and a None object id.
        '''

        nbdb = NukeBoxDB()

        track_value, track_id = nbdb.getTrack(
            'Some Track That Should Not Exists in DB'
        )

        self.assertFalse(track_value)
        self.assertIsNone(track_id)

        nbdb = None
# Run the test suite when this module is executed directly; unittest.main()
# produces the process exit status.
if __name__ == '__main__':

    sys.exit(unittest.main())
|
flexible
|
{
"blob_id": "bf63ceca2347f750cdf38dce620eaa3c73b556f1",
"index": 1733,
"step-1": "<mask token>\n\n\nclass TestNukeBoxDB(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\"\n B{Test} Data\n\n - 2 User dict obj.\n - contains basic data required by the MongoDB collection \"Users\"\n - indexes exist on \"mac_id\" and \"files\" entries\n - user entries contain a \"set\" of File elements which reference\n the files (by obj id) that they have uploaded\n\n - 2 File dict obj.\n - contains basic data required for a File entry in the DB\n - indexes exist on \"track id\"\n \"\"\"\n self.user_1 = {'name': 'Terry', 'mac_id': '12341234'}\n self.user_2 = {'name': 'Eric', 'mac_id': '43211234'}\n self.file_1 = {'filetype': '.mp3', 'artist': 'Foals', 'path':\n 'temp_dir', 'track': 'Birch Tree', 'size': '10000', 'art':\n 'http://foals_art.jpeg'}\n self.file_2 = {'filetype': '.mp3', 'artist': 'Foals', 'path':\n 'temp_dir', 'track': 'What Went Down', 'size': '10000', 'art':\n 'http://foals_art.jpeg'}\n\n def tearDown(self):\n \"\"\"\n B{Teardown} Test Data\n\n - Deletes instance Data created in Setup\n \"\"\"\n for i in (self.file_1, self.file_2, self.user_1, self.user_2):\n del i\n\n def test_Ensure_Indexes(self):\n \"\"\"\n B{Test 01}\n\n Tests the DB method used to create the required indexes\n\n - ensureInexes returns a list of booleans\n - each item is True on success\n \"\"\"\n nbdb = NukeBoxDB(Debug=True)\n _a, _b, _c = nbdb.ensureIndexes()\n self.assertTrue(_a and _b and _c)\n nbdb = None\n <mask token>\n\n def test_Create_User_Files(self):\n \"\"\"\n B{Test 03}\n\n Tests File entry creation in th DB\n\n - createFile first tries to retrieve an existing entry, updating it\n if a match is found\n - the method then uses the current \"mac_id\" instance variable to\n retrieve the current user and updates their set of files\n \"\"\"\n nbdb = NukeBoxDB()\n user_2_result = nbdb.createUser(self.user_2)\n file_1_result = nbdb.createFile(self.file_1)\n file_2_result = nbdb.createFile(self.file_2)\n self.assertEquals(file_1_result, 
nbdb.getTrack(self.file_1['track'])[1]\n )\n self.assertEquals(file_2_result, nbdb.getTrack(self.file_2['track'])[1]\n )\n del user_2_result\n nbdb = None\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestNukeBoxDB(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\"\n B{Test} Data\n\n - 2 User dict obj.\n - contains basic data required by the MongoDB collection \"Users\"\n - indexes exist on \"mac_id\" and \"files\" entries\n - user entries contain a \"set\" of File elements which reference\n the files (by obj id) that they have uploaded\n\n - 2 File dict obj.\n - contains basic data required for a File entry in the DB\n - indexes exist on \"track id\"\n \"\"\"\n self.user_1 = {'name': 'Terry', 'mac_id': '12341234'}\n self.user_2 = {'name': 'Eric', 'mac_id': '43211234'}\n self.file_1 = {'filetype': '.mp3', 'artist': 'Foals', 'path':\n 'temp_dir', 'track': 'Birch Tree', 'size': '10000', 'art':\n 'http://foals_art.jpeg'}\n self.file_2 = {'filetype': '.mp3', 'artist': 'Foals', 'path':\n 'temp_dir', 'track': 'What Went Down', 'size': '10000', 'art':\n 'http://foals_art.jpeg'}\n\n def tearDown(self):\n \"\"\"\n B{Teardown} Test Data\n\n - Deletes instance Data created in Setup\n \"\"\"\n for i in (self.file_1, self.file_2, self.user_1, self.user_2):\n del i\n\n def test_Ensure_Indexes(self):\n \"\"\"\n B{Test 01}\n\n Tests the DB method used to create the required indexes\n\n - ensureInexes returns a list of booleans\n - each item is True on success\n \"\"\"\n nbdb = NukeBoxDB(Debug=True)\n _a, _b, _c = nbdb.ensureIndexes()\n self.assertTrue(_a and _b and _c)\n nbdb = None\n <mask token>\n\n def test_Create_User_Files(self):\n \"\"\"\n B{Test 03}\n\n Tests File entry creation in th DB\n\n - createFile first tries to retrieve an existing entry, updating it\n if a match is found\n - the method then uses the current \"mac_id\" instance variable to\n retrieve the current user and updates their set of files\n \"\"\"\n nbdb = NukeBoxDB()\n user_2_result = nbdb.createUser(self.user_2)\n file_1_result = nbdb.createFile(self.file_1)\n file_2_result = nbdb.createFile(self.file_2)\n self.assertEquals(file_1_result, 
nbdb.getTrack(self.file_1['track'])[1]\n )\n self.assertEquals(file_2_result, nbdb.getTrack(self.file_2['track'])[1]\n )\n del user_2_result\n nbdb = None\n\n def test_Get_Valid_Track(self):\n \"\"\"\n \"\"\"\n nbdb = NukeBoxDB()\n track_value, track_id = nbdb.getTrack(self.file_1['track'])\n self.assertTrue(track_value)\n self.assertIsNotNone(track_id)\n nbdb = None\n\n def test_Get_Invalid_Track(self):\n \"\"\"\n \"\"\"\n nbdb = NukeBoxDB()\n track_value, track_id = nbdb.getTrack(\n 'Some Track That Should Not Exists in DB')\n self.assertFalse(track_value)\n self.assertIsNone(track_id)\n nbdb = None\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestNukeBoxDB(unittest.TestCase):\n \"\"\"\n \"\"\"\n\n def setUp(self):\n \"\"\"\n B{Test} Data\n\n - 2 User dict obj.\n - contains basic data required by the MongoDB collection \"Users\"\n - indexes exist on \"mac_id\" and \"files\" entries\n - user entries contain a \"set\" of File elements which reference\n the files (by obj id) that they have uploaded\n\n - 2 File dict obj.\n - contains basic data required for a File entry in the DB\n - indexes exist on \"track id\"\n \"\"\"\n self.user_1 = {'name': 'Terry', 'mac_id': '12341234'}\n self.user_2 = {'name': 'Eric', 'mac_id': '43211234'}\n self.file_1 = {'filetype': '.mp3', 'artist': 'Foals', 'path':\n 'temp_dir', 'track': 'Birch Tree', 'size': '10000', 'art':\n 'http://foals_art.jpeg'}\n self.file_2 = {'filetype': '.mp3', 'artist': 'Foals', 'path':\n 'temp_dir', 'track': 'What Went Down', 'size': '10000', 'art':\n 'http://foals_art.jpeg'}\n\n def tearDown(self):\n \"\"\"\n B{Teardown} Test Data\n\n - Deletes instance Data created in Setup\n \"\"\"\n for i in (self.file_1, self.file_2, self.user_1, self.user_2):\n del i\n\n def test_Ensure_Indexes(self):\n \"\"\"\n B{Test 01}\n\n Tests the DB method used to create the required indexes\n\n - ensureInexes returns a list of booleans\n - each item is True on success\n \"\"\"\n nbdb = NukeBoxDB(Debug=True)\n _a, _b, _c = nbdb.ensureIndexes()\n self.assertTrue(_a and _b and _c)\n nbdb = None\n\n def test_Create_User(self):\n \"\"\"\n B{Test 02}\n\n Tests User entry creation in the DB\n\n - createUser first checks if an matching entry already exists,\n updating the existing entry if it does\n - either way it returns the entries object id\n \"\"\"\n nbdb = NukeBoxDB()\n user_1_result = nbdb.createUser(self.user_1)\n self.assertTrue(user_1_result)\n nbdb = None\n\n def test_Create_User_Files(self):\n \"\"\"\n B{Test 03}\n\n Tests File entry creation in th DB\n\n - createFile first tries to retrieve an existing entry, updating it\n if a 
match is found\n - the method then uses the current \"mac_id\" instance variable to\n retrieve the current user and updates their set of files\n \"\"\"\n nbdb = NukeBoxDB()\n user_2_result = nbdb.createUser(self.user_2)\n file_1_result = nbdb.createFile(self.file_1)\n file_2_result = nbdb.createFile(self.file_2)\n self.assertEquals(file_1_result, nbdb.getTrack(self.file_1['track'])[1]\n )\n self.assertEquals(file_2_result, nbdb.getTrack(self.file_2['track'])[1]\n )\n del user_2_result\n nbdb = None\n\n def test_Get_Valid_Track(self):\n \"\"\"\n \"\"\"\n nbdb = NukeBoxDB()\n track_value, track_id = nbdb.getTrack(self.file_1['track'])\n self.assertTrue(track_value)\n self.assertIsNotNone(track_id)\n nbdb = None\n\n def test_Get_Invalid_Track(self):\n \"\"\"\n \"\"\"\n nbdb = NukeBoxDB()\n track_value, track_id = nbdb.getTrack(\n 'Some Track That Should Not Exists in DB')\n self.assertFalse(track_value)\n self.assertIsNone(track_id)\n nbdb = None\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport sys\nimport unittest\nfrom nukebox2000.MongoBox import NukeBoxDB\n\n\nclass TestNukeBoxDB(unittest.TestCase):\n \"\"\"\n \"\"\"\n\n def setUp(self):\n \"\"\"\n B{Test} Data\n\n - 2 User dict obj.\n - contains basic data required by the MongoDB collection \"Users\"\n - indexes exist on \"mac_id\" and \"files\" entries\n - user entries contain a \"set\" of File elements which reference\n the files (by obj id) that they have uploaded\n\n - 2 File dict obj.\n - contains basic data required for a File entry in the DB\n - indexes exist on \"track id\"\n \"\"\"\n self.user_1 = {'name': 'Terry', 'mac_id': '12341234'}\n self.user_2 = {'name': 'Eric', 'mac_id': '43211234'}\n self.file_1 = {'filetype': '.mp3', 'artist': 'Foals', 'path':\n 'temp_dir', 'track': 'Birch Tree', 'size': '10000', 'art':\n 'http://foals_art.jpeg'}\n self.file_2 = {'filetype': '.mp3', 'artist': 'Foals', 'path':\n 'temp_dir', 'track': 'What Went Down', 'size': '10000', 'art':\n 'http://foals_art.jpeg'}\n\n def tearDown(self):\n \"\"\"\n B{Teardown} Test Data\n\n - Deletes instance Data created in Setup\n \"\"\"\n for i in (self.file_1, self.file_2, self.user_1, self.user_2):\n del i\n\n def test_Ensure_Indexes(self):\n \"\"\"\n B{Test 01}\n\n Tests the DB method used to create the required indexes\n\n - ensureInexes returns a list of booleans\n - each item is True on success\n \"\"\"\n nbdb = NukeBoxDB(Debug=True)\n _a, _b, _c = nbdb.ensureIndexes()\n self.assertTrue(_a and _b and _c)\n nbdb = None\n\n def test_Create_User(self):\n \"\"\"\n B{Test 02}\n\n Tests User entry creation in the DB\n\n - createUser first checks if an matching entry already exists,\n updating the existing entry if it does\n - either way it returns the entries object id\n \"\"\"\n nbdb = NukeBoxDB()\n user_1_result = nbdb.createUser(self.user_1)\n self.assertTrue(user_1_result)\n nbdb = None\n\n def test_Create_User_Files(self):\n \"\"\"\n B{Test 03}\n\n Tests File entry creation in th DB\n\n - 
createFile first tries to retrieve an existing entry, updating it\n if a match is found\n - the method then uses the current \"mac_id\" instance variable to\n retrieve the current user and updates their set of files\n \"\"\"\n nbdb = NukeBoxDB()\n user_2_result = nbdb.createUser(self.user_2)\n file_1_result = nbdb.createFile(self.file_1)\n file_2_result = nbdb.createFile(self.file_2)\n self.assertEquals(file_1_result, nbdb.getTrack(self.file_1['track'])[1]\n )\n self.assertEquals(file_2_result, nbdb.getTrack(self.file_2['track'])[1]\n )\n del user_2_result\n nbdb = None\n\n def test_Get_Valid_Track(self):\n \"\"\"\n \"\"\"\n nbdb = NukeBoxDB()\n track_value, track_id = nbdb.getTrack(self.file_1['track'])\n self.assertTrue(track_value)\n self.assertIsNotNone(track_id)\n nbdb = None\n\n def test_Get_Invalid_Track(self):\n \"\"\"\n \"\"\"\n nbdb = NukeBoxDB()\n track_value, track_id = nbdb.getTrack(\n 'Some Track That Should Not Exists in DB')\n self.assertFalse(track_value)\n self.assertIsNone(track_id)\n nbdb = None\n\n\nif __name__ == '__main__':\n sys.exit(unittest.main())\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_nukeboxQueue\n----------------------------------\n\nTests for `nukebox2000` module.\n\"\"\"\n\n\nimport sys\nimport unittest\nfrom nukebox2000.MongoBox import NukeBoxDB\n\n\nclass TestNukeBoxDB(unittest.TestCase):\n\n '''\n '''\n\n def setUp(self):\n '''\n B{Test} Data\n\n - 2 User dict obj.\n - contains basic data required by the MongoDB collection \"Users\"\n - indexes exist on \"mac_id\" and \"files\" entries\n - user entries contain a \"set\" of File elements which reference\n the files (by obj id) that they have uploaded\n\n - 2 File dict obj.\n - contains basic data required for a File entry in the DB\n - indexes exist on \"track id\"\n '''\n\n self.user_1 = {\n 'name': 'Terry',\n 'mac_id': '12341234'\n }\n\n self.user_2 = {\n 'name': 'Eric',\n 'mac_id': '43211234'\n }\n\n self.file_1 = {\n 'filetype': '.mp3',\n 'artist': 'Foals',\n 'path': 'temp_dir',\n 'track': 'Birch Tree',\n 'size': '10000',\n 'art': 'http://foals_art.jpeg'\n }\n\n self.file_2 = {\n 'filetype': '.mp3',\n 'artist': 'Foals',\n 'path': 'temp_dir',\n 'track': 'What Went Down',\n 'size': '10000',\n 'art': 'http://foals_art.jpeg'\n }\n\n def tearDown(self):\n '''\n B{Teardown} Test Data\n\n - Deletes instance Data created in Setup\n '''\n\n for i in self.file_1, self.file_2, self.user_1, self.user_2:\n\n del i\n\n def test_Ensure_Indexes(self):\n '''\n B{Test 01}\n\n Tests the DB method used to create the required indexes\n\n - ensureInexes returns a list of booleans\n - each item is True on success\n '''\n\n nbdb = NukeBoxDB(Debug=True)\n\n _a, _b, _c = nbdb.ensureIndexes()\n\n self.assertTrue(_a and _b and _c)\n\n nbdb = None\n\n def test_Create_User(self):\n '''\n B{Test 02}\n\n Tests User entry creation in the DB\n\n - createUser first checks if an matching entry already exists,\n updating the existing entry if it does\n - either way it returns the entries object id\n '''\n\n nbdb = NukeBoxDB()\n\n user_1_result = 
nbdb.createUser(self.user_1)\n\n self.assertTrue(user_1_result)\n\n nbdb = None\n\n def test_Create_User_Files(self):\n '''\n B{Test 03}\n\n Tests File entry creation in th DB\n\n - createFile first tries to retrieve an existing entry, updating it\n if a match is found\n - the method then uses the current \"mac_id\" instance variable to\n retrieve the current user and updates their set of files\n '''\n\n nbdb = NukeBoxDB()\n\n user_2_result = nbdb.createUser(self.user_2)\n file_1_result = nbdb.createFile(self.file_1)\n file_2_result = nbdb.createFile(self.file_2)\n\n self.assertEquals(\n file_1_result, nbdb.getTrack(self.file_1['track'])[1]\n )\n\n self.assertEquals(\n file_2_result, nbdb.getTrack(self.file_2['track'])[1]\n )\n\n del user_2_result\n nbdb = None\n\n def test_Get_Valid_Track(self):\n '''\n '''\n\n nbdb = NukeBoxDB()\n\n track_value, track_id = nbdb.getTrack(self.file_1['track'])\n\n self.assertTrue(track_value)\n self.assertIsNotNone(track_id)\n\n nbdb = None\n\n def test_Get_Invalid_Track(self):\n '''\n '''\n\n nbdb = NukeBoxDB()\n\n track_value, track_id = nbdb.getTrack(\n 'Some Track That Should Not Exists in DB'\n )\n\n self.assertFalse(track_value)\n self.assertIsNone(track_id)\n\n nbdb = None\n\n\nif __name__ == '__main__':\n\n sys.exit(unittest.main())\n",
"step-ids": [
5,
7,
9,
11,
12
]
}
|
[
5,
7,
9,
11,
12
] |
#import cvxopt
from cvxopt import matrix, spmatrix, solvers
#import scipy
from scipy.special import expit
import numpy as np
import sys
import pandas as pd
import time
class KernelNC():
    """
    distance based classifier for spectrum kernels

    Each class is represented by the matrix of its training samples; a query
    point is assigned to the class whose mean embedding is closest in the
    kernel feature space.
    """

    def __init__(self, classes):
        # `classes`: list of (sparse) sample matrices, one matrix per class.
        self.classes = classes

    def compute_dist(self, X, Y):
        """Squared feature-space distance from each row of X to the mean of
        Y's rows: k(x, x) - 2 * mean_j k(x, y_j) + mean_ij k(y_i, y_j)."""
        gram_x = np.dot(X, X.T).toarray()
        gram_y = np.dot(Y, Y.T).toarray()
        cross = np.dot(X, Y.T).toarray()
        return np.diag(gram_x) - 2 * cross.mean(axis=1) + gram_y.mean()

    def predict(self, X):
        """Index of the nearest class mean for every row of X."""
        all_dists = np.array(
            [self.compute_dist(X, samples) for samples in self.classes]
        )
        return all_dists.argmin(axis=0)

    def score(self, X, y):
        """Accuracy (in percent) of the predictions on X against labels y."""
        predicted = self.predict(X)
        return 100 * (predicted == y).mean()
class MultiKerOpt():
    """
    Multiple kernel learning by alternating optimization.

    The combined kernel is the elementwise ``degree``-th power of a weighted
    sum of the base kernels, K(u) = (sum_m u_m * K_m) ** degree.  ``fit``
    alternates between solving the inner learner ('krr', 'klr' or 'svm') at
    fixed weights u and a projected gradient step on u.
    """

    def __init__(self, alpha=0.01, tol=1e-07, degree=2, method='klr', hide=False):
        # alpha:  regularization strength of the inner learner
        # tol:    stopping tolerance on the weight update (sup norm)
        # degree: elementwise power applied to the combined kernel
        # method: inner solver -- 'klr', 'svm' or 'krr'
        # hide:   silence the cvxopt QP progress output (SVM only)
        self.alpha = alpha
        self.tol = tol
        self.degree = degree
        self.method = method
        self.hide = hide

    def scale(self, u, norm):
        """Rescale u to unit l1 or l2 norm."""
        if norm == 'l1':
            return u / np.sum(u)
        elif norm == 'l2':
            return u / np.sqrt(np.sum(u ** 2))
        else:
            raise Exception('l1 and l2 are the only available norms')

    def bound(self, u, u_0, gamma, norm):
        """Recenter u around u_0, rescale the offset to norm gamma and take
        absolute values, so every returned weight is >= u_0."""
        u__ = u - u_0
        u__ = np.abs(self.scale(u__, norm) * gamma)
        return u__ + u_0

    def KrrIterate(self, Kernels, y, coef, weights=None):
        """
        (Weighted) kernel ridge regression on the combined kernel.

        Returns c = (K + alpha*I)^-1 y, or, with per-sample ``weights`` W,
        c = W^1/2 (W^1/2 K W^1/2 + alpha*I)^-1 W^1/2 y.
        """
        K_w = np.sum((Kernels * coef[:, None, None]), axis=0) ** self.degree
        N, D = K_w.shape
        if weights is None:
            # BUG FIX: the original called solve(inv(K + alpha*I), y), which
            # evaluates (K + alpha*I) @ y rather than the ridge solution
            # (K + alpha*I)^-1 y (cf. the correct weighted branch below).
            c = np.linalg.solve(K_w + self.alpha * np.eye(N, D), y[:, np.newaxis])
        else:
            W_r = np.diag(np.sqrt(weights))
            A = W_r.dot(K_w).dot(W_r) + self.alpha * np.eye(N, D)
            Y = np.dot(W_r, y[:, np.newaxis])
            x_sol = np.linalg.solve(A, Y)
            c = np.dot(W_r, x_sol)
        return c

    def KlrIterate(self, Kernels, y, coef, tol=1e-07, max_iters=5):
        """
        Kernel logistic regression via IRLS: repeatedly solve a weighted KRR
        on the working response until the coefficients stabilize.
        Labels y in {0, 1} are recoded to {-1, +1} internally.
        """
        c_old = self.KrrIterate(Kernels, y, coef)
        K_w = np.sum((Kernels * coef[:, None, None]), axis=0) ** self.degree
        y_enc = 2 * y - 1
        for i in range(max_iters):
            m_t = np.dot(K_w, c_old)                   # current margins
            p_t = -expit(-y_enc[:, np.newaxis] * m_t)  # -sigma(-y*m): loss gradient factor
            w_t = expit(m_t) * expit(-m_t)             # sigmoid'(m), IRLS weights
            # Working response; +1e-05 guards against division by ~0 weights.
            z_t = m_t - (p_t * y_enc[:, np.newaxis]) / (w_t + 1e-05)
            c_new = self.KrrIterate(Kernels, z_t.flatten(), coef, weights=w_t.flatten())
            if np.linalg.norm(c_new - c_old) < tol:
                break
            else:
                c_old = c_new
        # NOTE: on convergence this returns the previous iterate c_old,
        # matching the update order above.
        return c_old

    def SvmIterate(self, Kernels, y, coef):
        """
        SVM estimation: solve the dual QP
            min_c  1/2 c^T K c - y^T c   s.t.  0 <= y_i c_i <= C
        with C = 1 / (2 * alpha * n), via cvxopt.
        Labels y in {0, 1} are recoded to {-1, +1} internally.
        """
        nb_samples = y.shape[0]
        C = 1 / (2 * self.alpha * nb_samples)
        r = np.arange(nb_samples)
        o = np.ones(nb_samples)
        z = np.zeros(nb_samples)
        K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree)
        y_enc = 2 * y - 1
        P = matrix(K_w.astype(float), tc='d')
        q = matrix(-y_enc, tc='d')
        # Stack both inequality families: y_i c_i <= C and -y_i c_i <= 0.
        G = spmatrix(np.r_[y_enc, -y_enc], np.r_[r, r + nb_samples], np.r_[r, r], tc='d')
        h = matrix(np.r_[o * C, z], tc='d')
        if self.hide:
            solvers.options['show_progress'] = False
        sol = solvers.qp(P, q, G, h)
        c = np.ravel(sol['x'])[:, np.newaxis]
        return c

    def gradUpdate(self, Kernels, coef, delta):
        """
        Gradient of -delta^T K(u) delta with respect to the kernel weights:
        d/du_m = -degree * delta^T (K(u)^(degree-1) * K_m) delta,
        where * inside the quadratic form is an elementwise product.
        """
        K_t = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree - 1)
        grad = np.zeros(len(Kernels))
        for m in range(len(Kernels)):
            grad[m] = delta.T.dot((K_t * Kernels[m])).dot(delta)
        return -self.degree * grad

    def fit(self, Kernels, y, u_0=0, gamma=1, norm='l2', n_iter=5, step=1, weights=None):
        """
        Alternate between the inner solver (producing ``delta``) and a
        projected gradient step on the kernel weights (``coef``).  Stores the
        final weights in ``self.coef`` and the inner coefficients in
        ``self.delta``.
        """
        coef = np.random.normal(0, 1, len(Kernels)) / len(Kernels)  # random init
        coef = self.bound(coef, u_0, gamma, norm)
        score_prev = np.inf
        for i in range(n_iter):
            # Inner problem at fixed kernel weights.
            if self.method == 'klr':
                delta = self.KlrIterate(Kernels, y, coef, tol=1e-07, max_iters=5)
            elif self.method == 'svm':
                delta = self.SvmIterate(Kernels, y, coef)
            else:
                delta = self.KrrIterate(Kernels, y, coef, weights=weights)
            # Projected gradient step on the weights.
            grad = self.gradUpdate(Kernels, coef, delta)
            new_coef = self.bound(coef - step * grad, u_0, gamma, norm)
            score = np.linalg.norm(new_coef - coef, np.inf)
            if score > score_prev:
                step *= 0.9  # backtrack: shrink the step when the update grows
            if score < self.tol:
                # BUG FIX: converged -- the original recorded the attributes
                # here but kept looping (no break), so early stopping never
                # took effect and the values were overwritten after the loop.
                break
            coef = new_coef
            score_prev = score.copy()
        self.coef, self.delta = coef, delta

    def predict(self, Kernels):
        """Predict from a (n_kernels, n_test, n_train) kernel stack.
        Classifier methods return labels in {0, 1}; 'krr' returns raw signs."""
        K_w = np.sum(Kernels * self.coef[:, None, None], axis=0) ** (self.degree)
        y__ = np.sign(K_w.dot(self.delta)).flatten()
        if self.method != 'krr':
            y__ = 0.5 * (y__ + 1)  # map {-1, +1} back to {0, 1}
        return y__

    def score(self, Kernels, y):
        """Accuracy in percent for classifiers; mean squared error for 'krr'."""
        y__ = self.predict(Kernels)
        if self.method != 'krr':
            score = 100 * (y__ == y).mean()
        else:
            score = np.mean((y__ - y) ** 2)
        return score
def CvSearch(K_xx, K_yx, y, method='svm', degrees=[4], alphas=[0.01], cv=5, n_iter=5):
    """Grid-search (degree, alpha) for MultiKerOpt with cv-fold cross-validation.

    K_xx is a (n_kernels, n_samples, n_samples) stack of train kernels.
    K_yx is accepted for interface compatibility but is not used here.
    Returns a DataFrame with one row per (degree, alpha) pair and the mean
    train/validation scores across folds.
    """
    start = time.time()
    total_fits = cv * len(degrees) * len(alphas)
    n_samples = y.shape[0]
    deg_col, alpha_col, train_col, val_col = [], [], [], []
    done = 0
    for degree in degrees:
        for alpha in alphas:
            deg_col.append(degree)
            alpha_col.append(alpha)
            # Fresh random split into cv folds for this (degree, alpha) pair.
            shuffled = np.array(range(n_samples))[np.random.permutation(n_samples)]
            folds = np.array_split(shuffled, cv)
            fold_train, fold_val = [], []
            for fold in folds:
                done += 1
                sys.stderr.write('\rIteration %d/%d -- degree %d --alpha %.3f' % (done, total_fits, degree, alpha))
                sys.stderr.flush()
                keep = np.setdiff1d(range(n_samples), fold)
                model = MultiKerOpt(alpha=alpha, tol=1e-07, degree=degree, method=method, hide=True)
                model.fit(K_xx[:, keep.reshape(-1, 1), keep], y[keep], n_iter=n_iter)
                fold_train.append(model.score(K_xx[:, keep.reshape(-1, 1), keep], y[keep]))
                fold_val.append(model.score(K_xx[:, fold.reshape(-1, 1), keep], y[fold]))
            train_col.append(np.mean(np.array(fold_train)))
            val_col.append(np.mean(np.array(fold_val)))
    df = pd.DataFrame({'degree': deg_col, 'alpha': alpha_col, 'train': train_col, 'val': val_col})
    elapsed = time.time() - start
    print('Done in %.3f' % (elapsed / 60))
    return df
#
def get_best(df):
    """Return (degree, alpha, validation score) of the best-scoring row of a
    CvSearch result frame (best = maximal 'val' column)."""
    scores = df.val.values
    pos = np.argmax(scores)
    return df.degree[pos], df.alpha[pos], np.max(scores)
|
normal
|
{
"blob_id": "6f35c29f6f2dcc6c1dae3e9c1ddf595225748041",
"index": 3018,
"step-1": "<mask token>\n\n\nclass KernelNC:\n <mask token>\n\n def __init__(self, classes):\n self.classes = classes\n\n def compute_dist(self, X, Y):\n K_x = np.dot(X, X.T).toarray()\n K_y = np.dot(Y, Y.T).toarray()\n K_xy = np.dot(X, Y.T).toarray()\n return np.diag(K_x) - 2 * K_xy.mean(axis=1) + K_y.mean()\n\n def predict(self, X):\n dists = np.array([self.compute_dist(X, classe) for classe in self.\n classes])\n return dists.argmin(axis=0)\n\n def score(self, X, y):\n y__ = self.predict(X)\n return 100 * (y__ == y).mean()\n\n\nclass MultiKerOpt:\n\n def __init__(self, alpha=0.01, tol=1e-07, degree=2, method='klr', hide=\n False):\n self.alpha = alpha\n self.tol = tol\n self.degree = degree\n self.method = method\n self.hide = hide\n\n def scale(self, u, norm):\n if norm == 'l1':\n return u / np.sum(u)\n elif norm == 'l2':\n return u / np.sqrt(np.sum(u ** 2))\n else:\n raise Exception('l1 and l2 are the only available norms')\n\n def bound(self, u, u_0, gamma, norm):\n u__ = u - u_0\n u__ = np.abs(self.scale(u__, norm) * gamma)\n return u__ + u_0\n\n def KrrIterate(self, Kernels, y, coef, weights=None):\n \"\"\"\n Weighted KRR iterations\n \"\"\"\n K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree\n N, D = K_w.shape\n if weights is None:\n c = np.linalg.solve(np.linalg.inv(K_w + self.alpha * np.eye(N,\n D)), y[:, np.newaxis])\n else:\n W_r = np.diag(np.sqrt(weights))\n A = W_r.dot(K_w).dot(W_r) + self.alpha * np.eye(N, D)\n Y = np.dot(W_r, y[:, np.newaxis])\n x_sol = np.linalg.solve(A, Y)\n c = np.dot(W_r, x_sol)\n return c\n\n def KlrIterate(self, Kernels, y, coef, tol=1e-07, max_iters=5):\n \"\"\"\n KLR iterations\n \"\"\"\n c_old = self.KrrIterate(Kernels, y, coef)\n K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree\n y_enc = 2 * y - 1\n for i in range(max_iters):\n m_t = np.dot(K_w, c_old)\n p_t = -expit(-y_enc[:, np.newaxis] * m_t)\n w_t = expit(m_t) * expit(-m_t)\n z_t = m_t - p_t * y_enc[:, np.newaxis] / (w_t + 1e-05)\n 
c_new = self.KrrIterate(Kernels, z_t.flatten(), coef, weights=\n w_t.flatten())\n if np.linalg.norm(c_new - c_old) < tol:\n break\n else:\n c_old = c_new\n return c_old\n\n def SvmIterate(self, Kernels, y, coef):\n \"\"\"\n SVM Estimation\n \"\"\"\n nb_samples = y.shape[0]\n C = 1 / (2 * self.alpha * nb_samples)\n r = np.arange(nb_samples)\n o = np.ones(nb_samples)\n z = np.zeros(nb_samples)\n K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree\n y_enc = 2 * y - 1\n P = matrix(K_w.astype(float), tc='d')\n q = matrix(-y_enc, tc='d')\n G = spmatrix(np.r_[y_enc, -y_enc], np.r_[r, r + nb_samples], np.r_[\n r, r], tc='d')\n h = matrix(np.r_[o * C, z], tc='d')\n if self.hide:\n solvers.options['show_progress'] = False\n sol = solvers.qp(P, q, G, h)\n c = np.ravel(sol['x'])[:, np.newaxis]\n return c\n\n def gradUpdate(self, Kernels, coef, delta):\n \"\"\"\n Updating Gradient\n \"\"\"\n K_t = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree - 1\n )\n grad = np.zeros(len(Kernels))\n for m in range(len(Kernels)):\n grad[m] = delta.T.dot(K_t * Kernels[m]).dot(delta)\n return -self.degree * grad\n\n def fit(self, Kernels, y, u_0=0, gamma=1, norm='l2', n_iter=5, step=1,\n weights=None):\n coef = np.random.normal(0, 1, len(Kernels)) / len(Kernels)\n coef = self.bound(coef, u_0, gamma, norm)\n new_coef = 0\n score_prev = np.inf\n for i in range(n_iter):\n if self.method == 'klr':\n delta = self.KlrIterate(Kernels, y, coef, tol=1e-07,\n max_iters=5)\n elif self.method == 'svm':\n delta = self.SvmIterate(Kernels, y, coef)\n else:\n delta = self.KrrIterate(Kernels, y, coef, weights=weights)\n grad = self.gradUpdate(Kernels, coef, delta)\n new_coef = coef - step * grad\n new_coef = self.bound(new_coef, u_0, gamma, norm)\n score = np.linalg.norm(new_coef - coef, np.inf)\n if score > score_prev:\n step *= 0.9\n if score < self.tol:\n self.coef = coef\n self.delta = delta\n coef = new_coef\n score_prev = score.copy()\n self.coef, self.delta = coef, delta\n\n 
def predict(self, Kernels):\n K_w = np.sum(Kernels * self.coef[:, None, None], axis=0) ** self.degree\n y__ = np.sign(K_w.dot(self.delta)).flatten()\n if self.method != 'krr':\n y__ = 0.5 * (y__ + 1)\n return y__\n\n def score(self, Kernels, y):\n y__ = self.predict(Kernels)\n if self.method != 'krr':\n score = 100 * (y__ == y).mean()\n else:\n score = np.mean((y__ - y) ** 2)\n return score\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass KernelNC:\n \"\"\"\n distance based classifier for spectrum kernels\n \"\"\"\n\n def __init__(self, classes):\n self.classes = classes\n\n def compute_dist(self, X, Y):\n K_x = np.dot(X, X.T).toarray()\n K_y = np.dot(Y, Y.T).toarray()\n K_xy = np.dot(X, Y.T).toarray()\n return np.diag(K_x) - 2 * K_xy.mean(axis=1) + K_y.mean()\n\n def predict(self, X):\n dists = np.array([self.compute_dist(X, classe) for classe in self.\n classes])\n return dists.argmin(axis=0)\n\n def score(self, X, y):\n y__ = self.predict(X)\n return 100 * (y__ == y).mean()\n\n\nclass MultiKerOpt:\n\n def __init__(self, alpha=0.01, tol=1e-07, degree=2, method='klr', hide=\n False):\n self.alpha = alpha\n self.tol = tol\n self.degree = degree\n self.method = method\n self.hide = hide\n\n def scale(self, u, norm):\n if norm == 'l1':\n return u / np.sum(u)\n elif norm == 'l2':\n return u / np.sqrt(np.sum(u ** 2))\n else:\n raise Exception('l1 and l2 are the only available norms')\n\n def bound(self, u, u_0, gamma, norm):\n u__ = u - u_0\n u__ = np.abs(self.scale(u__, norm) * gamma)\n return u__ + u_0\n\n def KrrIterate(self, Kernels, y, coef, weights=None):\n \"\"\"\n Weighted KRR iterations\n \"\"\"\n K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree\n N, D = K_w.shape\n if weights is None:\n c = np.linalg.solve(np.linalg.inv(K_w + self.alpha * np.eye(N,\n D)), y[:, np.newaxis])\n else:\n W_r = np.diag(np.sqrt(weights))\n A = W_r.dot(K_w).dot(W_r) + self.alpha * np.eye(N, D)\n Y = np.dot(W_r, y[:, np.newaxis])\n x_sol = np.linalg.solve(A, Y)\n c = np.dot(W_r, x_sol)\n return c\n\n def KlrIterate(self, Kernels, y, coef, tol=1e-07, max_iters=5):\n \"\"\"\n KLR iterations\n \"\"\"\n c_old = self.KrrIterate(Kernels, y, coef)\n K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree\n y_enc = 2 * y - 1\n for i in range(max_iters):\n m_t = np.dot(K_w, c_old)\n p_t = -expit(-y_enc[:, np.newaxis] * m_t)\n w_t = expit(m_t) * expit(-m_t)\n z_t = 
m_t - p_t * y_enc[:, np.newaxis] / (w_t + 1e-05)\n c_new = self.KrrIterate(Kernels, z_t.flatten(), coef, weights=\n w_t.flatten())\n if np.linalg.norm(c_new - c_old) < tol:\n break\n else:\n c_old = c_new\n return c_old\n\n def SvmIterate(self, Kernels, y, coef):\n \"\"\"\n SVM Estimation\n \"\"\"\n nb_samples = y.shape[0]\n C = 1 / (2 * self.alpha * nb_samples)\n r = np.arange(nb_samples)\n o = np.ones(nb_samples)\n z = np.zeros(nb_samples)\n K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree\n y_enc = 2 * y - 1\n P = matrix(K_w.astype(float), tc='d')\n q = matrix(-y_enc, tc='d')\n G = spmatrix(np.r_[y_enc, -y_enc], np.r_[r, r + nb_samples], np.r_[\n r, r], tc='d')\n h = matrix(np.r_[o * C, z], tc='d')\n if self.hide:\n solvers.options['show_progress'] = False\n sol = solvers.qp(P, q, G, h)\n c = np.ravel(sol['x'])[:, np.newaxis]\n return c\n\n def gradUpdate(self, Kernels, coef, delta):\n \"\"\"\n Updating Gradient\n \"\"\"\n K_t = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree - 1\n )\n grad = np.zeros(len(Kernels))\n for m in range(len(Kernels)):\n grad[m] = delta.T.dot(K_t * Kernels[m]).dot(delta)\n return -self.degree * grad\n\n def fit(self, Kernels, y, u_0=0, gamma=1, norm='l2', n_iter=5, step=1,\n weights=None):\n coef = np.random.normal(0, 1, len(Kernels)) / len(Kernels)\n coef = self.bound(coef, u_0, gamma, norm)\n new_coef = 0\n score_prev = np.inf\n for i in range(n_iter):\n if self.method == 'klr':\n delta = self.KlrIterate(Kernels, y, coef, tol=1e-07,\n max_iters=5)\n elif self.method == 'svm':\n delta = self.SvmIterate(Kernels, y, coef)\n else:\n delta = self.KrrIterate(Kernels, y, coef, weights=weights)\n grad = self.gradUpdate(Kernels, coef, delta)\n new_coef = coef - step * grad\n new_coef = self.bound(new_coef, u_0, gamma, norm)\n score = np.linalg.norm(new_coef - coef, np.inf)\n if score > score_prev:\n step *= 0.9\n if score < self.tol:\n self.coef = coef\n self.delta = delta\n coef = new_coef\n score_prev = 
score.copy()\n self.coef, self.delta = coef, delta\n\n def predict(self, Kernels):\n K_w = np.sum(Kernels * self.coef[:, None, None], axis=0) ** self.degree\n y__ = np.sign(K_w.dot(self.delta)).flatten()\n if self.method != 'krr':\n y__ = 0.5 * (y__ + 1)\n return y__\n\n def score(self, Kernels, y):\n y__ = self.predict(Kernels)\n if self.method != 'krr':\n score = 100 * (y__ == y).mean()\n else:\n score = np.mean((y__ - y) ** 2)\n return score\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass KernelNC:\n \"\"\"\n distance based classifier for spectrum kernels\n \"\"\"\n\n def __init__(self, classes):\n self.classes = classes\n\n def compute_dist(self, X, Y):\n K_x = np.dot(X, X.T).toarray()\n K_y = np.dot(Y, Y.T).toarray()\n K_xy = np.dot(X, Y.T).toarray()\n return np.diag(K_x) - 2 * K_xy.mean(axis=1) + K_y.mean()\n\n def predict(self, X):\n dists = np.array([self.compute_dist(X, classe) for classe in self.\n classes])\n return dists.argmin(axis=0)\n\n def score(self, X, y):\n y__ = self.predict(X)\n return 100 * (y__ == y).mean()\n\n\nclass MultiKerOpt:\n\n def __init__(self, alpha=0.01, tol=1e-07, degree=2, method='klr', hide=\n False):\n self.alpha = alpha\n self.tol = tol\n self.degree = degree\n self.method = method\n self.hide = hide\n\n def scale(self, u, norm):\n if norm == 'l1':\n return u / np.sum(u)\n elif norm == 'l2':\n return u / np.sqrt(np.sum(u ** 2))\n else:\n raise Exception('l1 and l2 are the only available norms')\n\n def bound(self, u, u_0, gamma, norm):\n u__ = u - u_0\n u__ = np.abs(self.scale(u__, norm) * gamma)\n return u__ + u_0\n\n def KrrIterate(self, Kernels, y, coef, weights=None):\n \"\"\"\n Weighted KRR iterations\n \"\"\"\n K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree\n N, D = K_w.shape\n if weights is None:\n c = np.linalg.solve(np.linalg.inv(K_w + self.alpha * np.eye(N,\n D)), y[:, np.newaxis])\n else:\n W_r = np.diag(np.sqrt(weights))\n A = W_r.dot(K_w).dot(W_r) + self.alpha * np.eye(N, D)\n Y = np.dot(W_r, y[:, np.newaxis])\n x_sol = np.linalg.solve(A, Y)\n c = np.dot(W_r, x_sol)\n return c\n\n def KlrIterate(self, Kernels, y, coef, tol=1e-07, max_iters=5):\n \"\"\"\n KLR iterations\n \"\"\"\n c_old = self.KrrIterate(Kernels, y, coef)\n K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree\n y_enc = 2 * y - 1\n for i in range(max_iters):\n m_t = np.dot(K_w, c_old)\n p_t = -expit(-y_enc[:, np.newaxis] * m_t)\n w_t = expit(m_t) * expit(-m_t)\n z_t = 
m_t - p_t * y_enc[:, np.newaxis] / (w_t + 1e-05)\n c_new = self.KrrIterate(Kernels, z_t.flatten(), coef, weights=\n w_t.flatten())\n if np.linalg.norm(c_new - c_old) < tol:\n break\n else:\n c_old = c_new\n return c_old\n\n def SvmIterate(self, Kernels, y, coef):\n \"\"\"\n SVM Estimation\n \"\"\"\n nb_samples = y.shape[0]\n C = 1 / (2 * self.alpha * nb_samples)\n r = np.arange(nb_samples)\n o = np.ones(nb_samples)\n z = np.zeros(nb_samples)\n K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree\n y_enc = 2 * y - 1\n P = matrix(K_w.astype(float), tc='d')\n q = matrix(-y_enc, tc='d')\n G = spmatrix(np.r_[y_enc, -y_enc], np.r_[r, r + nb_samples], np.r_[\n r, r], tc='d')\n h = matrix(np.r_[o * C, z], tc='d')\n if self.hide:\n solvers.options['show_progress'] = False\n sol = solvers.qp(P, q, G, h)\n c = np.ravel(sol['x'])[:, np.newaxis]\n return c\n\n def gradUpdate(self, Kernels, coef, delta):\n \"\"\"\n Updating Gradient\n \"\"\"\n K_t = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree - 1\n )\n grad = np.zeros(len(Kernels))\n for m in range(len(Kernels)):\n grad[m] = delta.T.dot(K_t * Kernels[m]).dot(delta)\n return -self.degree * grad\n\n def fit(self, Kernels, y, u_0=0, gamma=1, norm='l2', n_iter=5, step=1,\n weights=None):\n coef = np.random.normal(0, 1, len(Kernels)) / len(Kernels)\n coef = self.bound(coef, u_0, gamma, norm)\n new_coef = 0\n score_prev = np.inf\n for i in range(n_iter):\n if self.method == 'klr':\n delta = self.KlrIterate(Kernels, y, coef, tol=1e-07,\n max_iters=5)\n elif self.method == 'svm':\n delta = self.SvmIterate(Kernels, y, coef)\n else:\n delta = self.KrrIterate(Kernels, y, coef, weights=weights)\n grad = self.gradUpdate(Kernels, coef, delta)\n new_coef = coef - step * grad\n new_coef = self.bound(new_coef, u_0, gamma, norm)\n score = np.linalg.norm(new_coef - coef, np.inf)\n if score > score_prev:\n step *= 0.9\n if score < self.tol:\n self.coef = coef\n self.delta = delta\n coef = new_coef\n score_prev = 
score.copy()\n self.coef, self.delta = coef, delta\n\n def predict(self, Kernels):\n K_w = np.sum(Kernels * self.coef[:, None, None], axis=0) ** self.degree\n y__ = np.sign(K_w.dot(self.delta)).flatten()\n if self.method != 'krr':\n y__ = 0.5 * (y__ + 1)\n return y__\n\n def score(self, Kernels, y):\n y__ = self.predict(Kernels)\n if self.method != 'krr':\n score = 100 * (y__ == y).mean()\n else:\n score = np.mean((y__ - y) ** 2)\n return score\n\n\ndef CvSearch(K_xx, K_yx, y, method='svm', degrees=[4], alphas=[0.01], cv=5,\n n_iter=5):\n tt = time.time()\n n_iters = cv * len(degrees) * len(alphas)\n n_samples = y.shape[0]\n DEG, ALPH, TRAIN, VAL = [], [], [], []\n i = 0\n for degree in degrees:\n for alpha in alphas:\n DEG.append(degree)\n ALPH.append(alpha)\n INDS = np.array(range(n_samples))\n idx = np.random.permutation(n_samples)\n INDS = INDS[idx]\n vals = np.array_split(INDS, cv)\n perfs_train = []\n perfs_val = []\n for val in vals:\n i += 1\n sys.stderr.write(\n '\\rIteration %d/%d -- degree %d --alpha %.3f' % (i,\n n_iters, degree, alpha))\n sys.stderr.flush()\n train = np.setdiff1d(range(n_samples), val)\n clf = MultiKerOpt(alpha=alpha, tol=1e-07, degree=degree,\n method=method, hide=True)\n clf.fit(K_xx[:, train.reshape(-1, 1), train], y[train],\n n_iter=n_iter)\n score_train = clf.score(K_xx[:, train.reshape(-1, 1), train\n ], y[train])\n score_val = clf.score(K_xx[:, val.reshape(-1, 1), train], y\n [val])\n perfs_train.append(score_train)\n perfs_val.append(score_val)\n TRAIN.append(np.mean(np.array(perfs_train)))\n VAL.append(np.mean(np.array(perfs_val)))\n df = pd.DataFrame({'degree': DEG, 'alpha': ALPH, 'train': TRAIN, 'val':\n VAL})\n tt = time.time() - tt\n print('Done in %.3f' % (tt / 60))\n return df\n\n\ndef get_best(df):\n idx = np.argmax(df.val.values)\n best = np.max(df.val.values)\n best_degree = df.degree[idx]\n best_alpha = df.alpha[idx]\n return best_degree, best_alpha, best\n",
"step-4": "from cvxopt import matrix, spmatrix, solvers\nfrom scipy.special import expit\nimport numpy as np\nimport sys\nimport pandas as pd\nimport time\n\n\nclass KernelNC:\n \"\"\"\n distance based classifier for spectrum kernels\n \"\"\"\n\n def __init__(self, classes):\n self.classes = classes\n\n def compute_dist(self, X, Y):\n K_x = np.dot(X, X.T).toarray()\n K_y = np.dot(Y, Y.T).toarray()\n K_xy = np.dot(X, Y.T).toarray()\n return np.diag(K_x) - 2 * K_xy.mean(axis=1) + K_y.mean()\n\n def predict(self, X):\n dists = np.array([self.compute_dist(X, classe) for classe in self.\n classes])\n return dists.argmin(axis=0)\n\n def score(self, X, y):\n y__ = self.predict(X)\n return 100 * (y__ == y).mean()\n\n\nclass MultiKerOpt:\n\n def __init__(self, alpha=0.01, tol=1e-07, degree=2, method='klr', hide=\n False):\n self.alpha = alpha\n self.tol = tol\n self.degree = degree\n self.method = method\n self.hide = hide\n\n def scale(self, u, norm):\n if norm == 'l1':\n return u / np.sum(u)\n elif norm == 'l2':\n return u / np.sqrt(np.sum(u ** 2))\n else:\n raise Exception('l1 and l2 are the only available norms')\n\n def bound(self, u, u_0, gamma, norm):\n u__ = u - u_0\n u__ = np.abs(self.scale(u__, norm) * gamma)\n return u__ + u_0\n\n def KrrIterate(self, Kernels, y, coef, weights=None):\n \"\"\"\n Weighted KRR iterations\n \"\"\"\n K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree\n N, D = K_w.shape\n if weights is None:\n c = np.linalg.solve(np.linalg.inv(K_w + self.alpha * np.eye(N,\n D)), y[:, np.newaxis])\n else:\n W_r = np.diag(np.sqrt(weights))\n A = W_r.dot(K_w).dot(W_r) + self.alpha * np.eye(N, D)\n Y = np.dot(W_r, y[:, np.newaxis])\n x_sol = np.linalg.solve(A, Y)\n c = np.dot(W_r, x_sol)\n return c\n\n def KlrIterate(self, Kernels, y, coef, tol=1e-07, max_iters=5):\n \"\"\"\n KLR iterations\n \"\"\"\n c_old = self.KrrIterate(Kernels, y, coef)\n K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree\n y_enc = 2 * y - 1\n for i in 
range(max_iters):\n m_t = np.dot(K_w, c_old)\n p_t = -expit(-y_enc[:, np.newaxis] * m_t)\n w_t = expit(m_t) * expit(-m_t)\n z_t = m_t - p_t * y_enc[:, np.newaxis] / (w_t + 1e-05)\n c_new = self.KrrIterate(Kernels, z_t.flatten(), coef, weights=\n w_t.flatten())\n if np.linalg.norm(c_new - c_old) < tol:\n break\n else:\n c_old = c_new\n return c_old\n\n def SvmIterate(self, Kernels, y, coef):\n \"\"\"\n SVM Estimation\n \"\"\"\n nb_samples = y.shape[0]\n C = 1 / (2 * self.alpha * nb_samples)\n r = np.arange(nb_samples)\n o = np.ones(nb_samples)\n z = np.zeros(nb_samples)\n K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree\n y_enc = 2 * y - 1\n P = matrix(K_w.astype(float), tc='d')\n q = matrix(-y_enc, tc='d')\n G = spmatrix(np.r_[y_enc, -y_enc], np.r_[r, r + nb_samples], np.r_[\n r, r], tc='d')\n h = matrix(np.r_[o * C, z], tc='d')\n if self.hide:\n solvers.options['show_progress'] = False\n sol = solvers.qp(P, q, G, h)\n c = np.ravel(sol['x'])[:, np.newaxis]\n return c\n\n def gradUpdate(self, Kernels, coef, delta):\n \"\"\"\n Updating Gradient\n \"\"\"\n K_t = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree - 1\n )\n grad = np.zeros(len(Kernels))\n for m in range(len(Kernels)):\n grad[m] = delta.T.dot(K_t * Kernels[m]).dot(delta)\n return -self.degree * grad\n\n def fit(self, Kernels, y, u_0=0, gamma=1, norm='l2', n_iter=5, step=1,\n weights=None):\n coef = np.random.normal(0, 1, len(Kernels)) / len(Kernels)\n coef = self.bound(coef, u_0, gamma, norm)\n new_coef = 0\n score_prev = np.inf\n for i in range(n_iter):\n if self.method == 'klr':\n delta = self.KlrIterate(Kernels, y, coef, tol=1e-07,\n max_iters=5)\n elif self.method == 'svm':\n delta = self.SvmIterate(Kernels, y, coef)\n else:\n delta = self.KrrIterate(Kernels, y, coef, weights=weights)\n grad = self.gradUpdate(Kernels, coef, delta)\n new_coef = coef - step * grad\n new_coef = self.bound(new_coef, u_0, gamma, norm)\n score = np.linalg.norm(new_coef - coef, np.inf)\n if 
score > score_prev:\n step *= 0.9\n if score < self.tol:\n self.coef = coef\n self.delta = delta\n coef = new_coef\n score_prev = score.copy()\n self.coef, self.delta = coef, delta\n\n def predict(self, Kernels):\n K_w = np.sum(Kernels * self.coef[:, None, None], axis=0) ** self.degree\n y__ = np.sign(K_w.dot(self.delta)).flatten()\n if self.method != 'krr':\n y__ = 0.5 * (y__ + 1)\n return y__\n\n def score(self, Kernels, y):\n y__ = self.predict(Kernels)\n if self.method != 'krr':\n score = 100 * (y__ == y).mean()\n else:\n score = np.mean((y__ - y) ** 2)\n return score\n\n\ndef CvSearch(K_xx, K_yx, y, method='svm', degrees=[4], alphas=[0.01], cv=5,\n n_iter=5):\n tt = time.time()\n n_iters = cv * len(degrees) * len(alphas)\n n_samples = y.shape[0]\n DEG, ALPH, TRAIN, VAL = [], [], [], []\n i = 0\n for degree in degrees:\n for alpha in alphas:\n DEG.append(degree)\n ALPH.append(alpha)\n INDS = np.array(range(n_samples))\n idx = np.random.permutation(n_samples)\n INDS = INDS[idx]\n vals = np.array_split(INDS, cv)\n perfs_train = []\n perfs_val = []\n for val in vals:\n i += 1\n sys.stderr.write(\n '\\rIteration %d/%d -- degree %d --alpha %.3f' % (i,\n n_iters, degree, alpha))\n sys.stderr.flush()\n train = np.setdiff1d(range(n_samples), val)\n clf = MultiKerOpt(alpha=alpha, tol=1e-07, degree=degree,\n method=method, hide=True)\n clf.fit(K_xx[:, train.reshape(-1, 1), train], y[train],\n n_iter=n_iter)\n score_train = clf.score(K_xx[:, train.reshape(-1, 1), train\n ], y[train])\n score_val = clf.score(K_xx[:, val.reshape(-1, 1), train], y\n [val])\n perfs_train.append(score_train)\n perfs_val.append(score_val)\n TRAIN.append(np.mean(np.array(perfs_train)))\n VAL.append(np.mean(np.array(perfs_val)))\n df = pd.DataFrame({'degree': DEG, 'alpha': ALPH, 'train': TRAIN, 'val':\n VAL})\n tt = time.time() - tt\n print('Done in %.3f' % (tt / 60))\n return df\n\n\ndef get_best(df):\n idx = np.argmax(df.val.values)\n best = np.max(df.val.values)\n best_degree = 
df.degree[idx]\n best_alpha = df.alpha[idx]\n return best_degree, best_alpha, best\n",
"step-5": "#import cvxopt\nfrom cvxopt import matrix, spmatrix, solvers\n#import scipy\nfrom scipy.special import expit\nimport numpy as np\nimport sys\nimport pandas as pd\nimport time\n\nclass KernelNC():\n \"\"\"\n distance based classifier for spectrum kernels\n \"\"\"\n \n def __init__(self, classes):\n self.classes = classes\n \n def compute_dist(self, X, Y):\n K_x = np.dot(X, X.T).toarray()\n K_y = np.dot(Y, Y.T).toarray()\n K_xy = np.dot(X, Y.T).toarray()\n \n return np.diag(K_x) - 2*K_xy.mean(axis=1) + K_y.mean()\n \n def predict(self, X):\n \n dists = np.array([self.compute_dist(X, classe) for classe in self.classes])\n return dists.argmin(axis=0)\n \n def score(self, X, y):\n y__ = self.predict(X)\n return 100*(y__==y).mean()\n\nclass MultiKerOpt():\n \n def __init__(self, alpha=0.01, tol=1e-07, degree=2, method='klr', hide=False):\n self.alpha = alpha\n self.tol = tol\n self.degree = degree\n self.method = method\n self.hide = hide\n \n def scale(self, u, norm):\n if norm=='l1':\n return u/np.sum(u)\n elif norm=='l2':\n return u / np.sqrt(np.sum(u**2))\n else:\n raise Exception('l1 and l2 are the only available norms')\n \n def bound(self, u, u_0, gamma, norm):\n u__ = u - u_0\n u__ = np.abs(self.scale(u__, norm) * gamma)\n return u__ + u_0\n \n def KrrIterate(self, Kernels, y, coef, weights = None):\n \"\"\"\n Weighted KRR iterations\n \"\"\"\n K_w = np.sum((Kernels * coef[:, None, None]), axis=0) ** self.degree\n N, D = K_w.shape\n if weights is None:\n c = np.linalg.solve(np.linalg.inv(K_w + self.alpha * np.eye(N, D)), y[:, np.newaxis])\n else:\n W_r = np.diag(np.sqrt(weights))\n A = W_r.dot(K_w).dot(W_r) + self.alpha * np.eye(N,D)\n Y = np.dot(W_r, y[:, np.newaxis])\n x_sol = np.linalg.solve(A, Y)\n c = np.dot(W_r, x_sol)\n return c\n \n def KlrIterate(self, Kernels, y, coef, tol=1e-07, max_iters=5):\n \"\"\"\n KLR iterations\n \"\"\"\n c_old = self.KrrIterate(Kernels, y, coef)\n K_w = np.sum((Kernels * coef[:, None, None]), axis=0) ** self.degree\n 
y_enc = 2*y-1\n \n for i in range(max_iters):\n m_t = np.dot(K_w, c_old)\n p_t = -expit(-y_enc[:, np.newaxis]*m_t)\n w_t = expit(m_t)*expit(-m_t)\n z_t = m_t - (p_t * y_enc[:, np.newaxis]) /(w_t+ 1e-05)\n c_new = self.KrrIterate(Kernels, z_t.flatten(), coef, weights=w_t.flatten())\n if np.linalg.norm(c_new - c_old)<tol:\n break\n else:\n c_old = c_new\n return c_old\n\n def SvmIterate(self, Kernels, y, coef):\n \"\"\"\n SVM Estimation\n \"\"\"\n nb_samples = y.shape[0]\n C = 1 / ( 2 * self.alpha * nb_samples)\n \n r = np.arange(nb_samples)\n o = np.ones(nb_samples)\n z = np.zeros(nb_samples)\n \n K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree)\n \n y_enc = 2*y-1\n \n P = matrix(K_w.astype(float), tc='d')\n q = matrix(-y_enc, tc='d')\n G = spmatrix(np.r_[y_enc, -y_enc], np.r_[r, r + nb_samples], np.r_[r, r], tc='d')\n h = matrix(np.r_[o * C, z], tc='d')\n \n if self.hide:\n solvers.options['show_progress'] = False\n sol = solvers.qp(P, q, G, h)\n c = np.ravel(sol['x'])[:,np.newaxis]\n \n return c\n \n def gradUpdate(self, Kernels, coef, delta):\n \"\"\"\n Updating Gradient\n \"\"\"\n K_t = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree-1)\n grad = np.zeros(len(Kernels))\n for m in range(len(Kernels)):\n grad[m] = delta.T.dot((K_t * Kernels[m])).dot(delta)\n \n return - self.degree * grad\n \n def fit(self, Kernels, y, u_0=0, gamma=1, norm='l2', n_iter=5, step=1, weights=None):\n coef = np.random.normal(0, 1, len(Kernels)) / len(Kernels)\n coef = self.bound(coef, u_0, gamma, norm)\n new_coef = 0\n \n score_prev = np.inf\n \n for i in range(n_iter):\n #print(i+1)\n if self.method=='klr':\n delta = self.KlrIterate(Kernels, y, coef, tol=1e-07, max_iters=5)\n elif self.method=='svm':\n delta = self.SvmIterate(Kernels, y, coef)\n else:\n delta = self.KrrIterate(Kernels, y, coef, weights = weights)\n \n grad = self.gradUpdate(Kernels, coef, delta)\n \n new_coef = coef - step * grad\n new_coef = self.bound(new_coef, u_0, gamma, norm)\n \n 
score = np.linalg.norm(new_coef - coef, np.inf)\n \n if score>score_prev:\n step *= 0.9\n \n if score<self.tol:\n self.coef = coef\n self.delta = delta\n \n coef = new_coef\n score_prev = score.copy()\n \n self.coef, self.delta = coef, delta\n #return new_coef\n def predict(self, Kernels):\n K_w = np.sum(Kernels * self.coef[:, None, None], axis=0) ** (self.degree)\n y__ = np.sign(K_w.dot(self.delta)).flatten()\n if self.method != 'krr':\n y__ = 0.5 * (y__ + 1)\n return y__\n \n def score(self, Kernels, y):\n y__ = self.predict(Kernels)\n if self.method!='krr':\n score = 100*(y__==y).mean()\n else:\n score = np.mean((y__- y)**2)\n return score\n \n \ndef CvSearch(K_xx, K_yx, y, method='svm', degrees=[4], alphas=[0.01], cv=5, n_iter=5):\n tt = time.time()\n \n n_iters = cv * len(degrees) * len(alphas)\n \n n_samples = y.shape[0]\n \n DEG, ALPH, TRAIN, VAL = [], [], [], []\n \n i=0\n \n for degree in degrees:\n for alpha in alphas:\n DEG.append(degree)\n ALPH.append(alpha)\n \n #SPLITTING\n INDS = np.array(range(n_samples))\n idx = np.random.permutation(n_samples)\n INDS = INDS[idx]\n \n vals = np.array_split(INDS, cv)\n \n perfs_train = []\n perfs_val = []\n \n for val in vals:\n i += 1 \n sys.stderr.write('\\rIteration %d/%d -- degree %d --alpha %.3f' %(i, n_iters, degree, alpha))\n sys.stderr.flush()\n \n train = np.setdiff1d(range(n_samples),val)\n \n clf = MultiKerOpt(alpha=alpha, tol=1e-07, degree=degree, method=method, hide=True)\n \n clf.fit(K_xx[:,train.reshape(-1,1), train], y[train], n_iter=n_iter)\n \n score_train = clf.score(K_xx[:,train.reshape(-1,1), train], y[train])\n \n score_val = clf.score(K_xx[:,val.reshape(-1,1), train], y[val])\n \n perfs_train.append(score_train)\n perfs_val.append(score_val)\n \n TRAIN.append(np.mean(np.array(perfs_train)))\n VAL.append(np.mean(np.array(perfs_val)))\n \n df = pd.DataFrame({'degree':DEG, 'alpha':ALPH, 'train':TRAIN, 'val':VAL})\n \n tt = time.time() - tt\n print('Done in %.3f'%(tt/60))\n \n return df\n#\ndef 
get_best(df):\n idx = np.argmax(df.val.values)\n best = np.max(df.val.values)\n\n best_degree = df.degree[idx]\n best_alpha = df.alpha[idx]\n return best_degree, best_alpha, best\n",
"step-ids": [
16,
17,
19,
20,
21
]
}
|
[
16,
17,
19,
20,
21
] |
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
import os.path as osp
from collections import defaultdict
import numpy as np
import math
from functools import partial
from tqdm import tqdm
import glog as log
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from args import argument_parser, image_dataset_kwargs, optimizer_kwargs
from torchreid.data_manager import ImageDataManager
from torchreid import models
from torchreid.utils.iotools import save_checkpoint, check_isfile
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.loggers import Logger
from torchreid.utils.torchtools import count_num_param
from torchreid.utils.reidtools import visualize_ranked_results, distmat_hist, calc_distmat
from torchreid.eval_metrics import test
from torchreid.utils.load_weights import load_weights
from torchreid.utils.absorb_bn import search_absorbed_bn
from torchreid.evaluate_lfw import evaluate, compute_embeddings_lfw
# global variables
# Command-line configuration is parsed once at import time; main() reads it
# via the module-level `args` namespace (declared `global` there).
# NOTE(review): parsing at import time means importing this module consumes
# sys.argv -- confirm this script is never imported as a library.
parser = argument_parser()
args = parser.parse_args()
def main():
    """Entry point: build a re-id model, optionally quantize it, and evaluate.

    All configuration comes from the module-level ``args`` namespace.
    Side effects: redirects ``sys.stdout`` into ``<save_dir>/log_test.txt``,
    may write quantization dumps and ranked-result images under
    ``args.save_dir``.
    """
    global args

    # Seed CPU RNG (GPU RNGs are seeded below once availability is known).
    torch.manual_seed(args.seed)
    # Restrict visible GPUs before any CUDA call, unless the user asked to
    # use whatever devices are available.
    if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False
    log_name = 'log_test.txt'
    # Tee console output into the log file so the run is archived.
    sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        # benchmark mode picks the fastest conv algorithms for fixed shapes.
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU, however, GPU is highly recommended")

    print("Initializing image data manager")
    # NOTE(review): when --convert-to-onnx is set, `dm` and `testloader_dict`
    # are never created, yet the quantization and evaluation branches below
    # read them unconditionally -- those combinations would raise NameError.
    # Confirm which flag combinations are actually supported.
    if not args.convert_to_onnx:  # and not args.infer:
        dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
        trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders()

    # Fixed classifier-head size; presumably a placeholder since this script
    # only evaluates embeddings and never trains the ID classifier -- confirm.
    num_train_pids = 100

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=num_train_pids, loss={'xent', 'htri'},
                              pretrained=False if args.load_weights else 'imagenet', grayscale=args.grayscale,
                              ceil_mode=not args.convert_to_onnx, infer=True, bits=args.bits,
                              normalize_embeddings=args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=args.convbn)
    print("Model size: {:.3f} M".format(count_num_param(model)))

    if args.load_weights and check_isfile(args.load_weights):
        # load pretrained weights but ignore layers that don't match in size
        load_weights(model, args.load_weights)
        print("Loaded pretrained weights from '{}'".format(args.load_weights))

    if args.absorb_bn:
        # Fold BatchNorm parameters into preceding convolutions (see
        # torchreid.utils.absorb_bn) so the quantized graph has no BN nodes.
        search_absorbed_bn(model)

    if args.quantization or args.save_quantized_model:
        # Lazy imports: gap_quantization is only required on this path.
        from gap_quantization.quantization import ModelQuantizer
        from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files

        if args.quant_data_dir is None:
            raise AttributeError('quant-data-dir argument is required.')

        num_channels = 1 if args.grayscale else 3
        cfg = {
            "bits": args.bits,  # number of bits to store weights and activations
            "accum_bits": 32,  # number of bits to store intermediate convolution result
            "signed": True,  # use signed numbers
            "save_folder": args.save_dir,  # folder to save results
            "data_source": args.quant_data_dir,  # folder with images to collect dataset statistics
            "use_gpu": False,  # use GPU for inference
            "batch_size": 1,
            "num_workers": 0,  # number of workers for PyTorch dataloader
            "verbose": True,
            "save_params": args.save_quantized_model,  # save quantization parameters to the file
            "quantize_forward": True,  # replace usual convs, poolings, ... with GAP-like ones
            "num_input_channels": num_channels,
            "raw_input": args.no_normalize,
            "double_precision": args.double_precision  # use double precision convolutions
        }
        # Quantization statistics are collected on CPU.
        model = model.cpu()
        quantizer = ModelQuantizer(model, cfg, dm.transform_test)  # transform test is OK if we use args.no_normalize
        quantizer.quantize_model()  # otherwise we need to add QuantizeInput operation

        if args.infer:
            if args.image_path == '':
                raise AttributeError('Image for inference is required')

            # Dump per-layer activations for the single image, then emit the
            # quantization parameters next to them.
            quantizer.dump_activations(args.image_path, dm.transform_test,
                                       save_dir=os.path.join(args.save_dir, 'activations_dump'))
            dump_quant_params(args.save_dir, args.convbn)
            if args.convbn:
                remove_extra_dump(os.path.join(args.save_dir, 'activations_dump'))
            remove_cat_files(args.save_dir)

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")

        for name in args.target_names:
            if not 'lfw' in name.lower():
                # Standard re-id evaluation: query/gallery distance matrix.
                print("Evaluating {} ...".format(name))
                queryloader = testloader_dict[name]['query']
                galleryloader = testloader_dict[name]['gallery']
                distmat = test(args, model, queryloader, galleryloader, use_gpu, return_distmat=True)

                if args.visualize_ranks:
                    visualize_ranked_results(
                        distmat, dm.return_testdataset_by_name(name),
                        save_dir=osp.join(args.save_dir, 'ranked_results', name),
                        topk=20
                    )
            else:
                # LFW-style pair verification: accuracy / AUC / threshold.
                model.eval()
                same_acc, diff_acc, all_acc, auc, thresh = evaluate(args, dm.lfw_dataset, model, compute_embeddings_lfw,
                                                                    args.test_batch_size, verbose=False, show_failed=args.show_failed, load_embeddings=args.load_embeddings)
                log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(same_acc, diff_acc))
                log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))
                log.info('Validation AUC: {0:.4f}'.format(auc))
                log.info('Estimated threshold: {0:.4f}'.format(thresh))
        #roc_auc(model, '/home/maxim/data/lfw/pairsTest.txt', '/media/slow_drive/cropped_lfw', args, use_gpu)
        return
# Entry-point guard: run the evaluation script only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "0ad529298f321d2f3a63cde8179a50cf2881ee00",
"index": 2162,
"step-1": "<mask token>\n\n\ndef main():\n global args\n torch.manual_seed(args.seed)\n if not args.use_avai_gpus:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n use_gpu = torch.cuda.is_available()\n if args.use_cpu:\n use_gpu = False\n log_name = 'log_test.txt'\n sys.stdout = Logger(osp.join(args.save_dir, log_name))\n print('==========\\nArgs:{}\\n=========='.format(args))\n if use_gpu:\n print('Currently using GPU {}'.format(args.gpu_devices))\n cudnn.benchmark = True\n torch.cuda.manual_seed_all(args.seed)\n else:\n print('Currently using CPU, however, GPU is highly recommended')\n print('Initializing image data manager')\n if not args.convert_to_onnx:\n dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))\n trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders(\n )\n num_train_pids = 100\n print('Initializing model: {}'.format(args.arch))\n model = models.init_model(name=args.arch, num_classes=num_train_pids,\n loss={'xent', 'htri'}, pretrained=False if args.load_weights else\n 'imagenet', grayscale=args.grayscale, ceil_mode=not args.\n convert_to_onnx, infer=True, bits=args.bits, normalize_embeddings=\n args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=\n args.convbn)\n print('Model size: {:.3f} M'.format(count_num_param(model)))\n if args.load_weights and check_isfile(args.load_weights):\n load_weights(model, args.load_weights)\n print(\"Loaded pretrained weights from '{}'\".format(args.load_weights))\n if args.absorb_bn:\n search_absorbed_bn(model)\n if args.quantization or args.save_quantized_model:\n from gap_quantization.quantization import ModelQuantizer\n from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files\n if args.quant_data_dir is None:\n raise AttributeError('quant-data-dir argument is required.')\n num_channels = 1 if args.grayscale else 3\n cfg = {'bits': args.bits, 'accum_bits': 32, 'signed': True,\n 'save_folder': args.save_dir, 'data_source': args.\n 
quant_data_dir, 'use_gpu': False, 'batch_size': 1,\n 'num_workers': 0, 'verbose': True, 'save_params': args.\n save_quantized_model, 'quantize_forward': True,\n 'num_input_channels': num_channels, 'raw_input': args.\n no_normalize, 'double_precision': args.double_precision}\n model = model.cpu()\n quantizer = ModelQuantizer(model, cfg, dm.transform_test)\n quantizer.quantize_model()\n if args.infer:\n if args.image_path == '':\n raise AttributeError('Image for inference is required')\n quantizer.dump_activations(args.image_path, dm.transform_test,\n save_dir=os.path.join(args.save_dir, 'activations_dump'))\n dump_quant_params(args.save_dir, args.convbn)\n if args.convbn:\n remove_extra_dump(os.path.join(args.save_dir,\n 'activations_dump'))\n remove_cat_files(args.save_dir)\n if use_gpu:\n model = nn.DataParallel(model).cuda()\n if args.evaluate:\n print('Evaluate only')\n for name in args.target_names:\n if not 'lfw' in name.lower():\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n distmat = test(args, model, queryloader, galleryloader,\n use_gpu, return_distmat=True)\n if args.visualize_ranks:\n visualize_ranked_results(distmat, dm.\n return_testdataset_by_name(name), save_dir=osp.join\n (args.save_dir, 'ranked_results', name), topk=20)\n else:\n model.eval()\n same_acc, diff_acc, all_acc, auc, thresh = evaluate(args,\n dm.lfw_dataset, model, compute_embeddings_lfw, args.\n test_batch_size, verbose=False, show_failed=args.\n show_failed, load_embeddings=args.load_embeddings)\n log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(\n same_acc, diff_acc))\n log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))\n log.info('Validation AUC: {0:.4f}'.format(auc))\n log.info('Estimated threshold: {0:.4f}'.format(thresh))\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n global args\n torch.manual_seed(args.seed)\n if not args.use_avai_gpus:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n use_gpu = torch.cuda.is_available()\n if args.use_cpu:\n use_gpu = False\n log_name = 'log_test.txt'\n sys.stdout = Logger(osp.join(args.save_dir, log_name))\n print('==========\\nArgs:{}\\n=========='.format(args))\n if use_gpu:\n print('Currently using GPU {}'.format(args.gpu_devices))\n cudnn.benchmark = True\n torch.cuda.manual_seed_all(args.seed)\n else:\n print('Currently using CPU, however, GPU is highly recommended')\n print('Initializing image data manager')\n if not args.convert_to_onnx:\n dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))\n trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders(\n )\n num_train_pids = 100\n print('Initializing model: {}'.format(args.arch))\n model = models.init_model(name=args.arch, num_classes=num_train_pids,\n loss={'xent', 'htri'}, pretrained=False if args.load_weights else\n 'imagenet', grayscale=args.grayscale, ceil_mode=not args.\n convert_to_onnx, infer=True, bits=args.bits, normalize_embeddings=\n args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=\n args.convbn)\n print('Model size: {:.3f} M'.format(count_num_param(model)))\n if args.load_weights and check_isfile(args.load_weights):\n load_weights(model, args.load_weights)\n print(\"Loaded pretrained weights from '{}'\".format(args.load_weights))\n if args.absorb_bn:\n search_absorbed_bn(model)\n if args.quantization or args.save_quantized_model:\n from gap_quantization.quantization import ModelQuantizer\n from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files\n if args.quant_data_dir is None:\n raise AttributeError('quant-data-dir argument is required.')\n num_channels = 1 if args.grayscale else 3\n cfg = {'bits': args.bits, 'accum_bits': 32, 'signed': True,\n 'save_folder': args.save_dir, 'data_source': args.\n 
quant_data_dir, 'use_gpu': False, 'batch_size': 1,\n 'num_workers': 0, 'verbose': True, 'save_params': args.\n save_quantized_model, 'quantize_forward': True,\n 'num_input_channels': num_channels, 'raw_input': args.\n no_normalize, 'double_precision': args.double_precision}\n model = model.cpu()\n quantizer = ModelQuantizer(model, cfg, dm.transform_test)\n quantizer.quantize_model()\n if args.infer:\n if args.image_path == '':\n raise AttributeError('Image for inference is required')\n quantizer.dump_activations(args.image_path, dm.transform_test,\n save_dir=os.path.join(args.save_dir, 'activations_dump'))\n dump_quant_params(args.save_dir, args.convbn)\n if args.convbn:\n remove_extra_dump(os.path.join(args.save_dir,\n 'activations_dump'))\n remove_cat_files(args.save_dir)\n if use_gpu:\n model = nn.DataParallel(model).cuda()\n if args.evaluate:\n print('Evaluate only')\n for name in args.target_names:\n if not 'lfw' in name.lower():\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n distmat = test(args, model, queryloader, galleryloader,\n use_gpu, return_distmat=True)\n if args.visualize_ranks:\n visualize_ranked_results(distmat, dm.\n return_testdataset_by_name(name), save_dir=osp.join\n (args.save_dir, 'ranked_results', name), topk=20)\n else:\n model.eval()\n same_acc, diff_acc, all_acc, auc, thresh = evaluate(args,\n dm.lfw_dataset, model, compute_embeddings_lfw, args.\n test_batch_size, verbose=False, show_failed=args.\n show_failed, load_embeddings=args.load_embeddings)\n log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(\n same_acc, diff_acc))\n log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))\n log.info('Validation AUC: {0:.4f}'.format(auc))\n log.info('Estimated threshold: {0:.4f}'.format(thresh))\n return\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nparser = argument_parser()\nargs = parser.parse_args()\n\n\ndef main():\n global args\n torch.manual_seed(args.seed)\n if not args.use_avai_gpus:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n use_gpu = torch.cuda.is_available()\n if args.use_cpu:\n use_gpu = False\n log_name = 'log_test.txt'\n sys.stdout = Logger(osp.join(args.save_dir, log_name))\n print('==========\\nArgs:{}\\n=========='.format(args))\n if use_gpu:\n print('Currently using GPU {}'.format(args.gpu_devices))\n cudnn.benchmark = True\n torch.cuda.manual_seed_all(args.seed)\n else:\n print('Currently using CPU, however, GPU is highly recommended')\n print('Initializing image data manager')\n if not args.convert_to_onnx:\n dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))\n trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders(\n )\n num_train_pids = 100\n print('Initializing model: {}'.format(args.arch))\n model = models.init_model(name=args.arch, num_classes=num_train_pids,\n loss={'xent', 'htri'}, pretrained=False if args.load_weights else\n 'imagenet', grayscale=args.grayscale, ceil_mode=not args.\n convert_to_onnx, infer=True, bits=args.bits, normalize_embeddings=\n args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=\n args.convbn)\n print('Model size: {:.3f} M'.format(count_num_param(model)))\n if args.load_weights and check_isfile(args.load_weights):\n load_weights(model, args.load_weights)\n print(\"Loaded pretrained weights from '{}'\".format(args.load_weights))\n if args.absorb_bn:\n search_absorbed_bn(model)\n if args.quantization or args.save_quantized_model:\n from gap_quantization.quantization import ModelQuantizer\n from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files\n if args.quant_data_dir is None:\n raise AttributeError('quant-data-dir argument is required.')\n num_channels = 1 if args.grayscale else 3\n cfg = {'bits': args.bits, 'accum_bits': 32, 'signed': 
True,\n 'save_folder': args.save_dir, 'data_source': args.\n quant_data_dir, 'use_gpu': False, 'batch_size': 1,\n 'num_workers': 0, 'verbose': True, 'save_params': args.\n save_quantized_model, 'quantize_forward': True,\n 'num_input_channels': num_channels, 'raw_input': args.\n no_normalize, 'double_precision': args.double_precision}\n model = model.cpu()\n quantizer = ModelQuantizer(model, cfg, dm.transform_test)\n quantizer.quantize_model()\n if args.infer:\n if args.image_path == '':\n raise AttributeError('Image for inference is required')\n quantizer.dump_activations(args.image_path, dm.transform_test,\n save_dir=os.path.join(args.save_dir, 'activations_dump'))\n dump_quant_params(args.save_dir, args.convbn)\n if args.convbn:\n remove_extra_dump(os.path.join(args.save_dir,\n 'activations_dump'))\n remove_cat_files(args.save_dir)\n if use_gpu:\n model = nn.DataParallel(model).cuda()\n if args.evaluate:\n print('Evaluate only')\n for name in args.target_names:\n if not 'lfw' in name.lower():\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n distmat = test(args, model, queryloader, galleryloader,\n use_gpu, return_distmat=True)\n if args.visualize_ranks:\n visualize_ranked_results(distmat, dm.\n return_testdataset_by_name(name), save_dir=osp.join\n (args.save_dir, 'ranked_results', name), topk=20)\n else:\n model.eval()\n same_acc, diff_acc, all_acc, auc, thresh = evaluate(args,\n dm.lfw_dataset, model, compute_embeddings_lfw, args.\n test_batch_size, verbose=False, show_failed=args.\n show_failed, load_embeddings=args.load_embeddings)\n log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(\n same_acc, diff_acc))\n log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))\n log.info('Validation AUC: {0:.4f}'.format(auc))\n log.info('Estimated threshold: {0:.4f}'.format(thresh))\n return\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from __future__ import print_function\nfrom __future__ import division\nimport os\nimport sys\nimport time\nimport datetime\nimport os.path as osp\nfrom collections import defaultdict\nimport numpy as np\nimport math\nfrom functools import partial\nfrom tqdm import tqdm\nimport glog as log\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom args import argument_parser, image_dataset_kwargs, optimizer_kwargs\nfrom torchreid.data_manager import ImageDataManager\nfrom torchreid import models\nfrom torchreid.utils.iotools import save_checkpoint, check_isfile\nfrom torchreid.utils.avgmeter import AverageMeter\nfrom torchreid.utils.loggers import Logger\nfrom torchreid.utils.torchtools import count_num_param\nfrom torchreid.utils.reidtools import visualize_ranked_results, distmat_hist, calc_distmat\nfrom torchreid.eval_metrics import test\nfrom torchreid.utils.load_weights import load_weights\nfrom torchreid.utils.absorb_bn import search_absorbed_bn\nfrom torchreid.evaluate_lfw import evaluate, compute_embeddings_lfw\nparser = argument_parser()\nargs = parser.parse_args()\n\n\ndef main():\n global args\n torch.manual_seed(args.seed)\n if not args.use_avai_gpus:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n use_gpu = torch.cuda.is_available()\n if args.use_cpu:\n use_gpu = False\n log_name = 'log_test.txt'\n sys.stdout = Logger(osp.join(args.save_dir, log_name))\n print('==========\\nArgs:{}\\n=========='.format(args))\n if use_gpu:\n print('Currently using GPU {}'.format(args.gpu_devices))\n cudnn.benchmark = True\n torch.cuda.manual_seed_all(args.seed)\n else:\n print('Currently using CPU, however, GPU is highly recommended')\n print('Initializing image data manager')\n if not args.convert_to_onnx:\n dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))\n trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders(\n )\n num_train_pids = 100\n print('Initializing model: {}'.format(args.arch))\n model 
= models.init_model(name=args.arch, num_classes=num_train_pids,\n loss={'xent', 'htri'}, pretrained=False if args.load_weights else\n 'imagenet', grayscale=args.grayscale, ceil_mode=not args.\n convert_to_onnx, infer=True, bits=args.bits, normalize_embeddings=\n args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=\n args.convbn)\n print('Model size: {:.3f} M'.format(count_num_param(model)))\n if args.load_weights and check_isfile(args.load_weights):\n load_weights(model, args.load_weights)\n print(\"Loaded pretrained weights from '{}'\".format(args.load_weights))\n if args.absorb_bn:\n search_absorbed_bn(model)\n if args.quantization or args.save_quantized_model:\n from gap_quantization.quantization import ModelQuantizer\n from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files\n if args.quant_data_dir is None:\n raise AttributeError('quant-data-dir argument is required.')\n num_channels = 1 if args.grayscale else 3\n cfg = {'bits': args.bits, 'accum_bits': 32, 'signed': True,\n 'save_folder': args.save_dir, 'data_source': args.\n quant_data_dir, 'use_gpu': False, 'batch_size': 1,\n 'num_workers': 0, 'verbose': True, 'save_params': args.\n save_quantized_model, 'quantize_forward': True,\n 'num_input_channels': num_channels, 'raw_input': args.\n no_normalize, 'double_precision': args.double_precision}\n model = model.cpu()\n quantizer = ModelQuantizer(model, cfg, dm.transform_test)\n quantizer.quantize_model()\n if args.infer:\n if args.image_path == '':\n raise AttributeError('Image for inference is required')\n quantizer.dump_activations(args.image_path, dm.transform_test,\n save_dir=os.path.join(args.save_dir, 'activations_dump'))\n dump_quant_params(args.save_dir, args.convbn)\n if args.convbn:\n remove_extra_dump(os.path.join(args.save_dir,\n 'activations_dump'))\n remove_cat_files(args.save_dir)\n if use_gpu:\n model = nn.DataParallel(model).cuda()\n if args.evaluate:\n print('Evaluate only')\n for name in 
args.target_names:\n if not 'lfw' in name.lower():\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n distmat = test(args, model, queryloader, galleryloader,\n use_gpu, return_distmat=True)\n if args.visualize_ranks:\n visualize_ranked_results(distmat, dm.\n return_testdataset_by_name(name), save_dir=osp.join\n (args.save_dir, 'ranked_results', name), topk=20)\n else:\n model.eval()\n same_acc, diff_acc, all_acc, auc, thresh = evaluate(args,\n dm.lfw_dataset, model, compute_embeddings_lfw, args.\n test_batch_size, verbose=False, show_failed=args.\n show_failed, load_embeddings=args.load_embeddings)\n log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(\n same_acc, diff_acc))\n log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))\n log.info('Validation AUC: {0:.4f}'.format(auc))\n log.info('Estimated threshold: {0:.4f}'.format(thresh))\n return\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport sys\nimport time\nimport datetime\nimport os.path as osp\nfrom collections import defaultdict\nimport numpy as np\nimport math\nfrom functools import partial\nfrom tqdm import tqdm\nimport glog as log\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\n\n\nfrom args import argument_parser, image_dataset_kwargs, optimizer_kwargs\nfrom torchreid.data_manager import ImageDataManager\nfrom torchreid import models\nfrom torchreid.utils.iotools import save_checkpoint, check_isfile\nfrom torchreid.utils.avgmeter import AverageMeter\nfrom torchreid.utils.loggers import Logger\nfrom torchreid.utils.torchtools import count_num_param\nfrom torchreid.utils.reidtools import visualize_ranked_results, distmat_hist, calc_distmat\nfrom torchreid.eval_metrics import test\nfrom torchreid.utils.load_weights import load_weights\nfrom torchreid.utils.absorb_bn import search_absorbed_bn\nfrom torchreid.evaluate_lfw import evaluate, compute_embeddings_lfw\n\n\n# global variables\nparser = argument_parser()\nargs = parser.parse_args()\n\n\ndef main():\n global args\n\n torch.manual_seed(args.seed)\n if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n use_gpu = torch.cuda.is_available()\n if args.use_cpu: use_gpu = False\n log_name = 'log_test.txt'\n sys.stdout = Logger(osp.join(args.save_dir, log_name))\n print(\"==========\\nArgs:{}\\n==========\".format(args))\n\n if use_gpu:\n print(\"Currently using GPU {}\".format(args.gpu_devices))\n cudnn.benchmark = True\n torch.cuda.manual_seed_all(args.seed)\n else:\n print(\"Currently using CPU, however, GPU is highly recommended\")\n\n print(\"Initializing image data manager\")\n if not args.convert_to_onnx: # and not args.infer:\n dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))\n trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders()\n\n num_train_pids = 
100\n\n print(\"Initializing model: {}\".format(args.arch))\n model = models.init_model(name=args.arch, num_classes=num_train_pids, loss={'xent', 'htri'},\n pretrained=False if args.load_weights else 'imagenet', grayscale=args.grayscale,\n ceil_mode=not args.convert_to_onnx, infer=True, bits=args.bits,\n normalize_embeddings=args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=args.convbn)\n print(\"Model size: {:.3f} M\".format(count_num_param(model)))\n\n if args.load_weights and check_isfile(args.load_weights):\n # load pretrained weights but ignore layers that don't match in size\n load_weights(model, args.load_weights)\n print(\"Loaded pretrained weights from '{}'\".format(args.load_weights))\n\n if args.absorb_bn:\n search_absorbed_bn(model)\n\n if args.quantization or args.save_quantized_model:\n from gap_quantization.quantization import ModelQuantizer\n from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files\n\n if args.quant_data_dir is None:\n raise AttributeError('quant-data-dir argument is required.')\n\n num_channels = 1 if args.grayscale else 3\n cfg = {\n \"bits\": args.bits, # number of bits to store weights and activations\n \"accum_bits\": 32, # number of bits to store intermediate convolution result\n \"signed\": True, # use signed numbers\n \"save_folder\": args.save_dir, # folder to save results\n \"data_source\": args.quant_data_dir, # folder with images to collect dataset statistics\n \"use_gpu\": False, # use GPU for inference\n \"batch_size\": 1,\n \"num_workers\": 0, # number of workers for PyTorch dataloader\n \"verbose\": True,\n \"save_params\": args.save_quantized_model, # save quantization parameters to the file\n \"quantize_forward\": True, # replace usual convs, poolings, ... 
with GAP-like ones\n \"num_input_channels\": num_channels,\n \"raw_input\": args.no_normalize,\n \"double_precision\": args.double_precision # use double precision convolutions\n }\n\n model = model.cpu()\n quantizer = ModelQuantizer(model, cfg, dm.transform_test) # transform test is OK if we use args.no_normalize\n quantizer.quantize_model() # otherwise we need to add QuantizeInput operation\n\n if args.infer:\n if args.image_path == '':\n raise AttributeError('Image for inference is required')\n\n quantizer.dump_activations(args.image_path, dm.transform_test,\n save_dir=os.path.join(args.save_dir, 'activations_dump'))\n dump_quant_params(args.save_dir, args.convbn)\n if args.convbn:\n remove_extra_dump(os.path.join(args.save_dir, 'activations_dump'))\n remove_cat_files(args.save_dir)\n\n\n if use_gpu:\n model = nn.DataParallel(model).cuda()\n\n\n if args.evaluate:\n print(\"Evaluate only\")\n\n for name in args.target_names:\n if not 'lfw' in name.lower():\n print(\"Evaluating {} ...\".format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n distmat = test(args, model, queryloader, galleryloader, use_gpu, return_distmat=True)\n\n if args.visualize_ranks:\n visualize_ranked_results(\n distmat, dm.return_testdataset_by_name(name),\n save_dir=osp.join(args.save_dir, 'ranked_results', name),\n topk=20\n )\n\n else:\n model.eval()\n same_acc, diff_acc, all_acc, auc, thresh = evaluate(args, dm.lfw_dataset, model, compute_embeddings_lfw,\n args.test_batch_size, verbose=False, show_failed=args.show_failed, load_embeddings=args.load_embeddings)\n log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(same_acc, diff_acc))\n log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))\n log.info('Validation AUC: {0:.4f}'.format(auc))\n log.info('Estimated threshold: {0:.4f}'.format(thresh))\n #roc_auc(model, '/home/maxim/data/lfw/pairsTest.txt', '/media/slow_drive/cropped_lfw', args, use_gpu)\n return\n\nif __name__ 
== '__main__':\n main()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
if __name__ == '__main__':
    divider = '--------------------------------------'

    # Parse a URL-style query string into key/value pairs.
    print(divider)
    query = 'user=pilgrim&database=master&password=PapayaWhip'
    segments = query.split('&')
    print(segments)

    print(divider)
    # Split each "key=value" segment on the first '=' only.
    pairs = [segment.split('=', 1) for segment in segments if '=' in segment]
    print(pairs)
    print(dict(pairs))

    # Demonstrate string slicing: [start:stop], negative indices, open ends.
    print(divider)
    sentence = 'My alphabet starts where your alphabet ends.'
    for window in (slice(3, 11), slice(3, -3), slice(0, 2),
                   slice(None, 18), slice(18, None)):
        print(sentence[window])
|
flexible
|
{
"blob_id": "5c3bf49f88dec429ec85cceb8130cccf2691363b",
"index": 1538,
"step-1": "<mask token>\n",
"step-2": "if __name__ == '__main__':\n print('--------------------------------------')\n query = 'user=pilgrim&database=master&password=PapayaWhip'\n a_list = query.split('&')\n print(a_list)\n print('--------------------------------------')\n a_list_of_lists = [v.split('=', 1) for v in a_list if '=' in v]\n print(a_list_of_lists)\n a_dict = dict(a_list_of_lists)\n print(a_dict)\n print('--------------------------------------')\n a_string = 'My alphabet starts where your alphabet ends.'\n print(a_string[3:11])\n print(a_string[3:-3])\n print(a_string[0:2])\n print(a_string[:18])\n print(a_string[18:])\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
class Node(object):
    """A single binary-search-tree node holding one key plus its links."""

    def __init__(self, data):
        self.data = data    # key stored at this node
        self.parent = None  # ancestry link; maintained by callers, not here
        self.left = None    # left child (smaller keys)
        self.right = None   # right child (larger keys)
class tree(object):
    """Binary search tree of ``Node`` objects ordered by their ``data`` key."""

    def __init__(self):
        self.root = None

    def insert(self, root, value):
        """Insert *value* into the subtree rooted at *root*.

        The first insertion creates ``self.root``; duplicate values are
        silently ignored.  Returns *root* unchanged (original convention).
        """
        if self.root is None:
            self.root = Node(value)
        elif value < root.data:
            if root.left is None:
                root.left = Node(value)
            else:
                self.insert(root.left, value)
        elif value > root.data:
            if root.right is None:
                root.right = Node(value)
            else:
                self.insert(root.right, value)
        return root

    def delete(self, root, data, parent=None):
        """Remove *data* from the subtree rooted at *root*.

        Returns the (possibly new) subtree root; recursive calls relink the
        parent via that return value.  When called on the whole tree
        (``parent is None``) it also refreshes ``self.root``, which fixes
        the original crash on deleting a root with fewer than two children.
        """
        if root is None:
            return root
        if data > root.data:
            root.right = self.delete(root.right, data, root)
        elif data < root.data:
            root.left = self.delete(root.left, data, root)
        elif root.left is None:
            # Zero or one (right) child: splice in the right subtree.
            root = root.right
        elif root.right is None:
            # Only a left child: splice it in.  (Bug fix: the original
            # assigned root.right here, silently dropping the left subtree.)
            root = root.left
        else:
            # Two children: copy the in-order successor's key down, then
            # delete that successor from the right subtree.
            temp = self.successor(root.right)
            root.data = temp.data
            root.right = self.delete(root.right, temp.data, root)
        if parent is None:
            self.root = root
        return root

    def successor(self, root):
        """Return the minimum (leftmost) node of the subtree at *root*.

        Bug fix: the original only descended left when ``root.right``
        existed, returning the wrong node (and corrupting two-child
        deletion) whenever the subtree root had no right child.
        """
        temp = root
        while temp is not None and temp.left is not None:
            temp = temp.left
        return temp

    def inorder(self, root):
        """Print the subtree's keys in ascending (in-order) sequence."""
        if root is not None:
            self.inorder(root.left)
            print(root.data)
            self.inorder(root.right)
def main():
    """Build a sample BST, delete several keys, and print in-order traversals."""
    Tree = tree()
    for item in (50, 30, 20, 40, 70, 60, 80):
        Tree.insert(Tree.root, item)
    print(Tree.delete(Tree.root, 20, None))
    print("inorder after deleting 20:")
    print(Tree.inorder(Tree.root))
    print(Tree.delete(Tree.root, 30, None))
    print(Tree.delete(Tree.root, 50, None))
    print(Tree.inorder(Tree.root))


if __name__ == '__main__':
    # Fix: the original called main() unconditionally at module level,
    # running the demo as a side effect of a mere import.
    main()
|
normal
|
{
"blob_id": "64c32b3ada7fff51a7c4b07872b7688e100897d8",
"index": 81,
"step-1": "class Node(object):\n <mask token>\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\n<mask token>\n",
"step-2": "class Node(object):\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\n<mask token>\n",
"step-3": "class Node(object):\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\ndef main():\n Tree = tree()\n l = [50, 30, 20, 40, 70, 60, 80]\n for item in l:\n Tree.insert(Tree.root, item)\n print(Tree.delete(Tree.root, 20, None))\n print('inorder after deleting 20:')\n print(Tree.inorder(Tree.root))\n print(Tree.delete(Tree.root, 30, None))\n 
print(Tree.delete(Tree.root, 50, None))\n print(Tree.inorder(Tree.root))\n\n\n<mask token>\n",
"step-4": "class Node(object):\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\ndef main():\n Tree = tree()\n l = [50, 30, 20, 40, 70, 60, 80]\n for item in l:\n Tree.insert(Tree.root, item)\n print(Tree.delete(Tree.root, 20, None))\n print('inorder after deleting 20:')\n print(Tree.inorder(Tree.root))\n print(Tree.delete(Tree.root, 30, None))\n 
print(Tree.delete(Tree.root, 50, None))\n print(Tree.inorder(Tree.root))\n\n\nmain()\n",
"step-5": "class Node(object):\n def __init__(self,data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\nclass tree(object):\n def __init__(self):\n self.root = None\n \n def insert(self,root,value):\n if self.root == None:\n self.root = Node(value)\n else:\n if value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left,value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right,value)\n return root \n def delete(self,root,data,parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right,data,parent)\n elif root.data > data :\n parent = root\n root.left = self.delete(root.left,data,parent)\n else:\n if root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n \n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right,temp.data,parent)\n \n return root\n \n def successor(self,root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n def inorder(self,root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n \ndef main():\n Tree = tree()\n l =[50,30,20,40,70,60,80]\n for item in l:\n Tree.insert(Tree.root,item)\n print(Tree.delete(Tree.root,20,None))\n print(\"inorder after deleting 20:\")\n print(Tree.inorder(Tree.root))\n print(Tree.delete(Tree.root,30,None))\n 
print(Tree.delete(Tree.root,50,None))\n print(Tree.inorder(Tree.root))\n \nmain()\n \n \n \n \n \n \n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
def convertTime(et):
"""'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' """
hour = int(et[11:13])
if et.find('PM') != -1 and hour != 12:
dateString = et[:10]
hour = hour + 12
et = dateString + ' ' + str(hour) + et[13:19]
elif et.find('AM') != -1 and hour == 12:
dateString = et[:10]
hour = 0
et = dateString + ' ' + '0' + str(hour) + et[13:19]
else:
et = et[:19]
return et
def getNYtimenow():
tz = pytz.timezone('America/New_York')
time = str(datetime.now(tz))[:19]
return time
def datetimeStringToObject(timeString):
"""convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object"""
try:
year = int(timeString[:4])
month = int(timeString[5:7])
day = int(timeString[8:10])
hour = int(timeString[11:13])
minute = int(timeString[14:16])
result = datetime(year, month, day, hour, minute)
return result
except:
return None
<|reserved_special_token_0|>
def resetEstComp(cur, vID):
"""estimate completion time goes to 0"""
cur.execute('UPDATE OpenTasks SET estComplete = null WHERE vID = ? ', [vID]
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def execute_query(cur, query, args=()):
cur = cur.execute(query, args)
rows = cur.fetchall()
return rows
def convertTime(et):
"""'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' """
hour = int(et[11:13])
if et.find('PM') != -1 and hour != 12:
dateString = et[:10]
hour = hour + 12
et = dateString + ' ' + str(hour) + et[13:19]
elif et.find('AM') != -1 and hour == 12:
dateString = et[:10]
hour = 0
et = dateString + ' ' + '0' + str(hour) + et[13:19]
else:
et = et[:19]
return et
def getNYtimenow():
tz = pytz.timezone('America/New_York')
time = str(datetime.now(tz))[:19]
return time
def datetimeStringToObject(timeString):
"""convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object"""
try:
year = int(timeString[:4])
month = int(timeString[5:7])
day = int(timeString[8:10])
hour = int(timeString[11:13])
minute = int(timeString[14:16])
result = datetime(year, month, day, hour, minute)
return result
except:
return None
<|reserved_special_token_0|>
def notSignedIn(vID):
"""Return true is the drivers did not enter vehicle ID,
return False if the drivers have entered the vehicle ID"""
if str(vID) == '0':
return True
return False
def resetEstComp(cur, vID):
"""estimate completion time goes to 0"""
cur.execute('UPDATE OpenTasks SET estComplete = null WHERE vID = ? ', [vID]
)
def getNextFixOrderNum(cur, vID):
"""return the integer which is one larger than the order number of the last fixed task"""
orderNum = execute_query(cur,
'SELECT Count(*) FROM OpenTasks where vID = ? and fixTask = 1', [vID])[
0][0]
orderNum = int(orderNum) + 1
return orderNum
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def execute_query(cur, query, args=()):
cur = cur.execute(query, args)
rows = cur.fetchall()
return rows
def convertTime(et):
"""'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' """
hour = int(et[11:13])
if et.find('PM') != -1 and hour != 12:
dateString = et[:10]
hour = hour + 12
et = dateString + ' ' + str(hour) + et[13:19]
elif et.find('AM') != -1 and hour == 12:
dateString = et[:10]
hour = 0
et = dateString + ' ' + '0' + str(hour) + et[13:19]
else:
et = et[:19]
return et
def getNYtimenow():
tz = pytz.timezone('America/New_York')
time = str(datetime.now(tz))[:19]
return time
def datetimeStringToObject(timeString):
"""convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object"""
try:
year = int(timeString[:4])
month = int(timeString[5:7])
day = int(timeString[8:10])
hour = int(timeString[11:13])
minute = int(timeString[14:16])
result = datetime(year, month, day, hour, minute)
return result
except:
return None
def timeStringToObject(timeString):
"""convert a string in format hh:mm:ss to a datetime object with current date"""
try:
hour = int(timeString[:2])
minute = int(timeString[3:5])
result = datetime.today().replace(hour=hour, minute=minute, second=
0, microsecond=0)
return result
except:
return None
def notSignedIn(vID):
"""Return true is the drivers did not enter vehicle ID,
return False if the drivers have entered the vehicle ID"""
if str(vID) == '0':
return True
return False
def resetEstComp(cur, vID):
"""estimate completion time goes to 0"""
cur.execute('UPDATE OpenTasks SET estComplete = null WHERE vID = ? ', [vID]
)
def getNextFixOrderNum(cur, vID):
"""return the integer which is one larger than the order number of the last fixed task"""
orderNum = execute_query(cur,
'SELECT Count(*) FROM OpenTasks where vID = ? and fixTask = 1', [vID])[
0][0]
orderNum = int(orderNum) + 1
return orderNum
def getNextOrderNum(cur, vID):
"""return the integer which is one larger than the order number of the last task"""
orderNum = execute_query(cur,
'SELECT Count(*) FROM OpenTasks where vID = ?', [vID])[0][0]
orderNum = int(orderNum) + 1
return orderNum
def fixOrderBeforeInsert(cur, vID, orderNum):
"""Increment later tasks' order number by 1, orderNum is the order of the inserted task
should be called before inserting the task """
cur.execute(
'UPDATE OpenTasks SET orderNum = orderNum + 1 WHERE vID = ? and orderNum >= ?'
, [vID, orderNum])
<|reserved_special_token_1|>
import csv
import sqlite3
import time
from datetime import datetime, timedelta
import pandas as pd
import pytz
import json
import urllib
import numpy as np
DATABASE = '/var/www/html/citibikeapp/citibikeapp/citibike_change.db'
def execute_query(cur, query, args=()):
cur = cur.execute(query, args)
rows = cur.fetchall()
return rows
def convertTime(et):
"""'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' """
hour = int(et[11:13])
if et.find('PM') != -1 and hour != 12:
dateString = et[:10]
hour = hour + 12
et = dateString + ' ' + str(hour) + et[13:19]
elif et.find('AM') != -1 and hour == 12:
dateString = et[:10]
hour = 0
et = dateString + ' ' + '0' + str(hour) + et[13:19]
else:
et = et[:19]
return et
def getNYtimenow():
tz = pytz.timezone('America/New_York')
time = str(datetime.now(tz))[:19]
return time
def datetimeStringToObject(timeString):
"""convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object"""
try:
year = int(timeString[:4])
month = int(timeString[5:7])
day = int(timeString[8:10])
hour = int(timeString[11:13])
minute = int(timeString[14:16])
result = datetime(year, month, day, hour, minute)
return result
except:
return None
def timeStringToObject(timeString):
"""convert a string in format hh:mm:ss to a datetime object with current date"""
try:
hour = int(timeString[:2])
minute = int(timeString[3:5])
result = datetime.today().replace(hour=hour, minute=minute, second=
0, microsecond=0)
return result
except:
return None
def notSignedIn(vID):
"""Return true is the drivers did not enter vehicle ID,
return False if the drivers have entered the vehicle ID"""
if str(vID) == '0':
return True
return False
def resetEstComp(cur, vID):
"""estimate completion time goes to 0"""
cur.execute('UPDATE OpenTasks SET estComplete = null WHERE vID = ? ', [vID]
)
def getNextFixOrderNum(cur, vID):
"""return the integer which is one larger than the order number of the last fixed task"""
orderNum = execute_query(cur,
'SELECT Count(*) FROM OpenTasks where vID = ? and fixTask = 1', [vID])[
0][0]
orderNum = int(orderNum) + 1
return orderNum
def getNextOrderNum(cur, vID):
"""return the integer which is one larger than the order number of the last task"""
orderNum = execute_query(cur,
'SELECT Count(*) FROM OpenTasks where vID = ?', [vID])[0][0]
orderNum = int(orderNum) + 1
return orderNum
def fixOrderBeforeInsert(cur, vID, orderNum):
"""Increment later tasks' order number by 1, orderNum is the order of the inserted task
should be called before inserting the task """
cur.execute(
'UPDATE OpenTasks SET orderNum = orderNum + 1 WHERE vID = ? and orderNum >= ?'
, [vID, orderNum])
<|reserved_special_token_1|>
import csv
import sqlite3
import time
from datetime import datetime, timedelta
import pandas as pd
import pytz
import json
import urllib
import numpy as np
DATABASE = '/var/www/html/citibikeapp/citibikeapp/citibike_change.db'
def execute_query(cur,query, args=()):
cur = cur.execute(query, args)
rows = cur.fetchall()
# cur.close()
return rows
def convertTime(et):
"""'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' """
hour = int(et[11:13])
if et.find('PM') != -1 and hour != 12:
dateString = et[:10]
hour = hour + 12
et = dateString + ' ' + str(hour) + et[13:19]
elif et.find('AM') != -1 and hour == 12:
dateString = et[:10]
hour = 0
et = dateString + ' ' + '0'+str(hour) + et[13:19]
else:
et = et[:19]
return et
def getNYtimenow():
tz = pytz.timezone('America/New_York')
time = str(datetime.now(tz))[:19]
return time
def datetimeStringToObject(timeString):
"""convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object"""
try:
year = int(timeString[:4])
month = int(timeString[5:7])
day = int(timeString[8:10])
hour = int(timeString[11:13])
minute = int(timeString[14:16])
result = datetime(year, month, day, hour, minute)
return result
except:
return None
def timeStringToObject(timeString):
"""convert a string in format hh:mm:ss to a datetime object with current date"""
try:
# year = datetime.now().year
# month = datetime.now().month
# day = datetime.now().day
hour = int(timeString[:2])
minute = int(timeString[3:5])
result = datetime.today().replace(hour=hour, minute=minute, second=0, microsecond=0)
return result
except:
return None
def notSignedIn(vID):
"""Return true is the drivers did not enter vehicle ID,
return False if the drivers have entered the vehicle ID"""
if str(vID) == '0':
return True
return False
def resetEstComp(cur, vID):
"""estimate completion time goes to 0"""
cur.execute("""UPDATE OpenTasks SET estComplete = null WHERE vID = ? """,[vID])
def getNextFixOrderNum(cur,vID):
"""return the integer which is one larger than the order number of the last fixed task"""
orderNum = execute_query(cur, """SELECT Count(*) FROM OpenTasks where vID = ? and fixTask = 1""", [vID])[0][0]
orderNum = int(orderNum) + 1
return orderNum
def getNextOrderNum(cur,vID):
"""return the integer which is one larger than the order number of the last task"""
orderNum = execute_query(cur,"""SELECT Count(*) FROM OpenTasks where vID = ?""", [vID])[0][0]
orderNum = int(orderNum) + 1
return orderNum
def fixOrderBeforeInsert(cur,vID,orderNum):
"""Increment later tasks' order number by 1, orderNum is the order of the inserted task
should be called before inserting the task """
cur.execute("""UPDATE OpenTasks SET orderNum = orderNum + 1 WHERE vID = ? and orderNum >= ?""",[vID, orderNum])
|
flexible
|
{
"blob_id": "9b8b196e1ad845ab745dabe5abe3be7bea0d5695",
"index": 4835,
"step-1": "<mask token>\n\n\ndef convertTime(et):\n \"\"\"'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' \"\"\"\n hour = int(et[11:13])\n if et.find('PM') != -1 and hour != 12:\n dateString = et[:10]\n hour = hour + 12\n et = dateString + ' ' + str(hour) + et[13:19]\n elif et.find('AM') != -1 and hour == 12:\n dateString = et[:10]\n hour = 0\n et = dateString + ' ' + '0' + str(hour) + et[13:19]\n else:\n et = et[:19]\n return et\n\n\ndef getNYtimenow():\n tz = pytz.timezone('America/New_York')\n time = str(datetime.now(tz))[:19]\n return time\n\n\ndef datetimeStringToObject(timeString):\n \"\"\"convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object\"\"\"\n try:\n year = int(timeString[:4])\n month = int(timeString[5:7])\n day = int(timeString[8:10])\n hour = int(timeString[11:13])\n minute = int(timeString[14:16])\n result = datetime(year, month, day, hour, minute)\n return result\n except:\n return None\n\n\n<mask token>\n\n\ndef resetEstComp(cur, vID):\n \"\"\"estimate completion time goes to 0\"\"\"\n cur.execute('UPDATE OpenTasks SET estComplete = null WHERE vID = ? ', [vID]\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef execute_query(cur, query, args=()):\n cur = cur.execute(query, args)\n rows = cur.fetchall()\n return rows\n\n\ndef convertTime(et):\n \"\"\"'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' \"\"\"\n hour = int(et[11:13])\n if et.find('PM') != -1 and hour != 12:\n dateString = et[:10]\n hour = hour + 12\n et = dateString + ' ' + str(hour) + et[13:19]\n elif et.find('AM') != -1 and hour == 12:\n dateString = et[:10]\n hour = 0\n et = dateString + ' ' + '0' + str(hour) + et[13:19]\n else:\n et = et[:19]\n return et\n\n\ndef getNYtimenow():\n tz = pytz.timezone('America/New_York')\n time = str(datetime.now(tz))[:19]\n return time\n\n\ndef datetimeStringToObject(timeString):\n \"\"\"convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object\"\"\"\n try:\n year = int(timeString[:4])\n month = int(timeString[5:7])\n day = int(timeString[8:10])\n hour = int(timeString[11:13])\n minute = int(timeString[14:16])\n result = datetime(year, month, day, hour, minute)\n return result\n except:\n return None\n\n\n<mask token>\n\n\ndef notSignedIn(vID):\n \"\"\"Return true is the drivers did not enter vehicle ID, \n return False if the drivers have entered the vehicle ID\"\"\"\n if str(vID) == '0':\n return True\n return False\n\n\ndef resetEstComp(cur, vID):\n \"\"\"estimate completion time goes to 0\"\"\"\n cur.execute('UPDATE OpenTasks SET estComplete = null WHERE vID = ? ', [vID]\n )\n\n\ndef getNextFixOrderNum(cur, vID):\n \"\"\"return the integer which is one larger than the order number of the last fixed task\"\"\"\n orderNum = execute_query(cur,\n 'SELECT Count(*) FROM OpenTasks where vID = ? and fixTask = 1', [vID])[\n 0][0]\n orderNum = int(orderNum) + 1\n return orderNum\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef execute_query(cur, query, args=()):\n cur = cur.execute(query, args)\n rows = cur.fetchall()\n return rows\n\n\ndef convertTime(et):\n \"\"\"'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' \"\"\"\n hour = int(et[11:13])\n if et.find('PM') != -1 and hour != 12:\n dateString = et[:10]\n hour = hour + 12\n et = dateString + ' ' + str(hour) + et[13:19]\n elif et.find('AM') != -1 and hour == 12:\n dateString = et[:10]\n hour = 0\n et = dateString + ' ' + '0' + str(hour) + et[13:19]\n else:\n et = et[:19]\n return et\n\n\ndef getNYtimenow():\n tz = pytz.timezone('America/New_York')\n time = str(datetime.now(tz))[:19]\n return time\n\n\ndef datetimeStringToObject(timeString):\n \"\"\"convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object\"\"\"\n try:\n year = int(timeString[:4])\n month = int(timeString[5:7])\n day = int(timeString[8:10])\n hour = int(timeString[11:13])\n minute = int(timeString[14:16])\n result = datetime(year, month, day, hour, minute)\n return result\n except:\n return None\n\n\ndef timeStringToObject(timeString):\n \"\"\"convert a string in format hh:mm:ss to a datetime object with current date\"\"\"\n try:\n hour = int(timeString[:2])\n minute = int(timeString[3:5])\n result = datetime.today().replace(hour=hour, minute=minute, second=\n 0, microsecond=0)\n return result\n except:\n return None\n\n\ndef notSignedIn(vID):\n \"\"\"Return true is the drivers did not enter vehicle ID, \n return False if the drivers have entered the vehicle ID\"\"\"\n if str(vID) == '0':\n return True\n return False\n\n\ndef resetEstComp(cur, vID):\n \"\"\"estimate completion time goes to 0\"\"\"\n cur.execute('UPDATE OpenTasks SET estComplete = null WHERE vID = ? ', [vID]\n )\n\n\ndef getNextFixOrderNum(cur, vID):\n \"\"\"return the integer which is one larger than the order number of the last fixed task\"\"\"\n orderNum = execute_query(cur,\n 'SELECT Count(*) FROM OpenTasks where vID = ? 
and fixTask = 1', [vID])[\n 0][0]\n orderNum = int(orderNum) + 1\n return orderNum\n\n\ndef getNextOrderNum(cur, vID):\n \"\"\"return the integer which is one larger than the order number of the last task\"\"\"\n orderNum = execute_query(cur,\n 'SELECT Count(*) FROM OpenTasks where vID = ?', [vID])[0][0]\n orderNum = int(orderNum) + 1\n return orderNum\n\n\ndef fixOrderBeforeInsert(cur, vID, orderNum):\n \"\"\"Increment later tasks' order number by 1, orderNum is the order of the inserted task\n should be called before inserting the task \"\"\"\n cur.execute(\n 'UPDATE OpenTasks SET orderNum = orderNum + 1 WHERE vID = ? and orderNum >= ?'\n , [vID, orderNum])\n",
"step-4": "import csv\nimport sqlite3\nimport time\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport pytz\nimport json\nimport urllib\nimport numpy as np\nDATABASE = '/var/www/html/citibikeapp/citibikeapp/citibike_change.db'\n\n\ndef execute_query(cur, query, args=()):\n cur = cur.execute(query, args)\n rows = cur.fetchall()\n return rows\n\n\ndef convertTime(et):\n \"\"\"'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' \"\"\"\n hour = int(et[11:13])\n if et.find('PM') != -1 and hour != 12:\n dateString = et[:10]\n hour = hour + 12\n et = dateString + ' ' + str(hour) + et[13:19]\n elif et.find('AM') != -1 and hour == 12:\n dateString = et[:10]\n hour = 0\n et = dateString + ' ' + '0' + str(hour) + et[13:19]\n else:\n et = et[:19]\n return et\n\n\ndef getNYtimenow():\n tz = pytz.timezone('America/New_York')\n time = str(datetime.now(tz))[:19]\n return time\n\n\ndef datetimeStringToObject(timeString):\n \"\"\"convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object\"\"\"\n try:\n year = int(timeString[:4])\n month = int(timeString[5:7])\n day = int(timeString[8:10])\n hour = int(timeString[11:13])\n minute = int(timeString[14:16])\n result = datetime(year, month, day, hour, minute)\n return result\n except:\n return None\n\n\ndef timeStringToObject(timeString):\n \"\"\"convert a string in format hh:mm:ss to a datetime object with current date\"\"\"\n try:\n hour = int(timeString[:2])\n minute = int(timeString[3:5])\n result = datetime.today().replace(hour=hour, minute=minute, second=\n 0, microsecond=0)\n return result\n except:\n return None\n\n\ndef notSignedIn(vID):\n \"\"\"Return true is the drivers did not enter vehicle ID, \n return False if the drivers have entered the vehicle ID\"\"\"\n if str(vID) == '0':\n return True\n return False\n\n\ndef resetEstComp(cur, vID):\n \"\"\"estimate completion time goes to 0\"\"\"\n cur.execute('UPDATE OpenTasks SET estComplete = null WHERE vID = ? 
', [vID]\n )\n\n\ndef getNextFixOrderNum(cur, vID):\n \"\"\"return the integer which is one larger than the order number of the last fixed task\"\"\"\n orderNum = execute_query(cur,\n 'SELECT Count(*) FROM OpenTasks where vID = ? and fixTask = 1', [vID])[\n 0][0]\n orderNum = int(orderNum) + 1\n return orderNum\n\n\ndef getNextOrderNum(cur, vID):\n \"\"\"return the integer which is one larger than the order number of the last task\"\"\"\n orderNum = execute_query(cur,\n 'SELECT Count(*) FROM OpenTasks where vID = ?', [vID])[0][0]\n orderNum = int(orderNum) + 1\n return orderNum\n\n\ndef fixOrderBeforeInsert(cur, vID, orderNum):\n \"\"\"Increment later tasks' order number by 1, orderNum is the order of the inserted task\n should be called before inserting the task \"\"\"\n cur.execute(\n 'UPDATE OpenTasks SET orderNum = orderNum + 1 WHERE vID = ? and orderNum >= ?'\n , [vID, orderNum])\n",
"step-5": "import csv\nimport sqlite3\nimport time\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport pytz\nimport json\nimport urllib\nimport numpy as np\n\nDATABASE = '/var/www/html/citibikeapp/citibikeapp/citibike_change.db'\n\ndef execute_query(cur,query, args=()):\n cur = cur.execute(query, args)\n rows = cur.fetchall()\n # cur.close()\n return rows\n\n\ndef convertTime(et):\n \"\"\"'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' \"\"\" \n hour = int(et[11:13])\n if et.find('PM') != -1 and hour != 12:\n dateString = et[:10]\n hour = hour + 12\n et = dateString + ' ' + str(hour) + et[13:19]\n elif et.find('AM') != -1 and hour == 12:\n dateString = et[:10]\n hour = 0\n et = dateString + ' ' + '0'+str(hour) + et[13:19]\n else:\n et = et[:19]\n\n return et\n\n\ndef getNYtimenow():\n tz = pytz.timezone('America/New_York')\n time = str(datetime.now(tz))[:19]\n return time\n\ndef datetimeStringToObject(timeString):\n \"\"\"convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object\"\"\"\n try:\n year = int(timeString[:4])\n month = int(timeString[5:7])\n day = int(timeString[8:10])\n hour = int(timeString[11:13])\n minute = int(timeString[14:16])\n result = datetime(year, month, day, hour, minute)\n return result\n except:\n return None\n\ndef timeStringToObject(timeString):\n \"\"\"convert a string in format hh:mm:ss to a datetime object with current date\"\"\"\n try:\n # year = datetime.now().year\n # month = datetime.now().month\n # day = datetime.now().day\n hour = int(timeString[:2])\n minute = int(timeString[3:5])\n result = datetime.today().replace(hour=hour, minute=minute, second=0, microsecond=0)\n return result\n except:\n return None\n\ndef notSignedIn(vID):\n \"\"\"Return true is the drivers did not enter vehicle ID, \n return False if the drivers have entered the vehicle ID\"\"\"\n if str(vID) == '0':\n return True\n return False\n\n\ndef resetEstComp(cur, vID):\n \"\"\"estimate completion time goes to 0\"\"\" \n 
cur.execute(\"\"\"UPDATE OpenTasks SET estComplete = null WHERE vID = ? \"\"\",[vID])\n\ndef getNextFixOrderNum(cur,vID):\n \"\"\"return the integer which is one larger than the order number of the last fixed task\"\"\"\n orderNum = execute_query(cur, \"\"\"SELECT Count(*) FROM OpenTasks where vID = ? and fixTask = 1\"\"\", [vID])[0][0]\n orderNum = int(orderNum) + 1\n return orderNum\n\ndef getNextOrderNum(cur,vID):\n \"\"\"return the integer which is one larger than the order number of the last task\"\"\"\n orderNum = execute_query(cur,\"\"\"SELECT Count(*) FROM OpenTasks where vID = ?\"\"\", [vID])[0][0]\n orderNum = int(orderNum) + 1\n return orderNum\n\ndef fixOrderBeforeInsert(cur,vID,orderNum):\n \"\"\"Increment later tasks' order number by 1, orderNum is the order of the inserted task\n should be called before inserting the task \"\"\"\n cur.execute(\"\"\"UPDATE OpenTasks SET orderNum = orderNum + 1 WHERE vID = ? and orderNum >= ?\"\"\",[vID, orderNum])",
"step-ids": [
4,
7,
10,
12,
13
]
}
|
[
4,
7,
10,
12,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('lists', '0004_auto_20180608_1835')]
operations = [migrations.AlterModelOptions(name='todo', options={
'ordering': ('-created_at',)}), migrations.AddField(model_name=
'todo', name='content', field=models.TextField(default='',
max_length=500))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('lists', '0004_auto_20180608_1835')]
operations = [migrations.AlterModelOptions(name='todo', options={
'ordering': ('-created_at',)}), migrations.AddField(model_name=
'todo', name='content', field=models.TextField(default='',
max_length=500))]
<|reserved_special_token_1|>
# Generated by Django 2.0.4 on 2018-06-09 05:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lists', '0004_auto_20180608_1835'),
]
operations = [
migrations.AlterModelOptions(
name='todo',
options={'ordering': ('-created_at',)},
),
migrations.AddField(
model_name='todo',
name='content',
field=models.TextField(default='', max_length=500),
),
]
|
flexible
|
{
"blob_id": "b27913d2cd29f174d79652af6da2846e397373fc",
"index": 1549,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('lists', '0004_auto_20180608_1835')]\n operations = [migrations.AlterModelOptions(name='todo', options={\n 'ordering': ('-created_at',)}), migrations.AddField(model_name=\n 'todo', name='content', field=models.TextField(default='',\n max_length=500))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('lists', '0004_auto_20180608_1835')]\n operations = [migrations.AlterModelOptions(name='todo', options={\n 'ordering': ('-created_at',)}), migrations.AddField(model_name=\n 'todo', name='content', field=models.TextField(default='',\n max_length=500))]\n",
"step-5": "# Generated by Django 2.0.4 on 2018-06-09 05:09\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lists', '0004_auto_20180608_1835'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='todo',\n options={'ordering': ('-created_at',)},\n ),\n migrations.AddField(\n model_name='todo',\n name='content',\n field=models.TextField(default='', max_length=500),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def extract_field_from_request(request: Request, field_name: str
) ->typing.Optional[int]:
"""
Extracts attribte from request
if attribute is present in data it has precedence over query parameters
"""
try:
value = request.data.get(field_name)
except AttributeError:
raise ValidationError('Malformed request')
if not value:
value = request.query_params.get(field_name)
if value:
try:
return int(value)
except ValueError:
raise ValidationError(
f"Value of field '{field_name}' is not a valid integer ({value})"
)
return None
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def extract_organization_id_from_request_query(request):
return request.query_params.get('organization'
) or request.query_params.get('organization_id')
<|reserved_special_token_0|>
def extract_field_from_request(request: Request, field_name: str
) ->typing.Optional[int]:
"""
Extracts attribte from request
if attribute is present in data it has precedence over query parameters
"""
try:
value = request.data.get(field_name)
except AttributeError:
raise ValidationError('Malformed request')
if not value:
value = request.query_params.get(field_name)
if value:
try:
return int(value)
except ValueError:
raise ValidationError(
f"Value of field '{field_name}' is not a valid integer ({value})"
)
return None
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def extract_organization_id_from_request_query(request):
return request.query_params.get('organization'
) or request.query_params.get('organization_id')
def extract_organization_id_from_request_data(request) ->(int, bool):
"""
Returns the organization id from the request.data and a bool indicating if the key
was present in the data (to distinguish between missing data and empty input value)
:param request:
:return:
"""
for source in (request.data, request.GET):
if 'organization' in source:
return source.get('organization'), True
if 'organization_id' in request.data:
return source.get('organization_id'), True
return None, False
def extract_field_from_request(request: Request, field_name: str
) ->typing.Optional[int]:
"""
Extracts attribte from request
if attribute is present in data it has precedence over query parameters
"""
try:
value = request.data.get(field_name)
except AttributeError:
raise ValidationError('Malformed request')
if not value:
value = request.query_params.get(field_name)
if value:
try:
return int(value)
except ValueError:
raise ValidationError(
f"Value of field '{field_name}' is not a valid integer ({value})"
)
return None
<|reserved_special_token_1|>
import typing
from rest_framework.exceptions import ValidationError
from rest_framework.request import Request
def extract_organization_id_from_request_query(request):
return request.query_params.get('organization'
) or request.query_params.get('organization_id')
def extract_organization_id_from_request_data(request) ->(int, bool):
"""
Returns the organization id from the request.data and a bool indicating if the key
was present in the data (to distinguish between missing data and empty input value)
:param request:
:return:
"""
for source in (request.data, request.GET):
if 'organization' in source:
return source.get('organization'), True
if 'organization_id' in request.data:
return source.get('organization_id'), True
return None, False
def extract_field_from_request(request: Request, field_name: str
) ->typing.Optional[int]:
"""
Extracts attribte from request
if attribute is present in data it has precedence over query parameters
"""
try:
value = request.data.get(field_name)
except AttributeError:
raise ValidationError('Malformed request')
if not value:
value = request.query_params.get(field_name)
if value:
try:
return int(value)
except ValueError:
raise ValidationError(
f"Value of field '{field_name}' is not a valid integer ({value})"
)
return None
<|reserved_special_token_1|>
import typing
from rest_framework.exceptions import ValidationError
from rest_framework.request import Request
def extract_organization_id_from_request_query(request):
return request.query_params.get('organization') or request.query_params.get('organization_id')
def extract_organization_id_from_request_data(request) -> (int, bool):
"""
Returns the organization id from the request.data and a bool indicating if the key
was present in the data (to distinguish between missing data and empty input value)
:param request:
:return:
"""
for source in (request.data, request.GET):
if 'organization' in source:
return source.get('organization'), True
if 'organization_id' in request.data:
return source.get('organization_id'), True
return None, False
def extract_field_from_request(request: Request, field_name: str) -> typing.Optional[int]:
"""
Extracts attribte from request
if attribute is present in data it has precedence over query parameters
"""
try:
# Try to get value from data
value = request.data.get(field_name)
except AttributeError:
raise ValidationError('Malformed request')
if not value:
# Try to get value from query parameters
value = request.query_params.get(field_name)
if value:
try:
return int(value)
except ValueError:
raise ValidationError(f"Value of field '{field_name}' is not a valid integer ({value})")
return None
|
flexible
|
{
"blob_id": "0b7523035fdad74454e51dc9da9fc4e9bea2f6bf",
"index": 6904,
"step-1": "<mask token>\n\n\ndef extract_field_from_request(request: Request, field_name: str\n ) ->typing.Optional[int]:\n \"\"\"\n Extracts attribte from request\n if attribute is present in data it has precedence over query parameters\n \"\"\"\n try:\n value = request.data.get(field_name)\n except AttributeError:\n raise ValidationError('Malformed request')\n if not value:\n value = request.query_params.get(field_name)\n if value:\n try:\n return int(value)\n except ValueError:\n raise ValidationError(\n f\"Value of field '{field_name}' is not a valid integer ({value})\"\n )\n return None\n",
"step-2": "<mask token>\n\n\ndef extract_organization_id_from_request_query(request):\n return request.query_params.get('organization'\n ) or request.query_params.get('organization_id')\n\n\n<mask token>\n\n\ndef extract_field_from_request(request: Request, field_name: str\n ) ->typing.Optional[int]:\n \"\"\"\n Extracts attribte from request\n if attribute is present in data it has precedence over query parameters\n \"\"\"\n try:\n value = request.data.get(field_name)\n except AttributeError:\n raise ValidationError('Malformed request')\n if not value:\n value = request.query_params.get(field_name)\n if value:\n try:\n return int(value)\n except ValueError:\n raise ValidationError(\n f\"Value of field '{field_name}' is not a valid integer ({value})\"\n )\n return None\n",
"step-3": "<mask token>\n\n\ndef extract_organization_id_from_request_query(request):\n return request.query_params.get('organization'\n ) or request.query_params.get('organization_id')\n\n\ndef extract_organization_id_from_request_data(request) ->(int, bool):\n \"\"\"\n Returns the organization id from the request.data and a bool indicating if the key\n was present in the data (to distinguish between missing data and empty input value)\n :param request:\n :return:\n \"\"\"\n for source in (request.data, request.GET):\n if 'organization' in source:\n return source.get('organization'), True\n if 'organization_id' in request.data:\n return source.get('organization_id'), True\n return None, False\n\n\ndef extract_field_from_request(request: Request, field_name: str\n ) ->typing.Optional[int]:\n \"\"\"\n Extracts attribte from request\n if attribute is present in data it has precedence over query parameters\n \"\"\"\n try:\n value = request.data.get(field_name)\n except AttributeError:\n raise ValidationError('Malformed request')\n if not value:\n value = request.query_params.get(field_name)\n if value:\n try:\n return int(value)\n except ValueError:\n raise ValidationError(\n f\"Value of field '{field_name}' is not a valid integer ({value})\"\n )\n return None\n",
"step-4": "import typing\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.request import Request\n\n\ndef extract_organization_id_from_request_query(request):\n return request.query_params.get('organization'\n ) or request.query_params.get('organization_id')\n\n\ndef extract_organization_id_from_request_data(request) ->(int, bool):\n \"\"\"\n Returns the organization id from the request.data and a bool indicating if the key\n was present in the data (to distinguish between missing data and empty input value)\n :param request:\n :return:\n \"\"\"\n for source in (request.data, request.GET):\n if 'organization' in source:\n return source.get('organization'), True\n if 'organization_id' in request.data:\n return source.get('organization_id'), True\n return None, False\n\n\ndef extract_field_from_request(request: Request, field_name: str\n ) ->typing.Optional[int]:\n \"\"\"\n Extracts attribte from request\n if attribute is present in data it has precedence over query parameters\n \"\"\"\n try:\n value = request.data.get(field_name)\n except AttributeError:\n raise ValidationError('Malformed request')\n if not value:\n value = request.query_params.get(field_name)\n if value:\n try:\n return int(value)\n except ValueError:\n raise ValidationError(\n f\"Value of field '{field_name}' is not a valid integer ({value})\"\n )\n return None\n",
"step-5": "import typing\n\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.request import Request\n\n\ndef extract_organization_id_from_request_query(request):\n return request.query_params.get('organization') or request.query_params.get('organization_id')\n\n\ndef extract_organization_id_from_request_data(request) -> (int, bool):\n \"\"\"\n Returns the organization id from the request.data and a bool indicating if the key\n was present in the data (to distinguish between missing data and empty input value)\n :param request:\n :return:\n \"\"\"\n for source in (request.data, request.GET):\n if 'organization' in source:\n return source.get('organization'), True\n if 'organization_id' in request.data:\n return source.get('organization_id'), True\n return None, False\n\n\ndef extract_field_from_request(request: Request, field_name: str) -> typing.Optional[int]:\n \"\"\"\n Extracts attribte from request\n if attribute is present in data it has precedence over query parameters\n \"\"\"\n\n try:\n # Try to get value from data\n value = request.data.get(field_name)\n except AttributeError:\n raise ValidationError('Malformed request')\n\n if not value:\n # Try to get value from query parameters\n value = request.query_params.get(field_name)\n\n if value:\n try:\n return int(value)\n except ValueError:\n raise ValidationError(f\"Value of field '{field_name}' is not a valid integer ({value})\")\n\n return None\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import re
match = re.search(r'pi+', 'piiig')
print 'found', match.group() == "piii"
|
normal
|
{
"blob_id": "82083f16c18db35193fa2aa45bc28c5201962f90",
"index": 6704,
"step-1": "\n\nimport re\n\n\nmatch = re.search(r'pi+', 'piiig')\nprint 'found', match.group() == \"piii\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def memory(count: int, start_numbers: list):
numbers = defaultdict(lambda : tuple(2 * [None]), {el: (idx, None) for
idx, el in enumerate(start_numbers)})
last = start_numbers[-1]
for idx in range(len(numbers), count):
last = 0 if None in numbers[last] else reduce(lambda a, b: a - b,
numbers[last])
numbers[last] = idx, numbers[last][0]
print(
f'For starting numbers: {start_numbers}, the {count}th number is: {last}'
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def memory(count: int, start_numbers: list):
numbers = defaultdict(lambda : tuple(2 * [None]), {el: (idx, None) for
idx, el in enumerate(start_numbers)})
last = start_numbers[-1]
for idx in range(len(numbers), count):
last = 0 if None in numbers[last] else reduce(lambda a, b: a - b,
numbers[last])
numbers[last] = idx, numbers[last][0]
print(
f'For starting numbers: {start_numbers}, the {count}th number is: {last}'
)
[memory(count, [8, 0, 17, 4, 1, 12]) for count in [2020, 30000000]]
<|reserved_special_token_1|>
from functools import reduce
from collections import defaultdict
def memory(count: int, start_numbers: list):
numbers = defaultdict(lambda : tuple(2 * [None]), {el: (idx, None) for
idx, el in enumerate(start_numbers)})
last = start_numbers[-1]
for idx in range(len(numbers), count):
last = 0 if None in numbers[last] else reduce(lambda a, b: a - b,
numbers[last])
numbers[last] = idx, numbers[last][0]
print(
f'For starting numbers: {start_numbers}, the {count}th number is: {last}'
)
[memory(count, [8, 0, 17, 4, 1, 12]) for count in [2020, 30000000]]
<|reserved_special_token_1|>
from functools import reduce
from collections import defaultdict
def memory(count: int, start_numbers: list):
numbers = defaultdict(lambda: tuple(2 * [None]), { el: (idx,None ) for idx,el in enumerate(start_numbers) })
last = start_numbers[-1]
for idx in range(len(numbers), count):
last = 0 if None in numbers[last] else reduce(lambda a,b:a-b, numbers[last])
numbers[last] = ( idx, numbers[last][0] )
print(f"For starting numbers: {start_numbers}, the {count}th number is: {last}")
[ memory(count, [8,0,17,4,1,12]) for count in [ 2020, 30000000 ] ]
|
flexible
|
{
"blob_id": "0f0adde7241898d2efe7e2b5cc218e42ed7b73d8",
"index": 5475,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef memory(count: int, start_numbers: list):\n numbers = defaultdict(lambda : tuple(2 * [None]), {el: (idx, None) for \n idx, el in enumerate(start_numbers)})\n last = start_numbers[-1]\n for idx in range(len(numbers), count):\n last = 0 if None in numbers[last] else reduce(lambda a, b: a - b,\n numbers[last])\n numbers[last] = idx, numbers[last][0]\n print(\n f'For starting numbers: {start_numbers}, the {count}th number is: {last}'\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef memory(count: int, start_numbers: list):\n numbers = defaultdict(lambda : tuple(2 * [None]), {el: (idx, None) for \n idx, el in enumerate(start_numbers)})\n last = start_numbers[-1]\n for idx in range(len(numbers), count):\n last = 0 if None in numbers[last] else reduce(lambda a, b: a - b,\n numbers[last])\n numbers[last] = idx, numbers[last][0]\n print(\n f'For starting numbers: {start_numbers}, the {count}th number is: {last}'\n )\n\n\n[memory(count, [8, 0, 17, 4, 1, 12]) for count in [2020, 30000000]]\n",
"step-4": "from functools import reduce\nfrom collections import defaultdict\n\n\ndef memory(count: int, start_numbers: list):\n numbers = defaultdict(lambda : tuple(2 * [None]), {el: (idx, None) for \n idx, el in enumerate(start_numbers)})\n last = start_numbers[-1]\n for idx in range(len(numbers), count):\n last = 0 if None in numbers[last] else reduce(lambda a, b: a - b,\n numbers[last])\n numbers[last] = idx, numbers[last][0]\n print(\n f'For starting numbers: {start_numbers}, the {count}th number is: {last}'\n )\n\n\n[memory(count, [8, 0, 17, 4, 1, 12]) for count in [2020, 30000000]]\n",
"step-5": "from functools import reduce\nfrom collections import defaultdict\n\ndef memory(count: int, start_numbers: list):\n numbers = defaultdict(lambda: tuple(2 * [None]), { el: (idx,None ) for idx,el in enumerate(start_numbers) })\n last = start_numbers[-1]\n for idx in range(len(numbers), count):\n last = 0 if None in numbers[last] else reduce(lambda a,b:a-b, numbers[last])\n numbers[last] = ( idx, numbers[last][0] )\n print(f\"For starting numbers: {start_numbers}, the {count}th number is: {last}\")\n[ memory(count, [8,0,17,4,1,12]) for count in [ 2020, 30000000 ] ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import random
from utils.misc import *
from utils.adapt_helpers import *
from utils.rotation import rotate_batch, rotate_single_with_label
from utils.model import resnet18
from utils.train_helpers import normalize, te_transforms
from utils.test_helpers import test
device = 'cuda' if torch.cuda.is_available() else 'cpu'
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', default='data/CIFAR-10-C/')
parser.add_argument('--shared', default=None)
########################################################################
parser.add_argument('--depth', default=18, type=int)
parser.add_argument('--group_norm', default=32, type=int)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--workers', default=8, type=int)
########################################################################
parser.add_argument('--lr', default=0.001, type=float)
parser.add_argument('--niter', default=1, type=int)
parser.add_argument('--online', action='store_true')
parser.add_argument('--shuffle', action='store_true')
parser.add_argument('--threshold', default=1, type=float)
parser.add_argument('--epsilon', default=0.2, type=float)
parser.add_argument('--dset_size', default=0, type=int)
########################################################################
parser.add_argument('--resume', default=None)
parser.add_argument('--outf', default='.')
parser.add_argument('--epochs', default=10, type=int)
args = parser.parse_args()
args.threshold += 0.001 # to correct for numeric errors
my_makedir(args.outf)
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
def gn_helper(planes):
return nn.GroupNorm(args.group_norm, planes)
norm_layer = gn_helper
net = resnet18(num_classes = 10, norm_layer=norm_layer).to(device)
net = torch.nn.DataParallel(net)
print('Resuming from %s...' %(args.resume))
ckpt = torch.load('%s/best.pth' %(args.resume))
net.load_state_dict(ckpt['net'])
print("Starting Test Error: %.3f" % ckpt['err_cls'])
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.SGD(net.parameters(), lr=args.lr)
trset, trloader = prepare_train_data(args)
teset, teloader = prepare_test_data(args)
print("Lethean Attack")
for i in range(args.epochs):
idx = random.randint(0, len(trset) - 1)
img, lbl = trset[idx]
random_rot = random.randint(1, 3)
rot_img = rotate_single_with_label(img, random_rot)
adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter, args.batch_size)
if i % 50 == 49:
print("%d%%" % ((i + 1) * 100 / 5000))
err_cls, correct_per_cls, total_per_cls = test(teloader, net, verbose=True, print_freq=0)
print("Epoch %d Test error: %.3f" % (i, err_cls))
|
normal
|
{
"blob_id": "1f345a20343eb859cb37bf406623c0fc10722357",
"index": 4826,
"step-1": "<mask token>\n\n\ndef gn_helper(planes):\n return nn.GroupNorm(args.group_norm, planes)\n\n\n<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('--dataroot', default='data/CIFAR-10-C/')\nparser.add_argument('--shared', default=None)\nparser.add_argument('--depth', default=18, type=int)\nparser.add_argument('--group_norm', default=32, type=int)\nparser.add_argument('--batch_size', default=32, type=int)\nparser.add_argument('--workers', default=8, type=int)\nparser.add_argument('--lr', default=0.001, type=float)\nparser.add_argument('--niter', default=1, type=int)\nparser.add_argument('--online', action='store_true')\nparser.add_argument('--shuffle', action='store_true')\nparser.add_argument('--threshold', default=1, type=float)\nparser.add_argument('--epsilon', default=0.2, type=float)\nparser.add_argument('--dset_size', default=0, type=int)\nparser.add_argument('--resume', default=None)\nparser.add_argument('--outf', default='.')\nparser.add_argument('--epochs', default=10, type=int)\n<mask token>\nargs.threshold += 0.001\nmy_makedir(args.outf)\n<mask token>\n\n\ndef gn_helper(planes):\n return nn.GroupNorm(args.group_norm, planes)\n\n\n<mask token>\nprint('Resuming from %s...' % args.resume)\n<mask token>\nnet.load_state_dict(ckpt['net'])\nprint('Starting Test Error: %.3f' % ckpt['err_cls'])\n<mask token>\nprint('Lethean Attack')\nfor i in range(args.epochs):\n idx = random.randint(0, len(trset) - 1)\n img, lbl = trset[idx]\n random_rot = random.randint(1, 3)\n rot_img = rotate_single_with_label(img, random_rot)\n adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter,\n args.batch_size)\n if i % 50 == 49:\n print('%d%%' % ((i + 1) * 100 / 5000))\n err_cls, correct_per_cls, total_per_cls = test(teloader, net,\n verbose=True, print_freq=0)\n print('Epoch %d Test error: %.3f' % (i, err_cls))\n",
"step-3": "<mask token>\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',\n 'ship', 'truck')\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataroot', default='data/CIFAR-10-C/')\nparser.add_argument('--shared', default=None)\nparser.add_argument('--depth', default=18, type=int)\nparser.add_argument('--group_norm', default=32, type=int)\nparser.add_argument('--batch_size', default=32, type=int)\nparser.add_argument('--workers', default=8, type=int)\nparser.add_argument('--lr', default=0.001, type=float)\nparser.add_argument('--niter', default=1, type=int)\nparser.add_argument('--online', action='store_true')\nparser.add_argument('--shuffle', action='store_true')\nparser.add_argument('--threshold', default=1, type=float)\nparser.add_argument('--epsilon', default=0.2, type=float)\nparser.add_argument('--dset_size', default=0, type=int)\nparser.add_argument('--resume', default=None)\nparser.add_argument('--outf', default='.')\nparser.add_argument('--epochs', default=10, type=int)\nargs = parser.parse_args()\nargs.threshold += 0.001\nmy_makedir(args.outf)\n<mask token>\ncudnn.benchmark = True\n\n\ndef gn_helper(planes):\n return nn.GroupNorm(args.group_norm, planes)\n\n\nnorm_layer = gn_helper\nnet = resnet18(num_classes=10, norm_layer=norm_layer).to(device)\nnet = torch.nn.DataParallel(net)\nprint('Resuming from %s...' 
% args.resume)\nckpt = torch.load('%s/best.pth' % args.resume)\nnet.load_state_dict(ckpt['net'])\nprint('Starting Test Error: %.3f' % ckpt['err_cls'])\ncriterion = nn.CrossEntropyLoss().to(device)\noptimizer = optim.SGD(net.parameters(), lr=args.lr)\ntrset, trloader = prepare_train_data(args)\nteset, teloader = prepare_test_data(args)\nprint('Lethean Attack')\nfor i in range(args.epochs):\n idx = random.randint(0, len(trset) - 1)\n img, lbl = trset[idx]\n random_rot = random.randint(1, 3)\n rot_img = rotate_single_with_label(img, random_rot)\n adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter,\n args.batch_size)\n if i % 50 == 49:\n print('%d%%' % ((i + 1) * 100 / 5000))\n err_cls, correct_per_cls, total_per_cls = test(teloader, net,\n verbose=True, print_freq=0)\n print('Epoch %d Test error: %.3f' % (i, err_cls))\n",
"step-4": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport random\nfrom utils.misc import *\nfrom utils.adapt_helpers import *\nfrom utils.rotation import rotate_batch, rotate_single_with_label\nfrom utils.model import resnet18\nfrom utils.train_helpers import normalize, te_transforms\nfrom utils.test_helpers import test\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',\n 'ship', 'truck')\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataroot', default='data/CIFAR-10-C/')\nparser.add_argument('--shared', default=None)\nparser.add_argument('--depth', default=18, type=int)\nparser.add_argument('--group_norm', default=32, type=int)\nparser.add_argument('--batch_size', default=32, type=int)\nparser.add_argument('--workers', default=8, type=int)\nparser.add_argument('--lr', default=0.001, type=float)\nparser.add_argument('--niter', default=1, type=int)\nparser.add_argument('--online', action='store_true')\nparser.add_argument('--shuffle', action='store_true')\nparser.add_argument('--threshold', default=1, type=float)\nparser.add_argument('--epsilon', default=0.2, type=float)\nparser.add_argument('--dset_size', default=0, type=int)\nparser.add_argument('--resume', default=None)\nparser.add_argument('--outf', default='.')\nparser.add_argument('--epochs', default=10, type=int)\nargs = parser.parse_args()\nargs.threshold += 0.001\nmy_makedir(args.outf)\nimport torch.backends.cudnn as cudnn\ncudnn.benchmark = True\n\n\ndef gn_helper(planes):\n return nn.GroupNorm(args.group_norm, planes)\n\n\nnorm_layer = gn_helper\nnet = resnet18(num_classes=10, norm_layer=norm_layer).to(device)\nnet = torch.nn.DataParallel(net)\nprint('Resuming from %s...' 
% args.resume)\nckpt = torch.load('%s/best.pth' % args.resume)\nnet.load_state_dict(ckpt['net'])\nprint('Starting Test Error: %.3f' % ckpt['err_cls'])\ncriterion = nn.CrossEntropyLoss().to(device)\noptimizer = optim.SGD(net.parameters(), lr=args.lr)\ntrset, trloader = prepare_train_data(args)\nteset, teloader = prepare_test_data(args)\nprint('Lethean Attack')\nfor i in range(args.epochs):\n idx = random.randint(0, len(trset) - 1)\n img, lbl = trset[idx]\n random_rot = random.randint(1, 3)\n rot_img = rotate_single_with_label(img, random_rot)\n adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter,\n args.batch_size)\n if i % 50 == 49:\n print('%d%%' % ((i + 1) * 100 / 5000))\n err_cls, correct_per_cls, total_per_cls = test(teloader, net,\n verbose=True, print_freq=0)\n print('Epoch %d Test error: %.3f' % (i, err_cls))\n",
"step-5": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport random\n\nfrom utils.misc import *\nfrom utils.adapt_helpers import *\nfrom utils.rotation import rotate_batch, rotate_single_with_label\nfrom utils.model import resnet18\nfrom utils.train_helpers import normalize, te_transforms\nfrom utils.test_helpers import test\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nclasses = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataroot', default='data/CIFAR-10-C/')\nparser.add_argument('--shared', default=None)\n########################################################################\nparser.add_argument('--depth', default=18, type=int)\nparser.add_argument('--group_norm', default=32, type=int)\nparser.add_argument('--batch_size', default=32, type=int)\nparser.add_argument('--workers', default=8, type=int)\n########################################################################\nparser.add_argument('--lr', default=0.001, type=float)\nparser.add_argument('--niter', default=1, type=int)\nparser.add_argument('--online', action='store_true')\nparser.add_argument('--shuffle', action='store_true')\nparser.add_argument('--threshold', default=1, type=float)\nparser.add_argument('--epsilon', default=0.2, type=float)\nparser.add_argument('--dset_size', default=0, type=int)\n########################################################################\nparser.add_argument('--resume', default=None)\nparser.add_argument('--outf', default='.')\nparser.add_argument('--epochs', default=10, type=int)\n\nargs = parser.parse_args()\nargs.threshold += 0.001\t\t# to correct for numeric errors\nmy_makedir(args.outf)\nimport torch.backends.cudnn as cudnn\ncudnn.benchmark = True\n\ndef gn_helper(planes):\n return nn.GroupNorm(args.group_norm, planes)\nnorm_layer = gn_helper\n\nnet = resnet18(num_classes 
= 10, norm_layer=norm_layer).to(device)\nnet = torch.nn.DataParallel(net)\n\nprint('Resuming from %s...' %(args.resume))\nckpt = torch.load('%s/best.pth' %(args.resume))\nnet.load_state_dict(ckpt['net'])\nprint(\"Starting Test Error: %.3f\" % ckpt['err_cls'])\n\ncriterion = nn.CrossEntropyLoss().to(device)\noptimizer = optim.SGD(net.parameters(), lr=args.lr)\n\ntrset, trloader = prepare_train_data(args)\nteset, teloader = prepare_test_data(args)\n\nprint(\"Lethean Attack\")\nfor i in range(args.epochs):\n idx = random.randint(0, len(trset) - 1)\n img, lbl = trset[idx]\n random_rot = random.randint(1, 3)\n rot_img = rotate_single_with_label(img, random_rot)\n adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter, args.batch_size)\n\n if i % 50 == 49:\n print(\"%d%%\" % ((i + 1) * 100 / 5000))\n err_cls, correct_per_cls, total_per_cls = test(teloader, net, verbose=True, print_freq=0)\n print(\"Epoch %d Test error: %.3f\" % (i, err_cls))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
/home/runner/.cache/pip/pool/f6/0b/37/37d1907955d15568c921a952a47d6e8fcc905cf4f36ab6f99f5fc7315a
|
normal
|
{
"blob_id": "002b795f61645ba2023cdb359167d2a65535d768",
"index": 5710,
"step-1": "/home/runner/.cache/pip/pool/f6/0b/37/37d1907955d15568c921a952a47d6e8fcc905cf4f36ab6f99f5fc7315a",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def getDependenceStr(ins1, ins2, reg):
return f'{ins1} -> {ins2}: {reg}'
def getInstructionStr(ins, reg1, reg2, reg3):
return f'{ins} {reg1} {reg2} {reg3}'
<|reserved_special_token_0|>
def validateInput(str):
if str.strip() == '':
return True
return len(str.split()) == 4
def getInstructionFromUser(insNum):
ins = input(f'S{insNum}: ')
while not validateInput(ins):
print('The value instruction you entered is invalid. Please try again')
print(
'Remember the instruction must be in the format:ins Reg1 Reg2 Reg3 '
)
ins = input(f'S{insNum}: ')
return ins
<|reserved_special_token_0|>
def resolveDependencies(instructions, dependencies):
waws = dependencies['waw']
wars = dependencies['war']
trueDeps = dependencies['trueDeps']
insDict = {}
i = 1
for ins in instructions:
insDict[f'S{i}'] = ins
i += 1
tNum = 0
for dependence, reg in waws.items():
depParts = dependence.split()
insParts = insDict[depParts[0]].split()
try:
trueDepsExist, trueDep = checkTrueDep(dependence, trueDeps, reg)
if trueDepsExist:
trueDepParts = trueDep.split()
ins1 = insDict[trueDepParts[0]].split()
ins2 = insDict[trueDepParts[2]].split()
ins1ChangeIndex = ins1.index(reg)
ins2ChangeIndex = [i for i, x in enumerate(ins2) if x == reg]
ins1[ins1ChangeIndex] = f'T{tNum}'
for index in ins2ChangeIndex:
if index != 1:
ins2[index] = f'T{tNum}'
insDict[trueDepParts[0]] = ' '.join(ins1)
insDict[trueDepParts[2]] = ' '.join(ins2)
else:
changeIndex = insParts.index(reg)
insParts[changeIndex] = f'T{tNum}'
insDict[depParts[0]] = ' '.join(insParts)
tNum += 1
except ValueError:
pass
for dependence, reg in wars.items():
depParts = dependence.split()
insParts = insDict[depParts[0]].split()
try:
changeIndex = insParts.index(reg)
insParts[changeIndex] = f'T{tNum}'
insDict[depParts[0]] = ' '.join(insParts)
tNum += 1
except ValueError:
pass
return insDict
def checkTrueDep(falseDep, trueDeps, reg):
depArr = falseDep.split()
for trueDep, reg2 in trueDeps.items():
trueDepArr = trueDep.split()
if depArr[0] == trueDepArr[0] and reg == reg2:
return True, trueDep
return None, None
def parseDepDictToTableData(dependenciesDict):
tableData = [['WAW', 'WAR', 'True']]
waws = dependenciesDict['waw']
wars = dependenciesDict['war']
trueDeps = dependenciesDict['trueDeps']
wawKeys = list(waws.keys())
warKeys = list(wars.keys())
trueDepKeys = list(trueDeps.keys())
maxLength = max([len(waws), len(wars), len(trueDeps)])
for i in range(0, maxLength):
data = [f'{wawKeys[i]} -> {waws[wawKeys[i]]}' if i < len(wawKeys) else
'', f'{warKeys[i]} -> {wars[warKeys[i]]}' if i < len(warKeys) else
'', f'{trueDepKeys[i]} -> {trueDeps[trueDepKeys[i]]}' if i <
len(trueDepKeys) else '']
tableData.append(data)
return tableData
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getDependenceStr(ins1, ins2, reg):
return f'{ins1} -> {ins2}: {reg}'
def getInstructionStr(ins, reg1, reg2, reg3):
return f'{ins} {reg1} {reg2} {reg3}'
def getInstructionArr(ins):
return ins.split(' ')
def validateInput(str):
if str.strip() == '':
return True
return len(str.split()) == 4
def getInstructionFromUser(insNum):
ins = input(f'S{insNum}: ')
while not validateInput(ins):
print('The value instruction you entered is invalid. Please try again')
print(
'Remember the instruction must be in the format:ins Reg1 Reg2 Reg3 '
)
ins = input(f'S{insNum}: ')
return ins
<|reserved_special_token_0|>
def findWAWs(instructions):
waws = {}
insDict = {}
i = 1
for ins in instructions:
insDict[f'S{i}'] = ins
i += 1
workingIns = copy.deepcopy(insDict)
for key, value in insDict.items():
insParts = value.split()
del workingIns[key]
for key2, otherIns in workingIns.items():
if insParts[1] == otherIns.split()[1]:
waws[f'{key} -> {key2}'] = insParts[1]
break
return waws
<|reserved_special_token_0|>
def findTrueDependencies(ins):
trueDeps = {}
for i in range(len(ins) - 1, -1, -1):
ins1 = ins[i].split()
for k in range(2, len(ins1), 1):
checkReg = ins1[k]
for s in range(i - 1, -1, -1):
ins2 = ins[s].split()
if checkReg == ins2[1]:
trueDeps[f'S{s + 1} -> S{i + 1}'] = checkReg
break
return trueDeps
def resolveDependencies(instructions, dependencies):
waws = dependencies['waw']
wars = dependencies['war']
trueDeps = dependencies['trueDeps']
insDict = {}
i = 1
for ins in instructions:
insDict[f'S{i}'] = ins
i += 1
tNum = 0
for dependence, reg in waws.items():
depParts = dependence.split()
insParts = insDict[depParts[0]].split()
try:
trueDepsExist, trueDep = checkTrueDep(dependence, trueDeps, reg)
if trueDepsExist:
trueDepParts = trueDep.split()
ins1 = insDict[trueDepParts[0]].split()
ins2 = insDict[trueDepParts[2]].split()
ins1ChangeIndex = ins1.index(reg)
ins2ChangeIndex = [i for i, x in enumerate(ins2) if x == reg]
ins1[ins1ChangeIndex] = f'T{tNum}'
for index in ins2ChangeIndex:
if index != 1:
ins2[index] = f'T{tNum}'
insDict[trueDepParts[0]] = ' '.join(ins1)
insDict[trueDepParts[2]] = ' '.join(ins2)
else:
changeIndex = insParts.index(reg)
insParts[changeIndex] = f'T{tNum}'
insDict[depParts[0]] = ' '.join(insParts)
tNum += 1
except ValueError:
pass
for dependence, reg in wars.items():
depParts = dependence.split()
insParts = insDict[depParts[0]].split()
try:
changeIndex = insParts.index(reg)
insParts[changeIndex] = f'T{tNum}'
insDict[depParts[0]] = ' '.join(insParts)
tNum += 1
except ValueError:
pass
return insDict
def checkTrueDep(falseDep, trueDeps, reg):
depArr = falseDep.split()
for trueDep, reg2 in trueDeps.items():
trueDepArr = trueDep.split()
if depArr[0] == trueDepArr[0] and reg == reg2:
return True, trueDep
return None, None
def parseDepDictToTableData(dependenciesDict):
tableData = [['WAW', 'WAR', 'True']]
waws = dependenciesDict['waw']
wars = dependenciesDict['war']
trueDeps = dependenciesDict['trueDeps']
wawKeys = list(waws.keys())
warKeys = list(wars.keys())
trueDepKeys = list(trueDeps.keys())
maxLength = max([len(waws), len(wars), len(trueDeps)])
for i in range(0, maxLength):
data = [f'{wawKeys[i]} -> {waws[wawKeys[i]]}' if i < len(wawKeys) else
'', f'{warKeys[i]} -> {wars[warKeys[i]]}' if i < len(warKeys) else
'', f'{trueDepKeys[i]} -> {trueDeps[trueDepKeys[i]]}' if i <
len(trueDepKeys) else '']
tableData.append(data)
return tableData
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getDependenceStr(ins1, ins2, reg):
return f'{ins1} -> {ins2}: {reg}'
def getInstructionStr(ins, reg1, reg2, reg3):
return f'{ins} {reg1} {reg2} {reg3}'
def getInstructionArr(ins):
return ins.split(' ')
def validateInput(str):
if str.strip() == '':
return True
return len(str.split()) == 4
def getInstructionFromUser(insNum):
ins = input(f'S{insNum}: ')
while not validateInput(ins):
print('The value instruction you entered is invalid. Please try again')
print(
'Remember the instruction must be in the format:ins Reg1 Reg2 Reg3 '
)
ins = input(f'S{insNum}: ')
return ins
def findDependencies(instructions):
dependencies = {'waw': findWAWs(instructions), 'war': findWARs(
instructions), 'trueDeps': findTrueDependencies(instructions)}
return dependencies
def findWAWs(instructions):
waws = {}
insDict = {}
i = 1
for ins in instructions:
insDict[f'S{i}'] = ins
i += 1
workingIns = copy.deepcopy(insDict)
for key, value in insDict.items():
insParts = value.split()
del workingIns[key]
for key2, otherIns in workingIns.items():
if insParts[1] == otherIns.split()[1]:
waws[f'{key} -> {key2}'] = insParts[1]
break
return waws
<|reserved_special_token_0|>
def findTrueDependencies(ins):
trueDeps = {}
for i in range(len(ins) - 1, -1, -1):
ins1 = ins[i].split()
for k in range(2, len(ins1), 1):
checkReg = ins1[k]
for s in range(i - 1, -1, -1):
ins2 = ins[s].split()
if checkReg == ins2[1]:
trueDeps[f'S{s + 1} -> S{i + 1}'] = checkReg
break
return trueDeps
def resolveDependencies(instructions, dependencies):
waws = dependencies['waw']
wars = dependencies['war']
trueDeps = dependencies['trueDeps']
insDict = {}
i = 1
for ins in instructions:
insDict[f'S{i}'] = ins
i += 1
tNum = 0
for dependence, reg in waws.items():
depParts = dependence.split()
insParts = insDict[depParts[0]].split()
try:
trueDepsExist, trueDep = checkTrueDep(dependence, trueDeps, reg)
if trueDepsExist:
trueDepParts = trueDep.split()
ins1 = insDict[trueDepParts[0]].split()
ins2 = insDict[trueDepParts[2]].split()
ins1ChangeIndex = ins1.index(reg)
ins2ChangeIndex = [i for i, x in enumerate(ins2) if x == reg]
ins1[ins1ChangeIndex] = f'T{tNum}'
for index in ins2ChangeIndex:
if index != 1:
ins2[index] = f'T{tNum}'
insDict[trueDepParts[0]] = ' '.join(ins1)
insDict[trueDepParts[2]] = ' '.join(ins2)
else:
changeIndex = insParts.index(reg)
insParts[changeIndex] = f'T{tNum}'
insDict[depParts[0]] = ' '.join(insParts)
tNum += 1
except ValueError:
pass
for dependence, reg in wars.items():
depParts = dependence.split()
insParts = insDict[depParts[0]].split()
try:
changeIndex = insParts.index(reg)
insParts[changeIndex] = f'T{tNum}'
insDict[depParts[0]] = ' '.join(insParts)
tNum += 1
except ValueError:
pass
return insDict
def checkTrueDep(falseDep, trueDeps, reg):
depArr = falseDep.split()
for trueDep, reg2 in trueDeps.items():
trueDepArr = trueDep.split()
if depArr[0] == trueDepArr[0] and reg == reg2:
return True, trueDep
return None, None
def parseDepDictToTableData(dependenciesDict):
tableData = [['WAW', 'WAR', 'True']]
waws = dependenciesDict['waw']
wars = dependenciesDict['war']
trueDeps = dependenciesDict['trueDeps']
wawKeys = list(waws.keys())
warKeys = list(wars.keys())
trueDepKeys = list(trueDeps.keys())
maxLength = max([len(waws), len(wars), len(trueDeps)])
for i in range(0, maxLength):
data = [f'{wawKeys[i]} -> {waws[wawKeys[i]]}' if i < len(wawKeys) else
'', f'{warKeys[i]} -> {wars[warKeys[i]]}' if i < len(warKeys) else
'', f'{trueDepKeys[i]} -> {trueDeps[trueDepKeys[i]]}' if i <
len(trueDepKeys) else '']
tableData.append(data)
return tableData
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getDependenceStr(ins1, ins2, reg):
return f'{ins1} -> {ins2}: {reg}'
def getInstructionStr(ins, reg1, reg2, reg3):
return f'{ins} {reg1} {reg2} {reg3}'
def getInstructionArr(ins):
return ins.split(' ')
def validateInput(str):
if str.strip() == '':
return True
return len(str.split()) == 4
def getInstructionFromUser(insNum):
ins = input(f'S{insNum}: ')
while not validateInput(ins):
print('The value instruction you entered is invalid. Please try again')
print(
'Remember the instruction must be in the format:ins Reg1 Reg2 Reg3 '
)
ins = input(f'S{insNum}: ')
return ins
def findDependencies(instructions):
dependencies = {'waw': findWAWs(instructions), 'war': findWARs(
instructions), 'trueDeps': findTrueDependencies(instructions)}
return dependencies
def findWAWs(instructions):
waws = {}
insDict = {}
i = 1
for ins in instructions:
insDict[f'S{i}'] = ins
i += 1
workingIns = copy.deepcopy(insDict)
for key, value in insDict.items():
insParts = value.split()
del workingIns[key]
for key2, otherIns in workingIns.items():
if insParts[1] == otherIns.split()[1]:
waws[f'{key} -> {key2}'] = insParts[1]
break
return waws
def findWARs(ins):
wars = {}
insDict = {}
i = 1
for ins in instructions:
insDict[f'S{i}'] = ins
i += 1
workingIns = copy.deepcopy(insDict)
for key, value in insDict.items():
insParts = value.split()
del workingIns[key]
for key2, otherIns in workingIns.items():
if insParts[2] == otherIns.split()[1]:
wars[f'{key} -> {key2}'] = insParts[2]
if insParts[3] == otherIns.split()[1]:
wars[f'{key} -> {key2}'] = insParts[3]
return wars
def findTrueDependencies(ins):
trueDeps = {}
for i in range(len(ins) - 1, -1, -1):
ins1 = ins[i].split()
for k in range(2, len(ins1), 1):
checkReg = ins1[k]
for s in range(i - 1, -1, -1):
ins2 = ins[s].split()
if checkReg == ins2[1]:
trueDeps[f'S{s + 1} -> S{i + 1}'] = checkReg
break
return trueDeps
def resolveDependencies(instructions, dependencies):
waws = dependencies['waw']
wars = dependencies['war']
trueDeps = dependencies['trueDeps']
insDict = {}
i = 1
for ins in instructions:
insDict[f'S{i}'] = ins
i += 1
tNum = 0
for dependence, reg in waws.items():
depParts = dependence.split()
insParts = insDict[depParts[0]].split()
try:
trueDepsExist, trueDep = checkTrueDep(dependence, trueDeps, reg)
if trueDepsExist:
trueDepParts = trueDep.split()
ins1 = insDict[trueDepParts[0]].split()
ins2 = insDict[trueDepParts[2]].split()
ins1ChangeIndex = ins1.index(reg)
ins2ChangeIndex = [i for i, x in enumerate(ins2) if x == reg]
ins1[ins1ChangeIndex] = f'T{tNum}'
for index in ins2ChangeIndex:
if index != 1:
ins2[index] = f'T{tNum}'
insDict[trueDepParts[0]] = ' '.join(ins1)
insDict[trueDepParts[2]] = ' '.join(ins2)
else:
changeIndex = insParts.index(reg)
insParts[changeIndex] = f'T{tNum}'
insDict[depParts[0]] = ' '.join(insParts)
tNum += 1
except ValueError:
pass
for dependence, reg in wars.items():
depParts = dependence.split()
insParts = insDict[depParts[0]].split()
try:
changeIndex = insParts.index(reg)
insParts[changeIndex] = f'T{tNum}'
insDict[depParts[0]] = ' '.join(insParts)
tNum += 1
except ValueError:
pass
return insDict
def checkTrueDep(falseDep, trueDeps, reg):
depArr = falseDep.split()
for trueDep, reg2 in trueDeps.items():
trueDepArr = trueDep.split()
if depArr[0] == trueDepArr[0] and reg == reg2:
return True, trueDep
return None, None
def parseDepDictToTableData(dependenciesDict):
    """Flatten the three dependency dicts into rows for an AsciiTable.

    Row 0 is the header; each later row holds one entry per column
    ('key -> register'), padded with '' where a column runs out.
    """
    columns = []
    for name in ('waw', 'war', 'trueDeps'):
        deps = dependenciesDict[name]
        columns.append([f'{k} -> {v}' for k, v in deps.items()])
    # Pad every column to the height of the tallest one.
    height = max(len(col) for col in columns)
    for col in columns:
        col.extend([''] * (height - len(col)))
    tableData = [['WAW', 'WAR', 'True']]
    for row in zip(*columns):
        tableData.append(list(row))
    return tableData
if __name__ == '__main__':
    # Interactive driver: read up to 5 MIPS instructions, display any false
    # dependencies found, then show the register-renamed program.
    numIns = 0
    maxNumIns = 5
    stop = False
    instructions = []
    # Message fix: "simplypress" -> "simply press".
    print(
        "Enter up to 5 MIPs instructions below. When you're done simply press enter without typing in any input"
        )
    print('Instructions must be in the format: ins Reg1 Reg2 Reg3')
    print('i.e. add R1 R2 R3')
    while numIns < maxNumIns and not stop:
        ins = getInstructionFromUser(numIns + 1)
        if ins != '':
            instructions.append(ins)
            numIns += 1
        else:
            # A blank line ends input early.
            stop = True
    # Show the program as entered, labelled S1..Sn.
    table_data = [['Given Instructions']]
    i = 1
    for ins in instructions:
        table_data.append([f'S{i} - ' + ins])
        i += 1
    table = AsciiTable(table_data)
    print('Here are the instructions provided:')
    print('\n' + table.table + '\n')
    # Message fix: added the missing "to" ("Press Enter find" -> "to find").
    input('Press Enter to find any existing false dependencies\n')
    dependenciesDict = findDependencies(instructions)
    table = AsciiTable(parseDepDictToTableData(dependenciesDict))
    print('\n' + table.table + '\n')
    input('\nPress Enter to begin renaming registers')
    resolvedInstructions = resolveDependencies(instructions, dependenciesDict)
    resolvedInstructionsArr = []
    for key, value in resolvedInstructions.items():
        resolvedInstructionsArr.append(f'{key} - {value}')
    resolvedTableData = [['Resolved Instructions']]
    for ins in resolvedInstructionsArr:
        resolvedTableData.append([ins])
    table = AsciiTable(resolvedTableData)
    print(table.table + '\n')
    input('Press Enter to continue')
    print('DONE!\n')
<|reserved_special_token_1|>
from terminaltables import AsciiTable
import copy
# NOTE(review): sample/demo table left over from development - both names are
# rebuilt from real data under __main__ and this module-level copy is never
# displayed; presumably safe to delete, but kept to avoid changing
# module-import side effects.
table_data = [
    ['WAR', 'WAW'],
    ['S1 -> S2: R1', 'row1 column2'],
    ['row2 column1', 'row2 column2'],
    ['row3 column1', 'row3 column2']
]
table = AsciiTable(table_data)
def getDependenceStr(ins1, ins2, reg):
    """Render a dependence edge label such as 'S1 -> S2: R1'."""
    return '{0} -> {1}: {2}'.format(ins1, ins2, reg)
def getInstructionStr(ins, reg1, reg2, reg3):
    """Join an opcode and three register names into one instruction line."""
    return ' '.join((ins, reg1, reg2, reg3))
def getInstructionArr(ins):
    """Split an instruction line on single spaces into its tokens.

    Note: splits on ' ' exactly, so consecutive spaces yield empty tokens.
    """
    separator = ' '
    return ins.split(separator)
def validateInput(text):
    """Return True for blank input (the end-of-entry sentinel) or for a
    well-formed 4-token instruction line.

    Blank/whitespace-only input is how the prompt loop stops collecting
    instructions, so it counts as valid here.
    """
    # Fix: the parameter was named `str`, shadowing the builtin; the only
    # caller in this file passes it positionally, so the rename is safe.
    if text.strip() == '':
        return True
    return len(text.split()) == 4
def getInstructionFromUser(insNum):
    """Prompt until the user enters a blank line or a valid 4-token
    instruction, then return the raw line (possibly '').

    Blocks on stdin; the caller interprets '' as "stop collecting".
    """
    ins = input(f"S{insNum}: ")
    while not validateInput(ins):
        print("The value instruction you entered is invalid. Please try again")
        # Message fix: the adjacent literals previously rendered as
        # "format:ins Reg1 ..." (missing space after the colon).
        print("Remember the instruction must be in the format: "
              "ins Reg1 Reg2 Reg3")
        ins = input(f"S{insNum}: ")
    return ins
def findDependencies(instructions):
    """Run all three hazard scans and bundle the results in one dict with
    keys 'waw', 'war' and 'trueDeps'."""
    return {
        'waw': findWAWs(instructions),
        'war': findWARs(instructions),
        'trueDeps': findTrueDependencies(instructions),
    }
def findWAWs(instructions):
    """Find write-after-write hazards.

    Instruction Si conflicts with a later Sk when both write the same
    destination register (token index 1).  Only the first later writer is
    recorded for each instruction.  Returns {'Si -> Sk': register}.
    """
    # The original built a label dict, deep-copied it, and deleted keys while
    # scanning just to mean "all later instructions"; slicing a tokenized
    # list expresses the same thing without any copying.
    waws = {}
    labeled = [(f'S{i}', text.split())
               for i, text in enumerate(instructions, start=1)]
    for idx, (label, parts) in enumerate(labeled):
        dest = parts[1]
        for laterLabel, laterParts in labeled[idx + 1:]:
            if laterParts[1] == dest:
                waws[f'{label} -> {laterLabel}'] = dest
                break  # record only the first later occurrence per instruction
    return waws
def findWARs(ins):
    """Find write-after-read hazards.

    Instruction Si conflicts with a later Sk when Sk writes a register that
    Si reads (source operands are token indices 2 and 3).  If both of Si's
    sources match the same later writer, the second source wins (matching
    the original's overwrite behaviour).  Returns {'Si -> Sk': register}.
    """
    # Bug fix: the original looped over the module-level `instructions`
    # global and silently ignored its `ins` parameter (the loop variable
    # even shadowed it).  It now scans the argument it is given; the only
    # in-file caller already passes the same list, so behaviour for it is
    # unchanged.
    wars = {}
    labeled = [(f'S{i}', text.split())
               for i, text in enumerate(ins, start=1)]
    for idx, (label, parts) in enumerate(labeled):
        for laterLabel, laterParts in labeled[idx + 1:]:
            dest = laterParts[1]
            if parts[2] == dest:
                wars[f'{label} -> {laterLabel}'] = parts[2]
            if parts[3] == dest:
                wars[f'{label} -> {laterLabel}'] = parts[3]
    return wars
def findTrueDependencies(ins):
    """Detect RAW (true) dependencies between instructions.

    Si truly depends on an earlier Ss when Si reads a register that Ss
    writes; operand reads start at token index 2, the destination is token
    index 1.  Only the nearest preceding writer is recorded per source
    operand.  Returns {'Ss -> Si': register}.
    """
    trueDeps = {}
    tokenized = [line.split() for line in ins]
    for later in range(len(tokenized) - 1, -1, -1):
        for sourceReg in tokenized[later][2:]:
            # Scan backwards for the closest earlier writer of sourceReg.
            for earlier in range(later - 1, -1, -1):
                if tokenized[earlier][1] == sourceReg:
                    trueDeps[f'S{earlier + 1} -> S{later + 1}'] = sourceReg
                    break
    return trueDeps
def resolveDependencies(instructions, dependencies):
    """Break the false (WAW/WAR) dependencies by register renaming.

    Rewrites the offending register occurrences to fresh temporaries
    T0, T1, ... and returns {label: rewritten instruction string} with
    labels 'S1'..'Sn' in program order.  `dependencies` is the dict
    produced by findDependencies().
    """
    waws = dependencies['waw']
    wars = dependencies['war']
    trueDeps = dependencies['trueDeps']
    insDict = {}
    i = 1
    # Label the instructions S1..Sn in program order.
    for ins in instructions:
        insDict[f'S{i}'] = ins
        i += 1
    # tNum is the next unused temporary-register number.
    tNum = 0
    # Resolve WAWs
    for (dependence, reg) in waws.items():
        depParts = dependence.split()
        insParts = insDict[depParts[0]].split()
        try:
            # Check true dependence: if the earlier writer also feeds a RAW
            # dependence on the same register, rename the reader together
            # with the writer so the real dataflow is preserved.
            trueDepsExist, trueDep = checkTrueDep(dependence, trueDeps, reg)
            if trueDepsExist:
                # trueDep has the form 'Ss -> Si': [0] writes reg, [2] reads it.
                trueDepParts = trueDep.split()
                ins1 = insDict[trueDepParts[0]].split()
                ins2 = insDict[trueDepParts[2]].split()
                ins1ChangeIndex = ins1.index(reg)
                ins2ChangeIndex = [i for i, x in enumerate(ins2) if x == reg]
                ins1[ins1ChangeIndex] = f'T{tNum}'
                for index in ins2ChangeIndex:
                    # Token 1 is the destination field; only the reader's
                    # source operands are renamed.
                    if index != 1:
                        ins2[index] = f'T{tNum}'
                insDict[trueDepParts[0]] = ' '.join(ins1)
                insDict[trueDepParts[2]] = ' '.join(ins2)
            else:
                changeIndex = insParts.index(reg)
                insParts[changeIndex] = f'T{tNum}'
                insDict[depParts[0]] = ' '.join(insParts)
            tNum += 1
        except ValueError:
            # reg was already renamed by an earlier iteration and no longer
            # appears in the instruction - nothing left to do here.
            pass
    # Resolve WARs
    for (dependence, reg) in wars.items():
        depParts = dependence.split()
        insParts = insDict[depParts[0]].split()
        try:
            changeIndex = insParts.index(reg)
            insParts[changeIndex] = f'T{tNum}'
            insDict[depParts[0]] = ' '.join(insParts)
            tNum += 1
        except ValueError:
            pass
    return insDict
def checkTrueDep(falseDep, trueDeps, reg):
    """Return (True, key) for the first true dependency sharing this false
    dependence's source statement and register, else (None, None)."""
    source = falseDep.split()[0]
    match = next(
        (key for key, depReg in trueDeps.items()
         if key.split()[0] == source and depReg == reg),
        None,
    )
    if match is None:
        return (None, None)
    return (True, match)
def parseDepDictToTableData(dependenciesDict):
    """Flatten the dependency dicts into row data for an AsciiTable.

    Row 0 is the header ['WAW', 'WAR', 'True']; each later row holds one
    'key -> register' entry per column, with '' where a column is shorter.
    """
    def column(deps):
        # One formatted cell per dependency, in insertion order.
        return [f'{key} -> {value}' for key, value in deps.items()]

    wawCol = column(dependenciesDict['waw'])
    warCol = column(dependenciesDict['war'])
    trueCol = column(dependenciesDict['trueDeps'])

    tableData = [['WAW', 'WAR', 'True']]
    rows = max(len(wawCol), len(warCol), len(trueCol))
    for i in range(rows):
        tableData.append([
            wawCol[i] if i < len(wawCol) else '',
            warCol[i] if i < len(warCol) else '',
            trueCol[i] if i < len(trueCol) else '',
        ])
    return tableData
if __name__ == '__main__':
    # Interactive driver: read up to 5 MIPS instructions, display any false
    # dependencies found, then show the register-renamed program.
    numIns = 0
    maxNumIns = 5
    stop = False
    instructions = []

    # Message fix: the adjacent literals previously joined as "simplypress".
    print("Enter up to 5 MIPs instructions below. When you're done simply "
          "press enter without typing in any input")
    print("Instructions must be in the format: ins Reg1 Reg2 Reg3")
    print("i.e. add R1 R2 R3")
    while numIns < maxNumIns and not stop:
        ins = getInstructionFromUser(numIns + 1)
        if ins != '':
            instructions.append(ins)
            numIns += 1
        else:
            # A blank line ends input early.
            stop = True

    # Generate the table data needed to show the instructions given.
    table_data = [
        ['Given Instructions'],
    ]
    i = 1
    for ins in instructions:
        table_data.append([f'S{i} - ' + ins])
        i += 1

    table = AsciiTable(table_data)
    print("Here are the instructions provided:")
    print('\n' + table.table + '\n')
    # Message fix: added the missing "to" ("Press Enter find" -> "to find").
    input("Press Enter to find any existing false dependencies\n")
    dependenciesDict = findDependencies(instructions)
    table = AsciiTable(parseDepDictToTableData(dependenciesDict))
    print('\n' + table.table + '\n')
    input("\nPress Enter to begin renaming registers")
    resolvedInstructions = resolveDependencies(instructions, dependenciesDict)
    resolvedInstructionsArr = []
    for (key, value) in resolvedInstructions.items():
        resolvedInstructionsArr.append(f'{key} - {value}')
    resolvedTableData = [
        ['Resolved Instructions']
    ]
    for ins in resolvedInstructionsArr:
        resolvedTableData.append([ins])
    table = AsciiTable(resolvedTableData)
    print(table.table + '\n')
    input('Press Enter to continue')
    print('DONE!\n')
|
flexible
|
{
"blob_id": "e045dc348fb2e9de51dbeada1d1826211cf89eae",
"index": 3114,
"step-1": "<mask token>\n\n\ndef getDependenceStr(ins1, ins2, reg):\n return f'{ins1} -> {ins2}: {reg}'\n\n\ndef getInstructionStr(ins, reg1, reg2, reg3):\n return f'{ins} {reg1} {reg2} {reg3}'\n\n\n<mask token>\n\n\ndef validateInput(str):\n if str.strip() == '':\n return True\n return len(str.split()) == 4\n\n\ndef getInstructionFromUser(insNum):\n ins = input(f'S{insNum}: ')\n while not validateInput(ins):\n print('The value instruction you entered is invalid. Please try again')\n print(\n 'Remember the instruction must be in the format:ins Reg1 Reg2 Reg3 '\n )\n ins = input(f'S{insNum}: ')\n return ins\n\n\n<mask token>\n\n\ndef resolveDependencies(instructions, dependencies):\n waws = dependencies['waw']\n wars = dependencies['war']\n trueDeps = dependencies['trueDeps']\n insDict = {}\n i = 1\n for ins in instructions:\n insDict[f'S{i}'] = ins\n i += 1\n tNum = 0\n for dependence, reg in waws.items():\n depParts = dependence.split()\n insParts = insDict[depParts[0]].split()\n try:\n trueDepsExist, trueDep = checkTrueDep(dependence, trueDeps, reg)\n if trueDepsExist:\n trueDepParts = trueDep.split()\n ins1 = insDict[trueDepParts[0]].split()\n ins2 = insDict[trueDepParts[2]].split()\n ins1ChangeIndex = ins1.index(reg)\n ins2ChangeIndex = [i for i, x in enumerate(ins2) if x == reg]\n ins1[ins1ChangeIndex] = f'T{tNum}'\n for index in ins2ChangeIndex:\n if index != 1:\n ins2[index] = f'T{tNum}'\n insDict[trueDepParts[0]] = ' '.join(ins1)\n insDict[trueDepParts[2]] = ' '.join(ins2)\n else:\n changeIndex = insParts.index(reg)\n insParts[changeIndex] = f'T{tNum}'\n insDict[depParts[0]] = ' '.join(insParts)\n tNum += 1\n except ValueError:\n pass\n for dependence, reg in wars.items():\n depParts = dependence.split()\n insParts = insDict[depParts[0]].split()\n try:\n changeIndex = insParts.index(reg)\n insParts[changeIndex] = f'T{tNum}'\n insDict[depParts[0]] = ' '.join(insParts)\n tNum += 1\n except ValueError:\n pass\n return insDict\n\n\ndef checkTrueDep(falseDep, 
trueDeps, reg):\n depArr = falseDep.split()\n for trueDep, reg2 in trueDeps.items():\n trueDepArr = trueDep.split()\n if depArr[0] == trueDepArr[0] and reg == reg2:\n return True, trueDep\n return None, None\n\n\ndef parseDepDictToTableData(dependenciesDict):\n tableData = [['WAW', 'WAR', 'True']]\n waws = dependenciesDict['waw']\n wars = dependenciesDict['war']\n trueDeps = dependenciesDict['trueDeps']\n wawKeys = list(waws.keys())\n warKeys = list(wars.keys())\n trueDepKeys = list(trueDeps.keys())\n maxLength = max([len(waws), len(wars), len(trueDeps)])\n for i in range(0, maxLength):\n data = [f'{wawKeys[i]} -> {waws[wawKeys[i]]}' if i < len(wawKeys) else\n '', f'{warKeys[i]} -> {wars[warKeys[i]]}' if i < len(warKeys) else\n '', f'{trueDepKeys[i]} -> {trueDeps[trueDepKeys[i]]}' if i <\n len(trueDepKeys) else '']\n tableData.append(data)\n return tableData\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getDependenceStr(ins1, ins2, reg):\n return f'{ins1} -> {ins2}: {reg}'\n\n\ndef getInstructionStr(ins, reg1, reg2, reg3):\n return f'{ins} {reg1} {reg2} {reg3}'\n\n\ndef getInstructionArr(ins):\n return ins.split(' ')\n\n\ndef validateInput(str):\n if str.strip() == '':\n return True\n return len(str.split()) == 4\n\n\ndef getInstructionFromUser(insNum):\n ins = input(f'S{insNum}: ')\n while not validateInput(ins):\n print('The value instruction you entered is invalid. Please try again')\n print(\n 'Remember the instruction must be in the format:ins Reg1 Reg2 Reg3 '\n )\n ins = input(f'S{insNum}: ')\n return ins\n\n\n<mask token>\n\n\ndef findWAWs(instructions):\n waws = {}\n insDict = {}\n i = 1\n for ins in instructions:\n insDict[f'S{i}'] = ins\n i += 1\n workingIns = copy.deepcopy(insDict)\n for key, value in insDict.items():\n insParts = value.split()\n del workingIns[key]\n for key2, otherIns in workingIns.items():\n if insParts[1] == otherIns.split()[1]:\n waws[f'{key} -> {key2}'] = insParts[1]\n break\n return waws\n\n\n<mask token>\n\n\ndef findTrueDependencies(ins):\n trueDeps = {}\n for i in range(len(ins) - 1, -1, -1):\n ins1 = ins[i].split()\n for k in range(2, len(ins1), 1):\n checkReg = ins1[k]\n for s in range(i - 1, -1, -1):\n ins2 = ins[s].split()\n if checkReg == ins2[1]:\n trueDeps[f'S{s + 1} -> S{i + 1}'] = checkReg\n break\n return trueDeps\n\n\ndef resolveDependencies(instructions, dependencies):\n waws = dependencies['waw']\n wars = dependencies['war']\n trueDeps = dependencies['trueDeps']\n insDict = {}\n i = 1\n for ins in instructions:\n insDict[f'S{i}'] = ins\n i += 1\n tNum = 0\n for dependence, reg in waws.items():\n depParts = dependence.split()\n insParts = insDict[depParts[0]].split()\n try:\n trueDepsExist, trueDep = checkTrueDep(dependence, trueDeps, reg)\n if trueDepsExist:\n trueDepParts = trueDep.split()\n ins1 = insDict[trueDepParts[0]].split()\n ins2 = insDict[trueDepParts[2]].split()\n 
ins1ChangeIndex = ins1.index(reg)\n ins2ChangeIndex = [i for i, x in enumerate(ins2) if x == reg]\n ins1[ins1ChangeIndex] = f'T{tNum}'\n for index in ins2ChangeIndex:\n if index != 1:\n ins2[index] = f'T{tNum}'\n insDict[trueDepParts[0]] = ' '.join(ins1)\n insDict[trueDepParts[2]] = ' '.join(ins2)\n else:\n changeIndex = insParts.index(reg)\n insParts[changeIndex] = f'T{tNum}'\n insDict[depParts[0]] = ' '.join(insParts)\n tNum += 1\n except ValueError:\n pass\n for dependence, reg in wars.items():\n depParts = dependence.split()\n insParts = insDict[depParts[0]].split()\n try:\n changeIndex = insParts.index(reg)\n insParts[changeIndex] = f'T{tNum}'\n insDict[depParts[0]] = ' '.join(insParts)\n tNum += 1\n except ValueError:\n pass\n return insDict\n\n\ndef checkTrueDep(falseDep, trueDeps, reg):\n depArr = falseDep.split()\n for trueDep, reg2 in trueDeps.items():\n trueDepArr = trueDep.split()\n if depArr[0] == trueDepArr[0] and reg == reg2:\n return True, trueDep\n return None, None\n\n\ndef parseDepDictToTableData(dependenciesDict):\n tableData = [['WAW', 'WAR', 'True']]\n waws = dependenciesDict['waw']\n wars = dependenciesDict['war']\n trueDeps = dependenciesDict['trueDeps']\n wawKeys = list(waws.keys())\n warKeys = list(wars.keys())\n trueDepKeys = list(trueDeps.keys())\n maxLength = max([len(waws), len(wars), len(trueDeps)])\n for i in range(0, maxLength):\n data = [f'{wawKeys[i]} -> {waws[wawKeys[i]]}' if i < len(wawKeys) else\n '', f'{warKeys[i]} -> {wars[warKeys[i]]}' if i < len(warKeys) else\n '', f'{trueDepKeys[i]} -> {trueDeps[trueDepKeys[i]]}' if i <\n len(trueDepKeys) else '']\n tableData.append(data)\n return tableData\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getDependenceStr(ins1, ins2, reg):\n return f'{ins1} -> {ins2}: {reg}'\n\n\ndef getInstructionStr(ins, reg1, reg2, reg3):\n return f'{ins} {reg1} {reg2} {reg3}'\n\n\ndef getInstructionArr(ins):\n return ins.split(' ')\n\n\ndef validateInput(str):\n if str.strip() == '':\n return True\n return len(str.split()) == 4\n\n\ndef getInstructionFromUser(insNum):\n ins = input(f'S{insNum}: ')\n while not validateInput(ins):\n print('The value instruction you entered is invalid. Please try again')\n print(\n 'Remember the instruction must be in the format:ins Reg1 Reg2 Reg3 '\n )\n ins = input(f'S{insNum}: ')\n return ins\n\n\ndef findDependencies(instructions):\n dependencies = {'waw': findWAWs(instructions), 'war': findWARs(\n instructions), 'trueDeps': findTrueDependencies(instructions)}\n return dependencies\n\n\ndef findWAWs(instructions):\n waws = {}\n insDict = {}\n i = 1\n for ins in instructions:\n insDict[f'S{i}'] = ins\n i += 1\n workingIns = copy.deepcopy(insDict)\n for key, value in insDict.items():\n insParts = value.split()\n del workingIns[key]\n for key2, otherIns in workingIns.items():\n if insParts[1] == otherIns.split()[1]:\n waws[f'{key} -> {key2}'] = insParts[1]\n break\n return waws\n\n\n<mask token>\n\n\ndef findTrueDependencies(ins):\n trueDeps = {}\n for i in range(len(ins) - 1, -1, -1):\n ins1 = ins[i].split()\n for k in range(2, len(ins1), 1):\n checkReg = ins1[k]\n for s in range(i - 1, -1, -1):\n ins2 = ins[s].split()\n if checkReg == ins2[1]:\n trueDeps[f'S{s + 1} -> S{i + 1}'] = checkReg\n break\n return trueDeps\n\n\ndef resolveDependencies(instructions, dependencies):\n waws = dependencies['waw']\n wars = dependencies['war']\n trueDeps = dependencies['trueDeps']\n insDict = {}\n i = 1\n for ins in instructions:\n insDict[f'S{i}'] = ins\n i += 1\n tNum = 0\n for dependence, reg in waws.items():\n depParts = dependence.split()\n insParts = insDict[depParts[0]].split()\n try:\n trueDepsExist, trueDep = 
checkTrueDep(dependence, trueDeps, reg)\n if trueDepsExist:\n trueDepParts = trueDep.split()\n ins1 = insDict[trueDepParts[0]].split()\n ins2 = insDict[trueDepParts[2]].split()\n ins1ChangeIndex = ins1.index(reg)\n ins2ChangeIndex = [i for i, x in enumerate(ins2) if x == reg]\n ins1[ins1ChangeIndex] = f'T{tNum}'\n for index in ins2ChangeIndex:\n if index != 1:\n ins2[index] = f'T{tNum}'\n insDict[trueDepParts[0]] = ' '.join(ins1)\n insDict[trueDepParts[2]] = ' '.join(ins2)\n else:\n changeIndex = insParts.index(reg)\n insParts[changeIndex] = f'T{tNum}'\n insDict[depParts[0]] = ' '.join(insParts)\n tNum += 1\n except ValueError:\n pass\n for dependence, reg in wars.items():\n depParts = dependence.split()\n insParts = insDict[depParts[0]].split()\n try:\n changeIndex = insParts.index(reg)\n insParts[changeIndex] = f'T{tNum}'\n insDict[depParts[0]] = ' '.join(insParts)\n tNum += 1\n except ValueError:\n pass\n return insDict\n\n\ndef checkTrueDep(falseDep, trueDeps, reg):\n depArr = falseDep.split()\n for trueDep, reg2 in trueDeps.items():\n trueDepArr = trueDep.split()\n if depArr[0] == trueDepArr[0] and reg == reg2:\n return True, trueDep\n return None, None\n\n\ndef parseDepDictToTableData(dependenciesDict):\n tableData = [['WAW', 'WAR', 'True']]\n waws = dependenciesDict['waw']\n wars = dependenciesDict['war']\n trueDeps = dependenciesDict['trueDeps']\n wawKeys = list(waws.keys())\n warKeys = list(wars.keys())\n trueDepKeys = list(trueDeps.keys())\n maxLength = max([len(waws), len(wars), len(trueDeps)])\n for i in range(0, maxLength):\n data = [f'{wawKeys[i]} -> {waws[wawKeys[i]]}' if i < len(wawKeys) else\n '', f'{warKeys[i]} -> {wars[warKeys[i]]}' if i < len(warKeys) else\n '', f'{trueDepKeys[i]} -> {trueDeps[trueDepKeys[i]]}' if i <\n len(trueDepKeys) else '']\n tableData.append(data)\n return tableData\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef getDependenceStr(ins1, ins2, reg):\n return f'{ins1} -> {ins2}: {reg}'\n\n\ndef getInstructionStr(ins, reg1, reg2, reg3):\n return f'{ins} {reg1} {reg2} {reg3}'\n\n\ndef getInstructionArr(ins):\n return ins.split(' ')\n\n\ndef validateInput(str):\n if str.strip() == '':\n return True\n return len(str.split()) == 4\n\n\ndef getInstructionFromUser(insNum):\n ins = input(f'S{insNum}: ')\n while not validateInput(ins):\n print('The value instruction you entered is invalid. Please try again')\n print(\n 'Remember the instruction must be in the format:ins Reg1 Reg2 Reg3 '\n )\n ins = input(f'S{insNum}: ')\n return ins\n\n\ndef findDependencies(instructions):\n dependencies = {'waw': findWAWs(instructions), 'war': findWARs(\n instructions), 'trueDeps': findTrueDependencies(instructions)}\n return dependencies\n\n\ndef findWAWs(instructions):\n waws = {}\n insDict = {}\n i = 1\n for ins in instructions:\n insDict[f'S{i}'] = ins\n i += 1\n workingIns = copy.deepcopy(insDict)\n for key, value in insDict.items():\n insParts = value.split()\n del workingIns[key]\n for key2, otherIns in workingIns.items():\n if insParts[1] == otherIns.split()[1]:\n waws[f'{key} -> {key2}'] = insParts[1]\n break\n return waws\n\n\ndef findWARs(ins):\n wars = {}\n insDict = {}\n i = 1\n for ins in instructions:\n insDict[f'S{i}'] = ins\n i += 1\n workingIns = copy.deepcopy(insDict)\n for key, value in insDict.items():\n insParts = value.split()\n del workingIns[key]\n for key2, otherIns in workingIns.items():\n if insParts[2] == otherIns.split()[1]:\n wars[f'{key} -> {key2}'] = insParts[2]\n if insParts[3] == otherIns.split()[1]:\n wars[f'{key} -> {key2}'] = insParts[3]\n return wars\n\n\ndef findTrueDependencies(ins):\n trueDeps = {}\n for i in range(len(ins) - 1, -1, -1):\n ins1 = ins[i].split()\n for k in range(2, len(ins1), 1):\n checkReg = ins1[k]\n for s in range(i - 1, -1, -1):\n ins2 = ins[s].split()\n if checkReg == ins2[1]:\n trueDeps[f'S{s + 1} -> S{i + 
1}'] = checkReg\n break\n return trueDeps\n\n\ndef resolveDependencies(instructions, dependencies):\n waws = dependencies['waw']\n wars = dependencies['war']\n trueDeps = dependencies['trueDeps']\n insDict = {}\n i = 1\n for ins in instructions:\n insDict[f'S{i}'] = ins\n i += 1\n tNum = 0\n for dependence, reg in waws.items():\n depParts = dependence.split()\n insParts = insDict[depParts[0]].split()\n try:\n trueDepsExist, trueDep = checkTrueDep(dependence, trueDeps, reg)\n if trueDepsExist:\n trueDepParts = trueDep.split()\n ins1 = insDict[trueDepParts[0]].split()\n ins2 = insDict[trueDepParts[2]].split()\n ins1ChangeIndex = ins1.index(reg)\n ins2ChangeIndex = [i for i, x in enumerate(ins2) if x == reg]\n ins1[ins1ChangeIndex] = f'T{tNum}'\n for index in ins2ChangeIndex:\n if index != 1:\n ins2[index] = f'T{tNum}'\n insDict[trueDepParts[0]] = ' '.join(ins1)\n insDict[trueDepParts[2]] = ' '.join(ins2)\n else:\n changeIndex = insParts.index(reg)\n insParts[changeIndex] = f'T{tNum}'\n insDict[depParts[0]] = ' '.join(insParts)\n tNum += 1\n except ValueError:\n pass\n for dependence, reg in wars.items():\n depParts = dependence.split()\n insParts = insDict[depParts[0]].split()\n try:\n changeIndex = insParts.index(reg)\n insParts[changeIndex] = f'T{tNum}'\n insDict[depParts[0]] = ' '.join(insParts)\n tNum += 1\n except ValueError:\n pass\n return insDict\n\n\ndef checkTrueDep(falseDep, trueDeps, reg):\n depArr = falseDep.split()\n for trueDep, reg2 in trueDeps.items():\n trueDepArr = trueDep.split()\n if depArr[0] == trueDepArr[0] and reg == reg2:\n return True, trueDep\n return None, None\n\n\ndef parseDepDictToTableData(dependenciesDict):\n tableData = [['WAW', 'WAR', 'True']]\n waws = dependenciesDict['waw']\n wars = dependenciesDict['war']\n trueDeps = dependenciesDict['trueDeps']\n wawKeys = list(waws.keys())\n warKeys = list(wars.keys())\n trueDepKeys = list(trueDeps.keys())\n maxLength = max([len(waws), len(wars), len(trueDeps)])\n for i in range(0, 
maxLength):\n data = [f'{wawKeys[i]} -> {waws[wawKeys[i]]}' if i < len(wawKeys) else\n '', f'{warKeys[i]} -> {wars[warKeys[i]]}' if i < len(warKeys) else\n '', f'{trueDepKeys[i]} -> {trueDeps[trueDepKeys[i]]}' if i <\n len(trueDepKeys) else '']\n tableData.append(data)\n return tableData\n\n\nif __name__ == '__main__':\n numIns = 0\n maxNumIns = 5\n stop = False\n instructions = []\n print(\n \"Enter up to 5 MIPs instructions below. When you're done simplypress enter without typing in any input\"\n )\n print('Instructions must be in the format: ins Reg1 Reg2 Reg3')\n print('i.e. add R1 R2 R3')\n while numIns < maxNumIns and not stop:\n ins = getInstructionFromUser(numIns + 1)\n if ins != '':\n instructions.append(ins)\n numIns += 1\n else:\n stop = True\n table_data = [['Given Instructions']]\n i = 1\n for ins in instructions:\n table_data.append([f'S{i} - ' + ins])\n i += 1\n table = AsciiTable(table_data)\n print('Here are the instructions provided:')\n print('\\n' + table.table + '\\n')\n input('Press Enter find any existing false dependencies\\n')\n dependenciesDict = findDependencies(instructions)\n table = AsciiTable(parseDepDictToTableData(dependenciesDict))\n print('\\n' + table.table + '\\n')\n input('\\nPress Enter to begin renaming registers')\n resolvedInstructions = resolveDependencies(instructions, dependenciesDict)\n resolvedInstructionsArr = []\n for key, value in resolvedInstructions.items():\n resolvedInstructionsArr.append(f'{key} - {value}')\n resolvedTableData = [['Resolved Instructions']]\n for ins in resolvedInstructionsArr:\n resolvedTableData.append([ins])\n table = AsciiTable(resolvedTableData)\n print(table.table + '\\n')\n input('Press Enter to continue')\n print('DONE!\\n')\n",
"step-5": "from terminaltables import AsciiTable\nimport copy\n\ntable_data = [\n ['WAR', 'WAW'],\n ['S1 -> S2: R1', 'row1 column2'],\n ['row2 column1', 'row2 column2'],\n ['row3 column1', 'row3 column2']\n]\ntable = AsciiTable(table_data)\n\n\ndef getDependenceStr(ins1, ins2, reg):\n return f\"{ins1} -> {ins2}: {reg}\"\n\n\ndef getInstructionStr(ins, reg1, reg2, reg3):\n return f\"{ins} {reg1} {reg2} {reg3}\"\n\n\ndef getInstructionArr(ins):\n return ins.split(' ')\n\n\ndef validateInput(str):\n if str.strip() == '':\n return True\n\n return len(str.split()) == 4\n\n\ndef getInstructionFromUser(insNum):\n ins = input(f\"S{insNum}: \")\n\n while not validateInput(ins):\n print(\"The value instruction you entered is invalid. Please try again\")\n print(\"Remember the instruction must be in the format:\"\n \"ins Reg1 Reg2 Reg3 \")\n ins = input(f\"S{insNum}: \")\n return ins\n\n\ndef findDependencies(instructions):\n dependencies = {'waw': findWAWs(instructions),\n 'war': findWARs(instructions),\n 'trueDeps': findTrueDependencies(instructions)}\n return dependencies\n\n\ndef findWAWs(instructions):\n waws = {}\n insDict = {}\n i = 1\n\n for ins in instructions:\n insDict[f'S{i}'] = ins\n i += 1\n\n workingIns = copy.deepcopy(insDict)\n\n for (key, value) in insDict.items():\n insParts = value.split()\n\n del workingIns[key]\n\n for (key2, otherIns) in workingIns.items():\n if insParts[1] == otherIns.split()[1]:\n waws[f'{key} -> {key2}'] = insParts[1]\n break # Find only the first occurance of a waw\n return waws\n\n\ndef findWARs(ins):\n wars = {}\n insDict = {}\n i = 1\n\n for ins in instructions:\n insDict[f'S{i}'] = ins\n i += 1\n\n workingIns = copy.deepcopy(insDict)\n\n for (key, value) in insDict.items():\n insParts = value.split()\n\n del workingIns[key]\n\n for (key2, otherIns) in workingIns.items():\n if insParts[2] == otherIns.split()[1]:\n wars[f'{key} -> {key2}'] = insParts[2]\n if insParts[3] == otherIns.split()[1]:\n wars[f'{key} -> {key2}'] = 
insParts[3]\n return wars\n\n\ndef findTrueDependencies(ins):\n trueDeps = {}\n for i in range(len(ins)-1, -1, -1):\n ins1 = ins[i].split()\n for k in range(2, len(ins1), 1):\n checkReg = ins1[k]\n for s in range(i-1, -1, -1):\n ins2 = ins[s].split()\n if checkReg == ins2[1]:\n trueDeps[f'S{s+1} -> S{i+1}'] = checkReg\n break\n return trueDeps\n\n\ndef resolveDependencies(instructions, dependencies):\n waws = dependencies['waw']\n wars = dependencies['war']\n trueDeps = dependencies['trueDeps']\n insDict = {}\n i = 1\n\n for ins in instructions:\n insDict[f'S{i}'] = ins\n i += 1\n\n tNum = 0\n\n # Resolve WAWs\n for (dependence, reg) in waws.items():\n depParts = dependence.split()\n insParts = insDict[depParts[0]].split()\n\n try:\n # Check true dependence\n trueDepsExist, trueDep = checkTrueDep(dependence, trueDeps, reg)\n if trueDepsExist:\n trueDepParts = trueDep.split()\n ins1 = insDict[trueDepParts[0]].split()\n ins2 = insDict[trueDepParts[2]].split()\n\n ins1ChangeIndex = ins1.index(reg)\n ins2ChangeIndex = [i for i, x in enumerate(ins2) if x == reg]\n\n ins1[ins1ChangeIndex] = f'T{tNum}'\n for index in ins2ChangeIndex:\n if index != 1:\n ins2[index] = f'T{tNum}'\n\n insDict[trueDepParts[0]] = ' '.join(ins1)\n insDict[trueDepParts[2]] = ' '.join(ins2)\n else:\n changeIndex = insParts.index(reg)\n insParts[changeIndex] = f'T{tNum}'\n\n insDict[depParts[0]] = ' '.join(insParts)\n tNum += 1\n except ValueError:\n pass\n\n # Resolve WARs\n for (dependence, reg) in wars.items():\n depParts = dependence.split()\n insParts = insDict[depParts[0]].split()\n\n try:\n changeIndex = insParts.index(reg)\n insParts[changeIndex] = f'T{tNum}'\n\n insDict[depParts[0]] = ' '.join(insParts)\n tNum += 1\n except ValueError:\n pass\n\n return insDict\n\n\ndef checkTrueDep(falseDep, trueDeps, reg):\n # for waws\n depArr = falseDep.split()\n for (trueDep, reg2) in trueDeps.items():\n trueDepArr = trueDep.split()\n if depArr[0] == trueDepArr[0] and reg == reg2:\n return (True, 
trueDep)\n return (None, None)\n\n\ndef parseDepDictToTableData(dependenciesDict):\n tableData = [\n ['WAW', 'WAR', 'True']\n ]\n waws = dependenciesDict['waw']\n wars = dependenciesDict['war']\n trueDeps = dependenciesDict['trueDeps']\n\n wawKeys = list(waws.keys())\n warKeys = list(wars.keys())\n trueDepKeys = list(trueDeps.keys())\n\n maxLength = max([len(waws), len(wars), len(trueDeps)])\n for i in range(0, maxLength):\n data = [f'{wawKeys[i]} -> {waws[wawKeys[i]]}'\n if i < len(wawKeys) else '', # Add WAW Dependencies\n\n f'{warKeys[i]} -> {wars[warKeys[i]]}'\n if i < len(warKeys) else '', # Add WAR Dependencies\n\n f'{trueDepKeys[i]} -> {trueDeps[trueDepKeys[i]]}'\n if i < len(trueDepKeys) else ''] # Add True Dependencies\n\n tableData.append(data)\n return tableData\n\n\nif __name__ == '__main__':\n numIns = 0\n maxNumIns = 5\n stop = False\n instructions = []\n\n print(\"Enter up to 5 MIPs instructions below. When you're done simply\"\n \"press enter without typing in any input\")\n print(\"Instructions must be in the format: ins Reg1 Reg2 Reg3\")\n print(\"i.e. 
add R1 R2 R3\")\n while numIns < maxNumIns and not stop:\n ins = getInstructionFromUser(numIns+1)\n if ins != '':\n instructions.append(ins)\n numIns += 1\n else:\n stop = True\n\n # Genarate the table data need to show instructions given\n table_data = [\n ['Given Instructions'],\n ]\n\n i = 1\n for ins in instructions:\n table_data.append([f'S{i} - ' + ins])\n i += 1\n\n table = AsciiTable(table_data)\n print(\"Here are the instructions provided:\")\n print('\\n' + table.table + '\\n')\n input(\"Press Enter find any existing false dependencies\\n\")\n dependenciesDict = findDependencies(instructions)\n table = AsciiTable(parseDepDictToTableData(dependenciesDict))\n print('\\n' + table.table + '\\n')\n input(\"\\nPress Enter to begin renaming registers\")\n resolvedInstructions = resolveDependencies(instructions, dependenciesDict)\n resolvedInstructionsArr = []\n for (key, value) in resolvedInstructions.items():\n resolvedInstructionsArr.append(f'{key} - {value}')\n resolvedTableData = [\n ['Resolved Instructions']\n ]\n\n for ins in resolvedInstructionsArr:\n resolvedTableData.append([ins])\n table = AsciiTable(resolvedTableData)\n print(table.table + '\\n')\n input('Press Enter to continue')\n print('DONE!\\n')\n",
"step-ids": [
7,
10,
11,
13,
16
]
}
|
[
7,
10,
11,
13,
16
] |
from django.shortcuts import render,redirect
from .forms import UserRegisterForm, IsEmri ,TestForm,PDF_Rapor
from django.contrib import messages
from django.contrib.auth import authenticate, login ,logout
from django.http import HttpResponseRedirect, HttpResponse ,JsonResponse
from django.urls import reverse
from django.db.models import Max
from django.contrib.auth.models import User
from .models import Emir , Test, Bildirim, Uretim, Valf
from .models import Valf_montaj,Valf_test,Valf_govde,Valf_fm200,Valf_havuz,Valf_final_montaj
from django.contrib.auth.decorators import login_required
import json, platform, base64, datetime, os
from django.utils import timezone
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.csrf import csrf_exempt
from django.core.files.storage import FileSystemStorage
from django.template.loader import render_to_string
from weasyprint import HTML
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from base64 import b64decode
# Create your views here.
#mac = platform.machine()[:3]  # returns 'arm' when the device is a Raspberry Pi
# NOTE(review): hard-coded LAN address used by templates/views; consider
# moving it into Django settings so deployments do not require a code change.
server = '192.168.1.38:8000'
def get_client_ip(request):
    """Return the originating client IP address for a request.

    Uses the first entry of the X-Forwarded-For header when present (the
    request came through a proxy), otherwise falls back to REMOTE_ADDR.
    X-Forwarded-For is client-controllable, so do not rely on this value
    for authentication decisions.
    """
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0]
    else:
        ip = request.META.get('REMOTE_ADDR')
    # Fix: removed a leftover debug print(ip) that ran on every request.
    return ip
def bildirim(request):
    """Return notifications (Bildirim rows) from the last 14 days as a
    JSON array of dicts."""
    bugun = timezone.now()
    # NOTE(review): the name means "one day before" but the window is
    # actually 14 days - confirm which is intended.
    birGunOnce = bugun - timezone.timedelta(days=14)
    bildirimq = Bildirim.objects.filter(zaman__range=[birGunOnce, bugun])
    # .values() already yields dicts; collect them directly instead of the
    # previous copy-into-temp-then-list round trip, and drop the debug print.
    bildirims = list(bildirimq.values())
    # safe=False is required because the top-level JSON object is a list.
    return JsonResponse(bildirims, safe=False)
@login_required
def index(request):
    """Dashboard: for every active work order, count valves past each stage."""
    #Bildirim.objects.all().delete()
    grup = request.user.grup
    birim = request.user.birim
    emirler = Emir.objects.filter(durum="Aktif")
    # (template key, Valf FK column) pairs for each production stage, in display order.
    stage_fields = (
        ('valfmontaj', 'valf_montaj_id'),
        ('valftest', 'valf_test_id'),
        ('valfgovde', 'valf_govde_id'),
        ('fm200', 'fm200_azot_id'),
        ('havuztest', 'havuz_id'),
        ('finalmontaj', 'valf_final_montaj_id'),
    )
    stats = []
    for emir in emirler.values():
        row = {'is_emri': emir['is_emri']}
        for label, fk_field in stage_fields:
            row[label] = (Valf.objects.filter(is_emri_id=emir['id'])
                          .filter(**{fk_field + '__isnull': False})
                          .values_list(fk_field, flat=True).count() or 0)
        stats.append(row)
    print(stats)
    return render(request, 'index.html', {'grup': grup, "emirler": emirler, 'birim': birim, 'server': server, 'uretims': stats})
@login_required
def arama(request):
    """Search page: look up test results and production records by type/work order."""
    mac = request.user_agent.os.family
    q = request.GET.get('q') or request.GET.get('uretim')
    emir = request.GET.get('emir')
    emirs = Emir.objects.all()
    media_url = settings.MEDIA_URL
    if q:
        aranan = q
    elif emir:
        aranan = "isemri"
    else:
        aranan = ""
        print('bos')
    grup = request.user.grup
    birim = request.user.birim
    testler = Test.objects.filter(tur=q)
    print(q)
    # Map the production-stage keyword to its model; anything else falls back to Uretim.
    stage_models = {
        "valfmontaj": Valf_montaj,
        "valfgovde": Valf_govde,
        "fm200": Valf_fm200,
        "havuztest": Valf_havuz,
        "finalmontaj": Valf_final_montaj,
    }
    model = stage_models.get(q)
    uretims = model.objects.all() if model else Uretim.objects.filter(tur=q)
    print(uretims)
    emirler = Emir.objects.all() if emir == "tumu" else Emir.objects.filter(is_emri=emir)
    return render(request, 'arama.html', {'mac': mac, 'testler': testler, 'grup': grup, "emirler": emirler,
                                          "aranan": aranan, "emirs": emirs, 'birim': birim,
                                          'media_url': media_url, "uretims": uretims, 'server': server})
def _save_uploaded_pdf(request):
    """Persist the uploaded report file (if any) via the default file storage."""
    if request.FILES:
        upload_file = request.FILES['file']
        fs = FileSystemStorage()
        fs.save(upload_file.name, upload_file)


@login_required
@csrf_exempt
def giriskalite(request):
    """Incoming quality-control page: records component test results.

    POST['tur'] selects the component being tested:
      - 'basinc':       bulk pressure-switch results (open/close values) from a JSON list.
      - 'manometre':    bulk manometer readings from a JSON list.
      - 'altnipel'/'ustnipel': one lot with a serial-number range, validated for
                        overlaps via nipelSeriNoKontrol, with optional PDF upload.
      - 'bakirmembran': copper-membrane / safety-vent burst test (POST['test_tur']
                        carries the concrete type) with optional PDF upload.
    Always re-renders the quality-control template.

    Review fixes vs. original: a long-dead 'emniyet' branch kept as a never-used
    triple-quoted string literal was removed, and the repeated file-upload code
    was factored into _save_uploaded_pdf. Behavior is unchanged.
    """
    mac = request.user_agent.os.family
    grup = request.user.grup
    birim = request.user.birim
    #Test.objects.all().delete() # uncomment to wipe all test results
    fullname = request.user.first_name + ' ' + request.user.last_name
    if request.method == 'POST':
        tur = request.POST.dict()['tur']
        if tur == 'basinc':
            veris = json.loads(request.POST.dict()['veri'])
            for veri in veris:
                t = Test(tur='basinc', seri_no=veri[0], acma=veri[1], kapatma=veri[2],
                         kabul_durumu=veri[3], testi_yapan=fullname)
                t.save(force_insert=True)
        elif tur == 'manometre':
            veris = json.loads(request.POST.dict()['veri'])
            for veri in veris:
                t = Test(tur='manometre', seri_no=veri[0], okunan_deger=veri[1],
                         kabul_durumu=veri[2], testi_yapan=fullname)
                t.save(force_insert=True)
        elif tur == 'altnipel':
            print(request.POST)
            if nipelSeriNoKontrol(request):
                _save_uploaded_pdf(request)
                next_lot_no = getNextLotNo(tur)
                t = Test(tur='altnipel', lot_no=next_lot_no, pdf_ismi=request.POST.get('pdf_ismi'),
                         baslangic_seri_no=request.POST.get('baslangic_seri_no'),
                         bitis_seri_no=request.POST.get('bitis_seri_no'),
                         kabul_durumu=request.POST.get('kabulAlt'), testi_yapan=fullname)
                t.save(force_insert=True)
                messages.success(request,'Alt nipel testi başarıyla kaydedildi.')
        elif tur == 'ustnipel':
            print(request.POST)
            if nipelSeriNoKontrol(request):
                _save_uploaded_pdf(request)
                next_lot_no = getNextLotNo(tur)
                t = Test(tur='ustnipel', lot_no=next_lot_no, pdf_ismi=request.POST.get('pdf_ismi'),
                         baslangic_seri_no=request.POST.get('baslangic_seri_no'),
                         bitis_seri_no=request.POST.get('bitis_seri_no'),
                         kabul_durumu=request.POST.get('kabulUst'), testi_yapan=fullname)
                t.save(force_insert=True)
                messages.success(request,'Üst nipel testi başarıyla kaydedildi.')
        elif tur == 'bakirmembran':
            print(request.POST)
            next_lot_no = getNextLotNo(request.POST.get('test_tur'))
            _save_uploaded_pdf(request)
            t = Test(tur=request.POST.get('test_tur'), lot_no=next_lot_no,
                     pdf_ismi=request.POST.get('pdf_ismi'),
                     test_basinci=request.POST.get('test_basinci'),
                     patlama_basinci=request.POST.get('patlama_basinci'),
                     kabul_durumu=request.POST.get('kabulBak'), testi_yapan=fullname)
            t.save(force_insert=True)
            if(request.POST.get('test_tur') =='bakirmembran'):
                messages.success(request,'Bakır membran testi başarıyla kaydedildi.')
            else:
                messages.success(request,'Emniyet ventili testi başarıyla kaydedildi.')
    return render(request,'giris-kalite-kontrol.html',{ 'mac' : mac , 'grup': grup, 'birim': birim,'server' : server})
def getNextLotNo(tur):
    """Return the next free lot number for the given test type (1 when none exist)."""
    latest = Test.objects.filter(tur=tur).order_by('-lot_no').first()
    previous = 0 if latest is None else latest.lot_no
    return previous + 1
def nipelSeriNoKontrol(request):
    """Validate the nipple serial-number range submitted in the POST.

    Checks that the start serial is not greater than the end serial, and that
    the requested range does not overlap any range already stored for the same
    test type. On failure a warning message is queued and False is returned;
    True means the range is acceptable.

    Review fix: the original set an ``errorFlag`` that was never read after the
    early return — the dead variable has been removed. Behavior is unchanged.
    """
    baslangic_seri_no = request.POST.get('baslangic_seri_no')
    bitis_seri_no = request.POST.get('bitis_seri_no')
    if int(baslangic_seri_no) > int(bitis_seri_no):
        messages.warning(request,'Başlangıç seri numarası, bitiş seri numarasından büyük olamaz!')
        return False
    testler = Test.objects.filter(tur=request.POST.dict()['tur'])
    # Overlap check: intersect the requested (inclusive) range with every stored one.
    istenen_aralik = set(range(int(baslangic_seri_no), int(bitis_seri_no) + 1))
    for test in testler:
        mevcut_aralik = range(int(test.baslangic_seri_no), int(test.bitis_seri_no) + 1)
        if istenen_aralik.intersection(mevcut_aralik):
            messages.warning(request,'Seri numarası aralığı mevcut bir seri numarası aralığı ile çakışmaktadır!')
            return False
    return True
@login_required
@csrf_exempt
def uretimkontrol(request):
    """Production-control page: records each production stage posted from the floor.

    POST['tur'] selects the stage being recorded:
      - 'valfmontaj':  logs the start of valve assembly and emits a start notification.
      - 'kurlenme':    creates the Valf_montaj record plus its Valf row; echoes the
                       new valve id back so the client can label the part.
      - 'valftest':    attaches a Valf_test result to an existing valve.
      - 'valfgovde':   records body assembly (torque, tube serial, sodium amount)
                       with a 10-minute curing deadline.
      - 'fm200':       records FM200/azot filling weights with a 10-minute curing deadline.
      - 'havuztest':   records the pool leak-test result.
      - 'finalmontaj': records final assembly; when every tube of the work order is
                       finished the order is marked 'Bitmiş' and a notification is emitted.
    Afterwards (and on GET) renders the production-control template with the
    current curing lists and open work orders.

    Review fixes vs. original (behavior unchanged): a stray no-op statement
    ``ip == '192.168.1.36'`` (a comparison whose result was discarded —
    apparently a leftover debug check), several large dead ``'''neval'''``
    string-literal blocks, and unused locals (``mac``, ``fullname``, and the
    ``acma``/``kapama``/``sebep`` strings in the 'valftest' branch) were removed.
    """
    ip = get_client_ip(request)
    grup = request.user.grup
    birim = request.user.birim
    #Uretim.objects.all().delete() # uncomment to wipe all production records
    if request.method == 'POST':
        if request.POST.dict()['tur'] == 'valfmontaj':
            veris = json.loads(request.POST.dict()['veri'])
            print(veris)
            t = Uretim(tur='valfmontaj', okunan_deger=veris[0], personel=request.user.get_full_name())
            t.save(force_insert=True)
            b = Bildirim(tur="baslangic", kisi=request.user.get_full_name())
            b.save(force_insert=True)
        elif request.POST.dict()['tur'] == 'kurlenme':
            veris = json.loads(request.POST.dict()['veri'])
            print("deneme")
            is_emri_adi = veris[0]
            emir = Emir.objects.get(is_emri=is_emri_adi)
            personel_id = request.user.id
            alt_nipel_no = veris[1]
            bakir_membran_no = veris[2]
            ust_nipel_no = veris[3]
            manometre_no = veris[4]
            basincanahtari_no = veris[5]
            sibop = veris[6]
            print("deneme2")
            try:
                kayit_tarihi = timezone.now()
                valf_montaj = Valf_montaj(montaj_personel_id=personel_id, alt_nipel_no=alt_nipel_no,
                                          bakir_membran_no=bakir_membran_no, ust_nipel_no=ust_nipel_no,
                                          manometre_no=manometre_no, basincanahtari_no=basincanahtari_no,
                                          montaj_tarihi=kayit_tarihi, sibop=sibop)
                valf_montaj.save()
                valf = Valf(is_emri=emir, valf_montaj=valf_montaj)
                valf.save()
                # Echo the new valve id so the client can print/label it.
                return HttpResponse(str(valf.id))
            except Exception as err:
                print(" KAyıt HAstası > ", err)
        elif request.POST.dict()['tur'] == 'valftest':
            try:
                valf_seri_no = json.loads(request.POST.dict()['valf_seri_no'])
                uygun = json.loads(request.POST.dict()['uygun'])
                valf = Valf.objects.get(id=valf_seri_no)
                personel_id = User.objects.get(id=request.user.id)
                test_tarihi = timezone.now()
                valf_test = Valf_test(test_personel=personel_id, test_tarihi=test_tarihi, uygun=uygun)
                valf_test.save()
                valf.valf_test = valf_test
                valf.save()
            except Exception as err:
                print(err)
        elif request.POST.dict()['tur'] == 'valfgovde':
            veri = json.loads(request.POST.dict()['veri'])
            valf_seri_no = veri[3]
            valf = Valf.objects.get(id=valf_seri_no)
            valf.durum = 'valf_govde'
            valf.save()
            personel_id = request.user.id
            kayit_tarihi = timezone.now()
            kurlenme_bitis = timezone.now() + timezone.timedelta(minutes=10)
            tork = veri[0]
            tup_seri_no = veri[4]
            sodyum_miktari = veri[5]
            uygunluk = veri[1]
            sebep = veri[2]
            # NOTE(review): the client appears to send the checkbox literal 'on'
            # for "suitable"; in that case no rejection reason is stored — confirm.
            if (uygunluk == 'on'):
                sebep = None
            valf_govde = Valf_govde(valf=valf, personel_id=personel_id, kayit_tarihi=kayit_tarihi,
                                    kurlenme_bitis=kurlenme_bitis, tork=tork, tup_seri_no=tup_seri_no,
                                    sodyum_miktari=sodyum_miktari, uygunluk=uygunluk, sebep=sebep)
            valf_govde.save()
        elif request.POST.dict()['tur'] == 'fm200':
            veri = json.loads(request.POST.dict()['veri'])
            valf_seri_no = veri[4]
            valf = Valf.objects.get(id=valf_seri_no)
            valf.durum = 'valf_fm200'
            valf.save()
            personel_id = request.user.id
            kayit_tarihi = timezone.now()
            kurlenme_bitis = timezone.now() + timezone.timedelta(minutes=10)
            bos_agirlik = veri[0]
            rekorlu_agirlik = veri[1]
            fm200 = veri[2]
            azot = veri[3]
            valf_fm200 = Valf_fm200(valf=valf, personel_id=personel_id, kayit_tarihi=kayit_tarihi,
                                    kurlenme_bitis=kurlenme_bitis, bos_agirlik=bos_agirlik,
                                    rekorlu_agirlik=rekorlu_agirlik, fm200=fm200, azot=azot)
            valf_fm200.save()
        elif request.POST.dict()['tur'] == 'havuztest':
            veri = json.loads(request.POST.dict()['veri'])
            print("veri", veri)
            valf_seri_no = veri[0]
            valf = Valf.objects.get(id=valf_seri_no)
            valf.durum = 'valf_havuz_test'
            valf.save()
            personel_id = request.user.id
            kayit_tarihi = timezone.now()
            uygunluk = veri[1]
            tup_cidar_sicaklik = veri[2]
            tup_basinc = veri[3]
            sebep = veri[4]
            if (uygunluk):
                sebep = None
            valf_havuz = Valf_havuz(valf=valf, personel_id=personel_id, kayit_tarihi=kayit_tarihi,
                                    tup_cidar_sicaklik=tup_cidar_sicaklik, tup_basinc=tup_basinc,
                                    uygunluk=uygunluk, sebep=sebep)
            valf_havuz.save()
        elif request.POST.dict()['tur'] == 'finalmontaj':
            veri = json.loads(request.POST.dict()['veri'])
            valf_seri_no = veri[1]
            valf = Valf.objects.get(id=valf_seri_no)
            valf.durum = 'valf_final_montaj'
            valf.save()
            personel_id = request.user.id
            kayit_tarihi = timezone.now()
            etiket_seri_no = veri[0]
            funye_seri_no = veri[2]
            funye_seri_omaj = veri[3]
            basinc_anahtari_omaj = veri[4]
            valf_final_montaj = Valf_final_montaj(valf=valf, personel_id=personel_id, kayit_tarihi=kayit_tarihi,
                                                  etiket_seri_no=etiket_seri_no, funye_seri_no=funye_seri_no,
                                                  funye_seri_omaj=funye_seri_omaj,
                                                  basinc_anahtari_omaj=basinc_anahtari_omaj)
            valf_final_montaj.save()
            # Close the work order once every tube has reached final assembly.
            emir = Emir.objects.get(is_emri=valf.is_emri)
            emir_tup_sayisi = int(emir.tup_sayisi)
            emir_biten_valf_sayi = Valf.objects.filter(is_emri=emir, durum='valf_final_montaj').count()
            print('emir_biten_valf_sayi', emir_biten_valf_sayi)
            print('emir_tup_sayisi', emir_tup_sayisi)
            if (emir_biten_valf_sayi == emir_tup_sayisi):
                emir.durum = 'Bitmiş'
                emir.save()
                b = Bildirim(tur="bitis", kisi=request.user.get_full_name())
                b.save(force_insert=True)
    now = timezone.now()
    # Items in (or past) their curing window plus open work orders for the template.
    montajkurlenmesi = Valf_montaj.objects.all()
    fm200kurlenmesi = Valf_fm200.objects.filter(fm200_kurlenme_bitis_tarihi__gte=now)
    acikemirleri = Emir.objects.filter(durum='Aktif').values()
    aktifemirler = Emir.objects.filter(durum="Aktif")
    # De-duplicate the work-order id lists (dict.fromkeys preserves order).
    govde_emir = list(dict.fromkeys(Valf.objects.filter(valf_govde_id__isnull=False).values_list('is_emri_id', flat=True)))
    fm200_emir = list(dict.fromkeys(Valf.objects.filter(fm200_azot_id__isnull=False).values_list('is_emri_id', flat=True)))
    return render(request, 'uretim-kontrol.html', {'grup': grup, 'birim': birim, 'ip': ip, 'now': now, 'server': server,
                                                   'acikemirleri': acikemirleri, 'fm200kurlenmes': fm200kurlenmesi,
                                                   'kurlenmes': montajkurlenmesi, 'aktifemirler': aktifemirler,
                                                   'govde_emir': govde_emir, 'fm200_emir': fm200_emir})
@csrf_exempt
def acikisemirleri(request):
    """Return the names (is_emri) of all open work orders as a JSON list.

    Open means the order is either active ('Aktif') or not yet started
    ('Başlanmamış').

    Review fix: the original built the list but never returned a response, so
    Django raised "view didn't return an HttpResponse"; the JsonResponse below
    is the fix.
    """
    emirler = Emir.objects.filter(durum__in=("Aktif","Başlanmamış"))
    veri = [o['is_emri'] for o in emirler.values()]
    return JsonResponse(veri, safe=False)
@login_required
@csrf_exempt
def isemri(request):
    """Work-order page: lists all orders, creates new ones, saves priority updates.

    Two POST flavours share this endpoint:
      - priority update: POST carries tur='oncelik' plus a JSON map of
        {is_emri: new_priority}; each order is updated, a notification row is
        written, and a bare 'onceliktamam' response is returned.
      - order creation: a bound IsEmri form; on success the new order gets the
        next free priority, a notification is written, and the user is
        redirected back here.
    GET renders an unbound form pre-filled with the current user's full name.
    """
    mac = request.user_agent.os.family
    grup = request.user.grup
    birim = request.user.birim
    #Emir.objects.all().delete()
    fullname = request.user.first_name + ' ' + request.user.last_name
    emirler = Emir.objects.all()
    form = IsEmri(request.POST)
    if request.method == 'POST':
        if 'tur' in request.POST.dict():
            if request.POST.dict()['tur'] == 'oncelik':
                veri = json.loads(request.POST.dict()['veri'])
                print(veri)
                # Apply the posted priority to each named work order.
                for key in veri:
                    em = Emir.objects.get(is_emri=key)
                    em.oncelik = veri[key]
                    em.save()
                o = Bildirim(tur="oncelik")
                o.save()
                return HttpResponse('onceliktamam')
        else:
            if form.is_valid():
                # New orders get (current highest priority) + 1.
                if not Emir.objects.all():
                    son_oncelik = 1
                else:
                    a = Emir.objects.all().order_by('-oncelik').values()[0]
                    s = a['oncelik']
                    son_oncelik = s + 1
                emir = form.save()
                emir.refresh_from_db()
                emir.is_emri = form.cleaned_data.get('is_emri')
                emir.urun_kodu = form.cleaned_data.get('urun_kodu')
                emir.baslangic = form.cleaned_data.get('baslangic')
                emir.bitis = form.cleaned_data.get('bitis')
                emir.emri_veren = form.cleaned_data.get('emri_veren')
                emir.tup_govde_turu = form.cleaned_data.get('tup_govde_turu')
                emir.valf_turu = form.cleaned_data.get('valf_turu')
                emir.renk = form.cleaned_data.get('renk')
                emir.emniyet_ventil_turu = form.cleaned_data.get('emniyet_ventil_turu')
                emir.siparis = form.cleaned_data.get('siparis')
                emir.fm200bosagirlikmindeger= form.cleaned_data.get('fm200bosagirlikmindeger')
                emir.fm200bosagirlikmaxdeger = form.cleaned_data.get('fm200bosagirlikmaxdeger')
                emir.fm200dolummiktarimindeger= form.cleaned_data.get('fm200dolummiktarimindeger')
                emir.fm200dolummiktarimaxdeger = form.cleaned_data.get('fm200dolummiktarimaxdeger')
                #if(request.user.grup == "planlama"):
                t = Bildirim(tur = "is emri",emri_veren_grup = grup, emri_veren = request.user.get_full_name(), is_emri = form.cleaned_data.get('is_emri'))
                t.save(force_insert=True)
                emir.oncelik = son_oncelik
                messages.success(request,'Emir başarıyla eklendi!')
                emir.save()
                form.full_clean()
                return(HttpResponseRedirect(reverse('isemri')))
            else:
                messages.warning(request,'İş emri eklenemedi.Lütfen tekrar deneyin!Hata: {}'.format(form.errors))
    else:
        form = IsEmri()
        form.fields["emri_veren"].initial = fullname
    return render(request,'is-emri.html', { 'form' : form , 'emirler': emirler , 'mac' : mac , 'fullname' : fullname ,'grup' : grup , 'birim': birim,'server' : server})
#@login_required
def yetkilendirme(request):
    """User-administration page: create new accounts with a group and unit.

    NOTE(review): ``grup`` and ``birim`` are hard-coded to "Yönetici"/"IT"
    (the request.user-based lines are commented out) and @login_required is
    disabled above, so the permission gate below always passes and the view is
    reachable anonymously. Confirm whether this is a debugging leftover.
    """
    mac = request.user_agent.os.family
    #grup = "yonetici"#request.user.grup
    #birim = request.user.birim
    grup = "Yönetici"
    birim = "IT"
    kullanicilar = User.objects.all()
    # Only IT managers/engineers may create accounts ('and' binds tighter than 'or').
    if grup == 'Yönetici' and birim == 'IT' or grup == 'Mühendis' and birim == 'IT':
        if request.method == 'POST':
            form = UserRegisterForm(request.POST)
            if form.is_valid(): #and profile_form.is_valid():
                user = form.save()
                user.refresh_from_db()
                user.first_name = form.cleaned_data.get('first_name')
                user.last_name = form.cleaned_data.get('last_name')
                user.grup = form.cleaned_data.get('grup')
                user.save()
                username = form.cleaned_data.get('username')
                password = form.cleaned_data.get('password1')
                messages.success(request,'{} isimli kullanıcı {} isimli gruba eklendi!'.format(username,user.grup))
                return(HttpResponseRedirect(reverse('yetkilendirme')))
            else:
                print(form.errors)
        else:
            form = UserRegisterForm()
        return render(request,'kullanici-yetkilendirme.html',{'form':form,'kullanicilar':kullanicilar , 'mac' : mac , 'grup' : grup, 'birim': birim,'server' : server})
    else:
        return(HttpResponseRedirect(reverse('403')))
@login_required
def performans(request):
    """Render the personnel performance page with the full user list."""
    context = {
        'mac': request.user_agent.os.family,
        'grup': request.user.grup,
        'birim': request.user.birim,
        'kullanicilar': User.objects.all(),
        'server': server,
    }
    return render(request, 'performans.html', context)
@login_required
@csrf_exempt
def yazdir(request):
    """Print page: on POST, return work orders with the requested status as JSON.

    Each JSON entry is "<is_emri> <timestamp>" formatted for the print dialog.
    NOTE(review): the permission check is disabled — ``if True:`` replaces the
    original group/unit test (kept in the trailing comment), so the 403 branch
    below is unreachable. Confirm whether the check should be restored.
    """
    mac = request.user_agent.os.family
    grup = request.user.grup
    birim = request.user.birim
    if True:#grup == 'Yönetici' and birim == 'IT':
        if request.method == 'POST':
            i = Emir.objects.filter(durum=request.POST['durum'])
            temp = []
            for obj in i.values():
                times = obj['emir_zamani'].strftime("%d %B %Y (%H:%M:%S)")
                temp.append(obj['is_emri'] + " " + times)
            veri = list(temp)
            return JsonResponse(veri,safe=False)
        return render(request,'yazdir.html',{ 'mac' : mac , 'grup':grup, 'birim': birim,'server' : server})
    else:
        return(HttpResponseRedirect(reverse('403')))
@login_required
def ulogout(request):
    """Log the current user out and send them back to the login page."""
    logout(request)
    return redirect('ulogin')
@csrf_exempt
def ulogin(request):
    """Log a user in; on success go to the search page, otherwise back to login."""
    if request.method != 'POST':
        return render(request, 'login.html', {})
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(username=username, password=password)
    if user is not None and user.is_active:
        login(request, user)
        print('{} kullanıcısı tarafından başarılı giriş'.format(username))
        return redirect('arama')
    if user is not None:
        # Account exists but is deactivated; same generic message as a bad password.
        messages.warning(request, 'Kullanıcı adınızı yada parolanızı yanlış girdiniz.')
    else:
        print("Birisi login olmayı denedi ve başarısız oldu!")
        messages.warning(request, 'Kullanıcı adınızı yada parolanızı yanlış girdiniz.')
    return HttpResponseRedirect(reverse('ulogin'))
def _403(request):
    """Render the access-denied (HTTP 403) page."""
    return render(request,'403.html',{})
def handler404(request,exception):
    """Custom 404 handler: reuses the 403 template but serves it with status 404."""
    return render(request, '403.html', status=404)
@csrf_exempt
def kullanicijson(request):
    """Return the selected user's basic profile fields as a JSON list."""
    username = request.POST.get('username')
    matches = User.objects.filter(username=username).values('first_name', 'last_name', 'username', 'grup')
    return JsonResponse(list(matches), safe=False)
@csrf_exempt
def kullanicisil(request):
    """Delete the user named in the POST and report the outcome.

    Returns 'silindi' when at least one row was removed, 'silinemedi' otherwise.

    Review fix: ``QuerySet.delete()`` returns a ``(total_count, per_model_dict)``
    tuple, which is always truthy, so the original reported success even when no
    user matched. The deleted-row count is now checked instead.
    """
    username = request.POST.get('username')
    print(username)
    deleted_count, _ = User.objects.filter(username=username).delete()
    if deleted_count:
        return HttpResponse('silindi')
    else:
        return HttpResponse('silinemedi')
@csrf_exempt
def kullaniciduzelt(request):
    """Update a user's profile fields from the JSON payload in POST['bilgi']."""
    bilgi = json.loads(request.POST.get('bilgi'))
    # 'eskisi' is the user's previous username, used to locate the row.
    user = User.objects.get(username=bilgi["eskisi"])
    for field in ("username", "first_name", "last_name", "grup", "birim"):
        setattr(user, field, bilgi[field])
    user.save()
    return HttpResponse('duzeltildi')
@csrf_exempt
def passwordreset(request):
    """Set a new password (POST['ps1']) for the user named in POST['username']."""
    new_password = request.POST.get('ps1')
    username = request.POST.get('username')
    if not username:
        return HttpResponse('bir hata var')
    account = User.objects.get(username=username)
    account.set_password(new_password)
    account.save()
    return HttpResponse('parola değiştirildi')
def get_first_and_lastname(username):
    """Return "First Last" for the given username, or the placeholder
    'isim soyisim' when no such user exists.

    Review fixes: the original ran two identical queries and swallowed every
    exception with a bare ``except:``; this version queries once and only the
    missing-user case (``.first()`` returning None) yields the placeholder.
    """
    user = User.objects.filter(username=username).first()
    if user is None:
        return 'isim soyisim'
    return "{} {}".format(user.first_name, user.last_name)
def _pdf_safe(getter, default=''):
    """Run *getter* and return its value; fall back to *default* when any step
    of the lookup chain fails (a stage record is None, a field is absent, ...).
    Replaces the ~20 repeated bare ``try/except`` blocks of the original."""
    try:
        return getter()
    except Exception:
        return default


@csrf_exempt
def pdf(request):
    """Render the per-valve production report as an inline PDF.

    Reads the work-order token from GET['qr'] and the valve id from GET['vsn'],
    gathers every production-stage record attached to that valve, renders the
    HTML report template and converts it to PDF with WeasyPrint.

    Review fixes vs. original (each behavior-preserving except where noted):
    - every bare ``except:`` is narrowed to ``except Exception`` via _pdf_safe;
    - the exception fallback for ``valfTestUygun`` dereferenced the very object
      whose absence triggered the exception (crashing when the test record was
      missing); it now falls back to '' like its sibling fields;
    - the template context listed ``'valfgovdePersonel'`` twice; only the later
      entry (mapped to valftestPersonel) took effect, so that behavior is kept
      and the duplicate removed;
    - the unreachable second ``return response`` was removed.
    NOTE(review): when GET['qr'] is absent, ``qr``/``i`` stay unbound and the
    view raises NameError further down — original behavior, kept; confirm
    whether a 400 response is wanted instead.
    """
    if request.GET.get('qr'):
        qr = request.GET.get('qr')
        print(qr.split(" ")[0])
        i = qr.split(" ")[0]
    print("---------------------")
    valf_no = request.GET.get('vsn')
    valf_row = Valf.objects.filter(id=valf_no).first()
    # One record per production stage for this valve; any of them may be None.
    Valf_montaj_Data = Valf_montaj.objects.filter(id=valf_row.valf_montaj_id).first()
    Valf_fm200_Data = Valf_fm200.objects.filter(id=valf_row.fm200_azot_id).first()
    Valf_havuz_Data = Valf_havuz.objects.filter(id=valf_row.havuz_id).first()
    Valf_final_Data = Valf_final_montaj.objects.filter(id=valf_row.valf_final_montaj_id).first()
    Valf_test_Data = Valf_test.objects.filter(id=valf_row.valf_test_id).first()
    Valf_govde_Data = Valf_govde.objects.filter(id=valf_row.valf_govde_id).first()
    Emir_Data = Emir.objects.filter(is_emri=i).first()
    valf_final = Valf.objects.filter(id=valf_no).values_list('valf_final_montaj_id', flat=True).first()
    urun_seri_no = Valf_final_montaj.objects.filter(id=valf_final).values_list('urun_seri_no', flat=True).first()
    print("---------------------")
    valfmontajPersonel = _pdf_safe(lambda: get_first_and_lastname(User.objects.filter(id=Valf_montaj_Data.montaj_personel_id).first().username))
    valfmontajTarih = _pdf_safe(lambda: Valf_montaj_Data.montaj_tarihi)
    altnipelno = _pdf_safe(lambda: Valf_montaj_Data.alt_nipel_no)
    ustnipelno = _pdf_safe(lambda: Valf_montaj_Data.ust_nipel_no)
    switchno = _pdf_safe(lambda: Valf_montaj_Data.basincanahtari_no)
    manometreno = _pdf_safe(lambda: Valf_montaj_Data.manometre_no)
    siboplotno = _pdf_safe(lambda: Valf_montaj_Data.sibop)
    valftestPersonel = _pdf_safe(lambda: get_first_and_lastname(User.objects.filter(id=Valf_test_Data.test_personel_id).first().username))
    valftestTarih = _pdf_safe(lambda: Valf_test_Data.test_tarihi)
    valfTestUygun = _pdf_safe(lambda: 'Uygun' if Valf_test_Data.uygun == True else 'Uygun Değil')
    valfgovdePersonel = _pdf_safe(lambda: get_first_and_lastname(User.objects.filter(id=Valf_govde_Data.govde_personel_id).first().username))
    valfgovdeTarih = _pdf_safe(lambda: Valf_govde_Data.govde_tarihi)
    valfGovdeUygun = _pdf_safe(lambda: 'Uygun' if Valf_govde_Data.uygunluk == True else 'Uygun Değil')
    fm200Personel = _pdf_safe(lambda: get_first_and_lastname(User.objects.filter(id=Valf_fm200_Data.fm200_personel_id).first().username))
    fm200Tarih = _pdf_safe(lambda: Valf_fm200_Data.kayit_tarihi)
    bosAgirlik = _pdf_safe(lambda: Valf_fm200_Data.bos_agirlik)
    doluAgirlik = _pdf_safe(lambda: Valf_fm200_Data.dolu_agirlik)
    bar = _pdf_safe(lambda: Valf_fm200_Data.bar)
    havuztestPersonel = _pdf_safe(lambda: get_first_and_lastname(User.objects.filter(id=Valf_havuz_Data.havuz_personel_id).first().username))
    havuztestTarih = _pdf_safe(lambda: Valf_havuz_Data.kayit_tarihi)
    havuzTestUygun = _pdf_safe(lambda: 'Uygun' if Valf_havuz_Data.uygunluk == True else 'Uygun Değil')
    finalmontajPersonel = _pdf_safe(lambda: get_first_and_lastname(User.objects.filter(id=Valf_final_Data.personel_id).first().username))
    finalmontajTarih = _pdf_safe(lambda: Valf_final_Data.kayit_tarihi)
    membranTipi = _pdf_safe(lambda: Emir_Data.valf_turu)
    ventilTipi = _pdf_safe(lambda: Emir_Data.emniyet_ventil_turu)
    tugovdetipi = _pdf_safe(lambda: Emir_Data.tup_govde_turu)
    print(valftestPersonel, Emir_Data.emniyet_ventil_turu)
    veri = "veri"
    html_string = render_to_string('external/pdf-template.html', {
        'veri': veri, "qr": urun_seri_no,
        'valfmontajPersonel': valfmontajPersonel, 'valfmontajTarih': valfmontajTarih,
        'valftestPersonel': valftestPersonel, 'valftestTarih': valftestTarih,
        'valfTestUygun': valfTestUygun, 'havuzTestUygun': havuzTestUygun,
        # NOTE(review): the original's duplicate key mapped this to the *test*
        # personnel; that executed behavior is kept. Confirm whether
        # valfgovdePersonel (computed above, currently unused) was intended.
        'valfgovdePersonel': valftestPersonel,
        'valfgovdeTarih': valfgovdeTarih, 'valfGovdeUygun': valfGovdeUygun,
        'valfMontajUygun': "Uygun*", 'fm200Uygun': "Uygun*", 'finalMontajUygun': "Uygun*",
        'fm200Personel': fm200Personel, 'fm200Tarih': fm200Tarih,
        'bosAgirlik': bosAgirlik, 'doluAgirlik': doluAgirlik,
        'havuztestPersonel': havuztestPersonel, 'havuztestTarih': havuztestTarih,
        'finalmontajPersonel': finalmontajPersonel, 'finalmontajTarih': finalmontajTarih,
        'altnipelno': altnipelno, 'ustnipelno': ustnipelno, 'switchno': switchno, 'manometreno': manometreno,
        'is_emri': i, 'membranTipi': membranTipi, 'ventilTipi': ventilTipi,
        'urunserino': urun_seri_no, 'bar': bar, 'tugovdetipi': tugovdetipi, 'siboplotno': siboplotno
    }, request=request)
    html = HTML(string=html_string, base_url=request.build_absolute_uri())
    html.write_pdf(target='/tmp/' + qr + '.pdf')
    fs = FileSystemStorage('/tmp/')
    with fs.open(qr + '.pdf') as pdf_file:
        response = HttpResponse(pdf_file, content_type='application/pdf')
        response['Content-Disposition'] = 'inline; filename="pdf.pdf"'
    return response
#Test results
@csrf_exempt
def dashboard(request):
    """Return all test records from the last POST['gun_sayisi'] days as JSON."""
    today = timezone.now()
    print(request.POST.get('gun_sayisi'))
    day_count = int(request.POST.get('gun_sayisi'))
    window_start = today - timezone.timedelta(days=day_count)
    records = Test.objects.filter(test_tarihi__range=[window_start, today])
    payload = [row for row in records.values()]
    print("dashboard", payload)
    return JsonResponse(payload, safe=False)
@csrf_exempt
def uretimdurum(request):
    """Return per-stage completed-valve counts plus total tube count for a work order."""
    is_emri = request.POST.get('is_emri')
    print(is_emri)
    veri = list()
    print(Valf.objects.filter(is_emri_id=is_emri).values_list('valf_montaj_id', flat=True).count(),
          Valf.objects.filter(is_emri_id=is_emri).filter(valf_test_id__isnull=False).values_list('valf_test_id', flat=True).count())
    # Valf FK columns, one per production stage, in the order the client expects.
    stage_fields = ('valf_montaj_id', 'valf_test_id', 'valf_govde_id',
                    'fm200_azot_id', 'havuz_id', 'valf_final_montaj_id')
    try:
        for field in stage_fields:
            veri.append(Valf.objects.filter(is_emri_id=is_emri)
                        .filter(**{field + '__isnull': False})
                        .values_list(field, flat=True).count())
        veri.append(Emir.objects.filter(id=is_emri).values()[0]['tup_sayisi'])
    except Exception as err:
        print(err)
        veri = [0, 0, 0, 0, 0, 0, 10]
    print(veri)
    return JsonResponse(veri, safe=False)
@csrf_exempt
def personeldurum(request):
    """Return one person's activity counts for the last N days as a JSON list.

    The list holds six quality-test counts (manometre, basinc, altnipel,
    ustnipel, bakirmembran, emniyet) followed by six production-step counts
    (kurlenme, valftest, valfgovde, fm200, havuztest, finalmontaj). On any
    failure the original zero-filled placeholder list is returned.

    Review fixes: the bare ``except:`` (which also swallowed SystemExit /
    KeyboardInterrupt) is narrowed to ``except Exception``, an unused queryset
    was removed, and the twelve copy-pasted count expressions are now loops.
    """
    personel = request.POST.get('personel')
    gun_sayisi = request.POST.get('gun_sayisi')
    print(personel, gun_sayisi)
    bugun = timezone.now()
    kac_gun = bugun - timezone.timedelta(days=int(request.POST.get('gun_sayisi')))
    veri = list()
    try:
        # Quality-control tests performed by this person in the window.
        for tur in ('manometre', 'basinc', 'altnipel', 'ustnipel', 'bakirmembran', 'emniyet'):
            veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]).filter(tur=tur).filter(testi_yapan=personel).count())
        # Production steps performed by this person in the window.
        for tur in ('kurlenme', 'valftest', 'valfgovde', 'fm200', 'havuztest', 'finalmontaj'):
            veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).filter(tur=tur).filter(personel=personel).count())
    except Exception:
        veri = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10]
    print(veri)
    return JsonResponse(veri, safe=False)
@csrf_exempt
def tupTuru(request):
    """Return tube-type parameters for a work order as 'empty_weight;fm200_amount;colour'.

    Review fixes: ``except e:`` referenced an undefined name and itself raised
    NameError whenever the lookup failed — it is now ``except Exception as e``.
    The fallthrough/non-POST path returned a bare str, which Django cannot
    serve; it is now wrapped in an HttpResponse with the same body.
    """
    if request.method == 'POST':
        try:
            emir = Emir.objects.filter(is_emri=request.POST.dict()['is_emri']).first()
            response = emir.bos_agirlik_miktari + ';' + emir.fm200_miktari + ';' + emir.renk
            return HttpResponse(str(response))
        except Exception as e:
            print(e)
    return HttpResponse('tur')
@csrf_exempt
def getEmirNo(request):
    """Return the work-order number ('is_emri') for a given Emir id.

    POST param 'veri': primary key of the Emir record.
    Responds with the work-order value as plain text ('None' when no
    record matches, mirroring the original str() conversion), or 'NO'
    on a query error.
    """
    if request.method == 'POST':
        emir_id = request.POST.dict()['veri']
        try:
            is_emri = (Emir.objects.filter(id=emir_id)
                       .values_list('is_emri', flat=True).first())
            return HttpResponse(str(is_emri))
        except Exception:
            return HttpResponse('NO')
    # Bug fix: the original returned a bare str for non-POST requests,
    # which Django cannot serve; wrap it in an HttpResponse.
    return HttpResponse('is_emri')
# Which Test field to search per component type, and whether the scanned
# value must be cast to int before the membership test.
_KONTROL_ALANLARI = {
    'altnipel': ('lot_no', True),
    'ustnipel': ('baslangic_seri_no', False),
    'manometre': ('seri_no', False),
    'basinc': ('seri_no', False),
    'bakirmembran': ('lot_no', True),
    # NOTE(review): 'emniyet' compares the raw POST string against lot_no
    # values, exactly as the original did — confirm lot_no is stored as a
    # string for this type, otherwise the check can never match.
    'emniyet': ('lot_no', False),
    'sibop': ('lot_no', True),
}


def _test_kaydi_var_mi(tur, veri):
    """Return 'OK' if a Test record of type *tur* contains *veri*, else 'NO'."""
    alan, int_gerekli = _KONTROL_ALANLARI[tur]
    try:
        aranan = int(veri) if int_gerekli else veri
        kayitlar = Test.objects.filter(tur=tur).values_list(alan, flat=True)
        return 'OK' if aranan in kayitlar else 'NO'
    except Exception:
        # Bad cast or query failure counts as "not found", as before.
        return 'NO'


def _valf_govde_kontrol(veri):
    """Return 'OK' when the valve linked to montaj id *veri* passed its test."""
    try:
        valf_test_id = (Valf.objects.filter(valf_montaj_id=veri)
                        .values_list('valf_test_id', flat=True).first())
        if not isinstance(valf_test_id, int):
            return 'NO'
        uygun = (Valf_test.objects.filter(id=valf_test_id)
                 .values_list('uygun', flat=True).first())
        return 'OK' if uygun else 'NO'
    except Exception:
        return 'NO'


@csrf_exempt
def kontrolEt(request):
    """Check whether a scanned component value exists in the test records.

    POST params:
        tur    -- component type (one of _KONTROL_ALANLARI keys or
                  'valf_govde').
        veri   -- the scanned value to verify.
        isemri -- work-order number; read but not used by the check.

    Responds 'OK' or 'NO' as plain text; unknown types answer 'NO'.
    """
    if request.method == 'POST':
        tur = request.POST['tur']
        veri = request.POST['veri']
        isemri = request.POST['isemri']  # kept for parity with the old view
        if tur == 'valf_govde':
            r = _valf_govde_kontrol(veri)
        elif tur in _KONTROL_ALANLARI:
            r = _test_kaydi_var_mi(tur, veri)
        else:
            r = 'NO'
        return HttpResponse(r)
    # Non-POST requests fall through (implicit None), as in the original.
def _kurlenme_suresi_doldu_mu(vsn, alan):
    """Return 'OK' if the curing deadline stored in Uretim.<alan> for *vsn*
    is in the past, else 'NO' (also on missing record or query error)."""
    try:
        bitis = (Uretim.objects.filter(vsn=vsn)
                 .values_list(alan, flat=True).first())
        return 'OK' if bitis is not None and bitis < timezone.now() else 'NO'
    except Exception:
        return 'NO'


def _valf_test_hazir_mi(vsn):
    """Return 'OK' when the valve's montaj curing period has finished."""
    try:
        montaj_id = Valf.objects.filter(id=vsn).first().valf_montaj_id
        bitis = (Valf_montaj.objects.filter(id=montaj_id)
                 .first().kurlenme_bitis_tarihi)
        return 'OK' if bitis < timezone.now() else 'NO'
    except Exception:
        # Missing Valf/Valf_montaj rows surface as AttributeError here;
        # treated as "not ready", matching the original behaviour.
        return 'NO'


@csrf_exempt
def kurlenmeKontrol(request):
    """Answer curing/test readiness checks for a production unit.

    POST params:
        tur  -- check type: 'montaj_kurlenme', 'govde_kurlenme',
                'valf_test' or 'pdfkontrol'.
        veri -- VSN (Uretim) or record id (Valf), depending on 'tur'.

    Responds 'OK' or 'NO' as plain text; unknown types answer 'NO'.
    """
    if request.method == 'POST':
        tur = request.POST['tur']
        vsn = request.POST['veri']
        r = 'NO'
        if tur == 'montaj_kurlenme':
            r = _kurlenme_suresi_doldu_mu(vsn, 'montaj_kurlenme_zamani')
        elif tur == 'govde_kurlenme':
            r = _kurlenme_suresi_doldu_mu(vsn, 'govde_kurlenme_zamani')
        elif tur == 'valf_test':
            r = _valf_test_hazir_mi(vsn)
        elif tur == 'pdfkontrol':
            try:
                sayi = Valf.objects.filter(valf_montaj_id=vsn).count()
                r = 'OK' if sayi else 'NO'
            except Exception:
                r = 'NO'
        return HttpResponse(r)
    # Non-POST requests fall through (implicit None), as in the original.
@csrf_exempt
def newVSN(request):
    """Return the next free VSN (serial number) as plain text.

    The answer is (max existing Uretim.vsn) + 1, or 1 when the table is
    empty. Non-POST requests fall through (implicit None), as before.
    """
    if request.method == 'POST':
        # Single indexed query instead of materialising all rows and
        # indexing .values()[0] as the original did.
        son_vsn = (Uretim.objects.order_by('-vsn')
                   .values_list('vsn', flat=True).first())
        yeni_vsn = 1 if son_vsn is None else son_vsn + 1
        return HttpResponse(str(yeni_vsn))
@csrf_exempt
def hardreset(request):
    # NOTE(review): looks like a stub — the only visible behavior is this
    # debug print; no reset logic and no HttpResponse is returned from the
    # code shown here. Confirm whether the body was truncated.
    print('Hard')
|
normal
|
{
"blob_id": "74dd9151195fef41862c2793621172518f1f486d",
"index": 5248,
"step-1": "<mask token>\n\n\n@login_required\ndef index(request):\n grup = request.user.grup\n birim = request.user.birim\n emirler = Emir.objects.filter(durum='Aktif')\n l = list()\n for e in emirler.values():\n data = dict()\n data['is_emri'] = e['is_emri']\n data['valfmontaj'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_montaj_id__isnull=False).values_list('valf_montaj_id',\n flat=True).count() or 0\n data['valftest'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_test_id__isnull=False).values_list('valf_test_id', flat=True\n ).count() or 0\n data['valfgovde'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_govde_id__isnull=False).values_list('valf_govde_id', flat=True\n ).count() or 0\n data['fm200'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n fm200_azot_id__isnull=False).values_list('fm200_azot_id', flat=True\n ).count() or 0\n data['havuztest'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n havuz_id__isnull=False).values_list('havuz_id', flat=True).count(\n ) or 0\n data['finalmontaj'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_final_montaj_id__isnull=False).values_list(\n 'valf_final_montaj_id', flat=True).count() or 0\n l.append(data)\n print(l)\n return render(request, 'index.html', {'grup': grup, 'emirler': emirler,\n 'birim': birim, 'server': server, 'uretims': l})\n\n\n<mask token>\n\n\ndef getNextLotNo(tur):\n test_with_max_lot_no = Test.objects.filter(tur=tur).order_by('-lot_no'\n ).first()\n if test_with_max_lot_no == None:\n max_lot_no = 0\n else:\n max_lot_no = test_with_max_lot_no.lot_no\n return max_lot_no + 1\n\n\n<mask token>\n\n\n@login_required\n@csrf_exempt\ndef uretimkontrol(request):\n mac = request.user_agent.os.family\n ip = get_client_ip(request)\n ip == '192.168.1.36'\n grup = request.user.grup\n birim = request.user.birim\n fullname = request.user.first_name + ' ' + request.user.last_name\n if request.method == 'POST':\n if request.POST.dict()['tur'] == 'valfmontaj':\n veris = 
json.loads(request.POST.dict()['veri'])\n print(veris)\n t = Uretim(tur='valfmontaj', okunan_deger=veris[0], personel=\n request.user.get_full_name())\n t.save(force_insert=True)\n b = Bildirim(tur='baslangic', kisi=request.user.get_full_name())\n b.save(force_insert=True)\n elif request.POST.dict()['tur'] == 'kurlenme':\n veris = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n if not Uretim.objects.all():\n vsn = 1\n else:\n a = Uretim.objects.all().order_by('-vsn').values()[0]\n s = a['vsn']\n vsn = s + 1\n v = Valf(vsn=vsn, is_emri=veris[0])\n v.save(force_insert=True)\n e = Emir.objects.get(is_emri=veris[0])\n e.durum = 'Aktif'\n e.save()\n t = Uretim(tur='montaj_kurlenme' ,vsn = vsn, is_emri = veris[0] ,personel = request.user.get_full_name(),alt_nipel_no = veris[1],bakir_membran_no = veris[2],ust_nipel_no = veris[3],manometre_no = veris[4],basincanahtari_no = veris[5],montaj_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10))\n t.save(force_insert=True)\n return HttpResponse(str(vsn))\n \"\"\"\n print('deneme')\n is_emri_adi = veris[0]\n emir = Emir.objects.get(is_emri=is_emri_adi)\n personel_id = request.user.id\n alt_nipel_no = veris[1]\n bakir_membran_no = veris[2]\n ust_nipel_no = veris[3]\n manometre_no = veris[4]\n basincanahtari_no = veris[5]\n sibop = veris[6]\n print('deneme2')\n try:\n kayit_tarihi = timezone.now()\n valf_montaj = Valf_montaj(montaj_personel_id=personel_id,\n alt_nipel_no=alt_nipel_no, bakir_membran_no=\n bakir_membran_no, ust_nipel_no=ust_nipel_no,\n manometre_no=manometre_no, basincanahtari_no=\n basincanahtari_no, montaj_tarihi=kayit_tarihi, sibop=sibop)\n valf_montaj.save()\n valf = Valf(is_emri=emir, valf_montaj=valf_montaj)\n valf.save()\n return HttpResponse(str(valf.id))\n except Exception as err:\n print(' KAyıt HAstası > ', err)\n elif request.POST.dict()['tur'] == 'valftest':\n try:\n valf_seri_no = json.loads(request.POST.dict()['valf_seri_no'])\n uygun = json.loads(request.POST.dict()['uygun'])\n valf 
= Valf.objects.get(id=valf_seri_no)\n personel_id = User.objects.get(id=request.user.id)\n test_tarihi = timezone.now()\n acma = str(uygun)\n kapama = str(uygun)\n sebep = str(uygun)\n if uygun == True:\n sebep = None\n valf_test = Valf_test(test_personel=personel_id,\n test_tarihi=test_tarihi, uygun=uygun)\n valf_test.save()\n valf.valf_test = valf_test\n valf.save()\n except Exception as err:\n print(err)\n elif request.POST.dict()['tur'] == 'valfgovde':\n veri = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n v = Valf.objects.get(vsn=veri[3])\n is_emri = v.is_emri\n print('veri[5],sodyum miktarı:: ',veri[5] )\n t = Uretim.objects.get(vsn=veri[3])\n t.tur='govde_kurlenme'\n t.tork_degeri = veri[0]\n t.uygunluk = veri[1]\n t.sebep = veri[2]\n t.tsn = veri[4]\n t.personel = request.user.get_full_name()\n t.govde_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10)\n # t = Uretim(tur='valfgovde',tork_degeri = veri[0] ,is_emri=is_emri, uygunluk = veri[1] , sebep = veri[2],\n # vsn = veri[3],tsn = veri[4], personel = request.user.get_full_name(),govde_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10))\n t.save()\n \"\"\"\n valf_seri_no = veri[3]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_govde'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n kurlenme_bitis = timezone.now() + timezone.timedelta(minutes=10)\n tork = veri[0]\n tup_seri_no = veri[4]\n sodyum_miktari = veri[5]\n uygunluk = veri[1]\n sebep = veri[2]\n if uygunluk == 'on':\n sebep = None\n valf_govde = Valf_govde(valf=valf, personel_id=personel_id,\n kayit_tarihi=kayit_tarihi, kurlenme_bitis=kurlenme_bitis,\n tork=tork, tup_seri_no=tup_seri_no, sodyum_miktari=\n sodyum_miktari, uygunluk=uygunluk, sebep=sebep)\n valf_govde.save()\n elif request.POST.dict()['tur'] == 'fm200':\n veri = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n v = Valf.objects.get(vsn=veri[4])\n is_emri = v.is_emri\n print(veri)\n t = 
Uretim.objects.get(vsn=veri[4])\n t.tur='fm200_kurlenme'\n t.bos_agirlik = veri[0]\n t.rekorlu_agirlik = veri[1]\n t.fm200 = veri[2]\n t.azot = veri[3]\n t.personel = request.user.get_full_name()\n t.fm200_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10) \n t.save()\n \"\"\"\n valf_seri_no = veri[4]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_fm200'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n kurlenme_bitis = timezone.now() + timezone.timedelta(minutes=10)\n bos_agirlik = veri[0]\n rekorlu_agirlik = veri[1]\n fm200 = veri[2]\n azot = veri[3]\n valf_fm200 = Valf_fm200(valf=valf, personel_id=personel_id,\n kayit_tarihi=kayit_tarihi, kurlenme_bitis=kurlenme_bitis,\n bos_agirlik=bos_agirlik, rekorlu_agirlik=rekorlu_agirlik,\n fm200=fm200, azot=azot)\n valf_fm200.save()\n elif request.POST.dict()['tur'] == 'havuztest':\n veri = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n print(veri)\n v = Valf.objects.get(vsn=veri[0])\n is_emri = v.is_emri\n t = Uretim(tur='havuztest',vsn = veri[0],tsn = veri[0],is_emri=is_emri , uygunluk = veri[1] , \n acma = veri[2], kapatma = veri[3],sebep = veri[4], personel = request.user.get_full_name())\n t.save(force_insert=True)\n \"\"\"\n print('veri', veri)\n valf_seri_no = veri[0]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_havuz_test'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n uygunluk = veri[1]\n tup_cidar_sicaklik = veri[2]\n tup_basinc = veri[3]\n sebep = veri[4]\n if uygunluk:\n sebep = None\n valf_havuz = Valf_havuz(valf=valf, personel_id=personel_id,\n kayit_tarihi=kayit_tarihi, tup_cidar_sicaklik=\n tup_cidar_sicaklik, tup_basinc=tup_basinc, uygunluk=\n uygunluk, sebep=sebep)\n valf_havuz.save()\n elif request.POST.dict()['tur'] == 'finalmontaj':\n veri = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n \n print(veri)\n v = Valf.objects.get(vsn=veri[1])\n is_emri = v.is_emri\n t = 
Uretim.objects.get(vsn=veri[1])\n t.tur='finalmontaj'\n t.etiket_seri_no = veri[0]\n t.fsn = veri[2]\n t.funye_seri_omaj = veri[3]\n t.basinc_anahtari_omaj = veri[4]\n t. personel = request.user.get_full_name()\n #t = Uretim(tur='finalmontaj',etiket_seri_no = veri[0],is_emri=is_emri , vsn = veri[1] , fsn = veri[2],\n # funye_seri_omaj = veri[3],basinc_anahtari_omaj = veri[4], personel = request.user.get_full_name())\n t.save()\n tup_sayisi_str=Emir.objects.filter(is_emri=is_emri).values()[0]['tup_sayisi']\n \"\"\"\n valf_seri_no = veri[1]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_final_montaj'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n etiket_seri_no = veri[0]\n funye_seri_no = veri[2]\n funye_seri_omaj = veri[3]\n basinc_anahtari_omaj = veri[4]\n valf_final_montaj = Valf_final_montaj(valf=valf, personel_id=\n personel_id, kayit_tarihi=kayit_tarihi, etiket_seri_no=\n etiket_seri_no, funye_seri_no=funye_seri_no,\n funye_seri_omaj=funye_seri_omaj, basinc_anahtari_omaj=\n basinc_anahtari_omaj)\n valf_final_montaj.save()\n emir = Emir.objects.get(is_emri=valf.is_emri)\n emir_tup_sayisi = int(emir.tup_sayisi)\n emir_biten_valf_sayi = Valf.objects.filter(is_emri=emir, durum=\n 'valf_final_montaj').count()\n print('emir_biten_valf_sayi', emir_biten_valf_sayi)\n print('emir_tup_sayisi', emir_tup_sayisi)\n if emir_biten_valf_sayi == emir_tup_sayisi:\n emir.durum = 'Bitmiş'\n emir.save()\n b = Bildirim(tur='bitis', kisi=request.user.get_full_name())\n b.save(force_insert=True)\n now = timezone.now()\n montajkurlenmesi = Valf_montaj.objects.all()\n fm200kurlenmesi = Valf_fm200.objects.filter(\n fm200_kurlenme_bitis_tarihi__gte=now)\n acikemirleri = Emir.objects.filter(durum='Aktif').values()\n aktifemirler = Emir.objects.filter(durum='Aktif')\n govde_emir = list(dict.fromkeys(Valf.objects.filter(\n valf_govde_id__isnull=False).values_list('is_emri_id', flat=True)))\n fm200_emir = 
list(dict.fromkeys(Valf.objects.filter(\n fm200_azot_id__isnull=False).values_list('is_emri_id', flat=True)))\n return render(request, 'uretim-kontrol.html', {'grup': grup, 'birim':\n birim, 'ip': ip, 'now': now, 'server': server, 'acikemirleri':\n acikemirleri, 'fm200kurlenmes': fm200kurlenmesi, 'kurlenmes':\n montajkurlenmesi, 'aktifemirler': aktifemirler, 'govde_emir':\n govde_emir, 'fm200_emir': fm200_emir})\n\n\n<mask token>\n\n\ndef _403(request):\n return render(request, '403.html', {})\n\n\n<mask token>\n\n\n@csrf_exempt\ndef kullanicisil(request):\n username = request.POST.get('username')\n print(username)\n sildi = User.objects.filter(username=username).delete()\n if sildi:\n return HttpResponse('silindi')\n else:\n return HttpResponse('silinemedi')\n\n\n<mask token>\n\n\n@csrf_exempt\ndef personeldurum(request):\n p = request.POST.get('personel')\n g = request.POST.get('gun_sayisi')\n print(p, g)\n bugun = timezone.now()\n gun = int(request.POST.get('gun_sayisi'))\n kac_gun = bugun - timezone.timedelta(days=gun)\n veris = Test.objects.filter(test_tarihi__range=[kac_gun, bugun])\n veri = list()\n try:\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='manometre').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='basinc').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='altnipel').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='ustnipel').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='bakirmembran').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='emniyet').filter(testi_yapan=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n 
filter(tur='kurlenme').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='valftest').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='valfgovde').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='fm200').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='havuztest').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='finalmontaj').filter(personel=p).count())\n except:\n veri = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10]\n print(veri)\n return JsonResponse(veri, safe=False)\n\n\n<mask token>\n\n\n@csrf_exempt\ndef kontrolEt(request):\n if request.method == 'POST':\n tur = request.POST['tur']\n veri = request.POST['veri']\n isemri = request.POST['isemri']\n t = Test.objects.filter(tur=tur)\n r = 'NO'\n if tur == 'altnipel':\n t = Test.objects.filter(tur=tur)\n try:\n if int(veri) in t.values_list('lot_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'ustnipel':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('baslangic_seri_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'manometre':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('seri_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except Exception as e:\n print(e)\n r = 'NO'\n if tur == 'basinc':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('seri_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'bakirmembran':\n t = Test.objects.filter(tur=tur)\n try:\n if int(veri) in t.values_list('lot_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'emniyet':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('lot_no', flat=True):\n r = 
'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'valf_govde':\n try:\n valf_id = Valf.objects.filter(valf_montaj_id=veri).values_list(\n 'valf_test_id', flat=True).first()\n if isinstance(valf_id, int):\n Valf_test.objects.filter(id=valf_id).values_list('uygun',\n flat=True).first()\n if Valf_test.objects.filter(id=valf_id).values_list('uygun'\n , flat=True).first():\n r = 'OK'\n else:\n r = 'NO'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'sibop':\n print(tur, veri, t.values_list('lot_no', flat=True))\n t = Test.objects.filter(tur=tur)\n try:\n if int(veri) in t.values_list('lot_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n return HttpResponse(r)\n\n\n@csrf_exempt\ndef kurlenmeKontrol(request):\n if request.method == 'POST':\n r = 'NO'\n tur = request.POST['tur']\n vsn = request.POST['veri']\n print('kurlenmeKontrol', tur, vsn)\n if tur == 'montaj_kurlenme':\n try:\n u = Uretim.objects.filter(vsn=vsn)\n print(u, '---------------')\n if u.values()[0]['montaj_kurlenme_zamani'] < timezone.now():\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n elif tur == 'govde_kurlenme':\n try:\n u = Uretim.objects.filter(vsn=vsn)\n print('govde_kurlenme_zamani', u.values()[0][\n 'govde_kurlenme_zamani'])\n print('now', timezone.now())\n if u.values()[0]['govde_kurlenme_zamani'] < timezone.now():\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n elif tur == 'valf_test':\n print('içerdeyim-----> Valf Test')\n try:\n print(vsn, '----------------------------')\n valf_montaj_id = Valf.objects.filter(id=vsn).first(\n ).valf_montaj_id\n print(valf_montaj_id)\n tarih = Valf_montaj.objects.filter(id=valf_montaj_id).first(\n ).kurlenme_bitis_tarihi\n print(tarih)\n print(type(timezone.now()), timezone.now())\n print(type(tarih), tarih)\n if tarih < timezone.now():\n print('büyüktür')\n r = 'OK'\n else:\n print('küçük')\n r = 'NO'\n except Exception as err:\n print('r', err)\n r = 'NO'\n elif tur == 'pdfkontrol':\n print(vsn)\n try:\n if 
Valf.objects.filter(valf_montaj_id=vsn).count():\n r = 'OK'\n else:\n r = 'NO'\n except Exception as err:\n r = 'NO'\n print(err)\n return HttpResponse(r)\n\n\n@csrf_exempt\ndef newVSN(request):\n if request.method == 'POST':\n vsn = ''\n if not Uretim.objects.all():\n vsn = 1\n else:\n a = Uretim.objects.all().order_by('-vsn').values()[0]\n s = a['vsn']\n print('sssss', s)\n vsn = s + 1\n print(vsn)\n r = str(vsn)\n return HttpResponse(r)\n\n\n@csrf_exempt\ndef hardreset(request):\n print('Hard')\n",
"step-2": "<mask token>\n\n\ndef bildirim(request):\n bugun = timezone.now()\n birGunOnce = bugun - timezone.timedelta(days=14)\n bildirimq = Bildirim.objects.filter(zaman__range=[birGunOnce, bugun])\n temp = []\n for o in bildirimq.values():\n temp.append(o)\n bildirims = list(temp)\n print(bildirims)\n return JsonResponse(bildirims, safe=False)\n\n\n@login_required\ndef index(request):\n grup = request.user.grup\n birim = request.user.birim\n emirler = Emir.objects.filter(durum='Aktif')\n l = list()\n for e in emirler.values():\n data = dict()\n data['is_emri'] = e['is_emri']\n data['valfmontaj'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_montaj_id__isnull=False).values_list('valf_montaj_id',\n flat=True).count() or 0\n data['valftest'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_test_id__isnull=False).values_list('valf_test_id', flat=True\n ).count() or 0\n data['valfgovde'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_govde_id__isnull=False).values_list('valf_govde_id', flat=True\n ).count() or 0\n data['fm200'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n fm200_azot_id__isnull=False).values_list('fm200_azot_id', flat=True\n ).count() or 0\n data['havuztest'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n havuz_id__isnull=False).values_list('havuz_id', flat=True).count(\n ) or 0\n data['finalmontaj'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_final_montaj_id__isnull=False).values_list(\n 'valf_final_montaj_id', flat=True).count() or 0\n l.append(data)\n print(l)\n return render(request, 'index.html', {'grup': grup, 'emirler': emirler,\n 'birim': birim, 'server': server, 'uretims': l})\n\n\n@login_required\ndef arama(request):\n mac = request.user_agent.os.family\n q = request.GET.get('q') or request.GET.get('uretim')\n emir = request.GET.get('emir')\n emirs = Emir.objects.all()\n media_url = settings.MEDIA_URL\n aranan = ''\n if q:\n aranan = q\n elif emir:\n aranan = 'isemri'\n else:\n 
print('bos')\n grup = request.user.grup\n birim = request.user.birim\n testler = Test.objects.filter(tur=q)\n print(q)\n if q == 'valfmontaj':\n uretims = Valf_montaj.objects.all()\n elif q == 'valfgovde':\n uretims = Valf_govde.objects.all()\n elif q == 'fm200':\n uretims = Valf_fm200.objects.all()\n elif q == 'havuztest':\n uretims = Valf_havuz.objects.all()\n elif q == 'finalmontaj':\n uretims = Valf_final_montaj.objects.all()\n else:\n uretims = Uretim.objects.filter(tur=q)\n print(uretims)\n if emir == 'tumu':\n emirler = Emir.objects.all()\n else:\n emirler = Emir.objects.filter(is_emri=emir)\n return render(request, 'arama.html', {'mac': mac, 'testler': testler,\n 'grup': grup, 'emirler': emirler, 'aranan': aranan, 'emirs': emirs,\n 'birim': birim, 'media_url': media_url, 'uretims': uretims,\n 'server': server})\n\n\n@login_required\n@csrf_exempt\ndef giriskalite(request):\n mac = request.user_agent.os.family\n grup = request.user.grup\n birim = request.user.birim\n fullname = request.user.first_name + ' ' + request.user.last_name\n if request.method == 'POST':\n if request.POST.dict()['tur'] == 'basinc':\n veris = json.loads(request.POST.dict()['veri'])\n for veri in veris:\n t = Test(tur='basinc', seri_no=veri[0], acma=veri[1],\n kapatma=veri[2], kabul_durumu=veri[3], testi_yapan=fullname\n )\n t.save(force_insert=True)\n elif request.POST.dict()['tur'] == 'manometre':\n veris = json.loads(request.POST.dict()['veri'])\n for veri in veris:\n t = Test(tur='manometre', seri_no=veri[0], okunan_deger=\n veri[1], kabul_durumu=veri[2], testi_yapan=fullname)\n t.save(force_insert=True)\n elif request.POST.dict()['tur'] == 'altnipel':\n print(request.POST)\n kontrolResult = nipelSeriNoKontrol(request)\n if kontrolResult == True:\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n fs.save(upload_file.name, upload_file)\n next_lot_no = getNextLotNo(request.POST.dict()['tur'])\n t = Test(tur='altnipel', lot_no=next_lot_no, 
pdf_ismi=\n request.POST.get('pdf_ismi'), baslangic_seri_no=request\n .POST.get('baslangic_seri_no'), bitis_seri_no=request.\n POST.get('bitis_seri_no'), kabul_durumu=request.POST.\n get('kabulAlt'), testi_yapan=fullname)\n t.save(force_insert=True)\n messages.success(request,\n 'Alt nipel testi başarıyla kaydedildi.')\n elif request.POST.dict()['tur'] == 'ustnipel':\n print(request.POST)\n kontrolResult = nipelSeriNoKontrol(request)\n if kontrolResult == True:\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n fs.save(upload_file.name, upload_file)\n next_lot_no = getNextLotNo(request.POST.dict()['tur'])\n t = Test(tur='ustnipel', lot_no=next_lot_no, pdf_ismi=\n request.POST.get('pdf_ismi'), baslangic_seri_no=request\n .POST.get('baslangic_seri_no'), bitis_seri_no=request.\n POST.get('bitis_seri_no'), kabul_durumu=request.POST.\n get('kabulUst'), testi_yapan=fullname)\n t.save(force_insert=True)\n messages.success(request,\n 'Üst nipel testi başarıyla kaydedildi.')\n elif request.POST.dict()['tur'] == 'bakirmembran':\n print(request.POST)\n next_lot_no = getNextLotNo(request.POST.get('test_tur'))\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n fs.save(upload_file.name, upload_file)\n t = Test(tur=request.POST.get('test_tur'), lot_no=next_lot_no,\n pdf_ismi=request.POST.get('pdf_ismi'), test_basinci=request\n .POST.get('test_basinci'), patlama_basinci=request.POST.get\n ('patlama_basinci'), kabul_durumu=request.POST.get(\n 'kabulBak'), testi_yapan=fullname)\n t.save(force_insert=True)\n if request.POST.get('test_tur') == 'bakirmembran':\n messages.success(request,\n 'Bakır membran testi başarıyla kaydedildi.')\n else:\n messages.success(request,\n 'Emniyet ventili testi başarıyla kaydedildi.')\n \"\"\"\n elif request.POST.get('tur') == 'emniyet':\n print(request.POST)\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n 
fs.save(upload_file.name,upload_file)\n \n next_lot_no = getNextLotNo( request.POST.dict()['tur'])\n t = Test(tur='emniyet',lot_no =next_lot_no, pdf_ismi = request.POST.get('pdf_ismi') ,test_basinci = request.POST.get('test_basinci'), patlama_basinci = request.POST.get('patlama_basinci'),kabul_durumu = request.POST.get('kabulEmn'),testi_yapan = fullname)\n t.save(force_insert=True)\n messages.success(request,'Emniyet ventili testi başarıyla kaydedildi.')\n \"\"\"\n return render(request, 'giris-kalite-kontrol.html', {'mac': mac, 'grup':\n grup, 'birim': birim, 'server': server})\n\n\ndef getNextLotNo(tur):\n test_with_max_lot_no = Test.objects.filter(tur=tur).order_by('-lot_no'\n ).first()\n if test_with_max_lot_no == None:\n max_lot_no = 0\n else:\n max_lot_no = test_with_max_lot_no.lot_no\n return max_lot_no + 1\n\n\n<mask token>\n\n\n@login_required\n@csrf_exempt\ndef uretimkontrol(request):\n mac = request.user_agent.os.family\n ip = get_client_ip(request)\n ip == '192.168.1.36'\n grup = request.user.grup\n birim = request.user.birim\n fullname = request.user.first_name + ' ' + request.user.last_name\n if request.method == 'POST':\n if request.POST.dict()['tur'] == 'valfmontaj':\n veris = json.loads(request.POST.dict()['veri'])\n print(veris)\n t = Uretim(tur='valfmontaj', okunan_deger=veris[0], personel=\n request.user.get_full_name())\n t.save(force_insert=True)\n b = Bildirim(tur='baslangic', kisi=request.user.get_full_name())\n b.save(force_insert=True)\n elif request.POST.dict()['tur'] == 'kurlenme':\n veris = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n if not Uretim.objects.all():\n vsn = 1\n else:\n a = Uretim.objects.all().order_by('-vsn').values()[0]\n s = a['vsn']\n vsn = s + 1\n v = Valf(vsn=vsn, is_emri=veris[0])\n v.save(force_insert=True)\n e = Emir.objects.get(is_emri=veris[0])\n e.durum = 'Aktif'\n e.save()\n t = Uretim(tur='montaj_kurlenme' ,vsn = vsn, is_emri = veris[0] ,personel = request.user.get_full_name(),alt_nipel_no = 
veris[1],bakir_membran_no = veris[2],ust_nipel_no = veris[3],manometre_no = veris[4],basincanahtari_no = veris[5],montaj_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10))\n t.save(force_insert=True)\n return HttpResponse(str(vsn))\n \"\"\"\n print('deneme')\n is_emri_adi = veris[0]\n emir = Emir.objects.get(is_emri=is_emri_adi)\n personel_id = request.user.id\n alt_nipel_no = veris[1]\n bakir_membran_no = veris[2]\n ust_nipel_no = veris[3]\n manometre_no = veris[4]\n basincanahtari_no = veris[5]\n sibop = veris[6]\n print('deneme2')\n try:\n kayit_tarihi = timezone.now()\n valf_montaj = Valf_montaj(montaj_personel_id=personel_id,\n alt_nipel_no=alt_nipel_no, bakir_membran_no=\n bakir_membran_no, ust_nipel_no=ust_nipel_no,\n manometre_no=manometre_no, basincanahtari_no=\n basincanahtari_no, montaj_tarihi=kayit_tarihi, sibop=sibop)\n valf_montaj.save()\n valf = Valf(is_emri=emir, valf_montaj=valf_montaj)\n valf.save()\n return HttpResponse(str(valf.id))\n except Exception as err:\n print(' KAyıt HAstası > ', err)\n elif request.POST.dict()['tur'] == 'valftest':\n try:\n valf_seri_no = json.loads(request.POST.dict()['valf_seri_no'])\n uygun = json.loads(request.POST.dict()['uygun'])\n valf = Valf.objects.get(id=valf_seri_no)\n personel_id = User.objects.get(id=request.user.id)\n test_tarihi = timezone.now()\n acma = str(uygun)\n kapama = str(uygun)\n sebep = str(uygun)\n if uygun == True:\n sebep = None\n valf_test = Valf_test(test_personel=personel_id,\n test_tarihi=test_tarihi, uygun=uygun)\n valf_test.save()\n valf.valf_test = valf_test\n valf.save()\n except Exception as err:\n print(err)\n elif request.POST.dict()['tur'] == 'valfgovde':\n veri = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n v = Valf.objects.get(vsn=veri[3])\n is_emri = v.is_emri\n print('veri[5],sodyum miktarı:: ',veri[5] )\n t = Uretim.objects.get(vsn=veri[3])\n t.tur='govde_kurlenme'\n t.tork_degeri = veri[0]\n t.uygunluk = veri[1]\n t.sebep = veri[2]\n t.tsn = veri[4]\n 
t.personel = request.user.get_full_name()\n t.govde_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10)\n # t = Uretim(tur='valfgovde',tork_degeri = veri[0] ,is_emri=is_emri, uygunluk = veri[1] , sebep = veri[2],\n # vsn = veri[3],tsn = veri[4], personel = request.user.get_full_name(),govde_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10))\n t.save()\n \"\"\"\n valf_seri_no = veri[3]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_govde'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n kurlenme_bitis = timezone.now() + timezone.timedelta(minutes=10)\n tork = veri[0]\n tup_seri_no = veri[4]\n sodyum_miktari = veri[5]\n uygunluk = veri[1]\n sebep = veri[2]\n if uygunluk == 'on':\n sebep = None\n valf_govde = Valf_govde(valf=valf, personel_id=personel_id,\n kayit_tarihi=kayit_tarihi, kurlenme_bitis=kurlenme_bitis,\n tork=tork, tup_seri_no=tup_seri_no, sodyum_miktari=\n sodyum_miktari, uygunluk=uygunluk, sebep=sebep)\n valf_govde.save()\n elif request.POST.dict()['tur'] == 'fm200':\n veri = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n v = Valf.objects.get(vsn=veri[4])\n is_emri = v.is_emri\n print(veri)\n t = Uretim.objects.get(vsn=veri[4])\n t.tur='fm200_kurlenme'\n t.bos_agirlik = veri[0]\n t.rekorlu_agirlik = veri[1]\n t.fm200 = veri[2]\n t.azot = veri[3]\n t.personel = request.user.get_full_name()\n t.fm200_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10) \n t.save()\n \"\"\"\n valf_seri_no = veri[4]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_fm200'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n kurlenme_bitis = timezone.now() + timezone.timedelta(minutes=10)\n bos_agirlik = veri[0]\n rekorlu_agirlik = veri[1]\n fm200 = veri[2]\n azot = veri[3]\n valf_fm200 = Valf_fm200(valf=valf, personel_id=personel_id,\n kayit_tarihi=kayit_tarihi, kurlenme_bitis=kurlenme_bitis,\n bos_agirlik=bos_agirlik, 
rekorlu_agirlik=rekorlu_agirlik,\n fm200=fm200, azot=azot)\n valf_fm200.save()\n elif request.POST.dict()['tur'] == 'havuztest':\n veri = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n print(veri)\n v = Valf.objects.get(vsn=veri[0])\n is_emri = v.is_emri\n t = Uretim(tur='havuztest',vsn = veri[0],tsn = veri[0],is_emri=is_emri , uygunluk = veri[1] , \n acma = veri[2], kapatma = veri[3],sebep = veri[4], personel = request.user.get_full_name())\n t.save(force_insert=True)\n \"\"\"\n print('veri', veri)\n valf_seri_no = veri[0]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_havuz_test'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n uygunluk = veri[1]\n tup_cidar_sicaklik = veri[2]\n tup_basinc = veri[3]\n sebep = veri[4]\n if uygunluk:\n sebep = None\n valf_havuz = Valf_havuz(valf=valf, personel_id=personel_id,\n kayit_tarihi=kayit_tarihi, tup_cidar_sicaklik=\n tup_cidar_sicaklik, tup_basinc=tup_basinc, uygunluk=\n uygunluk, sebep=sebep)\n valf_havuz.save()\n elif request.POST.dict()['tur'] == 'finalmontaj':\n veri = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n \n print(veri)\n v = Valf.objects.get(vsn=veri[1])\n is_emri = v.is_emri\n t = Uretim.objects.get(vsn=veri[1])\n t.tur='finalmontaj'\n t.etiket_seri_no = veri[0]\n t.fsn = veri[2]\n t.funye_seri_omaj = veri[3]\n t.basinc_anahtari_omaj = veri[4]\n t. 
personel = request.user.get_full_name()\n #t = Uretim(tur='finalmontaj',etiket_seri_no = veri[0],is_emri=is_emri , vsn = veri[1] , fsn = veri[2],\n # funye_seri_omaj = veri[3],basinc_anahtari_omaj = veri[4], personel = request.user.get_full_name())\n t.save()\n tup_sayisi_str=Emir.objects.filter(is_emri=is_emri).values()[0]['tup_sayisi']\n \"\"\"\n valf_seri_no = veri[1]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_final_montaj'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n etiket_seri_no = veri[0]\n funye_seri_no = veri[2]\n funye_seri_omaj = veri[3]\n basinc_anahtari_omaj = veri[4]\n valf_final_montaj = Valf_final_montaj(valf=valf, personel_id=\n personel_id, kayit_tarihi=kayit_tarihi, etiket_seri_no=\n etiket_seri_no, funye_seri_no=funye_seri_no,\n funye_seri_omaj=funye_seri_omaj, basinc_anahtari_omaj=\n basinc_anahtari_omaj)\n valf_final_montaj.save()\n emir = Emir.objects.get(is_emri=valf.is_emri)\n emir_tup_sayisi = int(emir.tup_sayisi)\n emir_biten_valf_sayi = Valf.objects.filter(is_emri=emir, durum=\n 'valf_final_montaj').count()\n print('emir_biten_valf_sayi', emir_biten_valf_sayi)\n print('emir_tup_sayisi', emir_tup_sayisi)\n if emir_biten_valf_sayi == emir_tup_sayisi:\n emir.durum = 'Bitmiş'\n emir.save()\n b = Bildirim(tur='bitis', kisi=request.user.get_full_name())\n b.save(force_insert=True)\n now = timezone.now()\n montajkurlenmesi = Valf_montaj.objects.all()\n fm200kurlenmesi = Valf_fm200.objects.filter(\n fm200_kurlenme_bitis_tarihi__gte=now)\n acikemirleri = Emir.objects.filter(durum='Aktif').values()\n aktifemirler = Emir.objects.filter(durum='Aktif')\n govde_emir = list(dict.fromkeys(Valf.objects.filter(\n valf_govde_id__isnull=False).values_list('is_emri_id', flat=True)))\n fm200_emir = list(dict.fromkeys(Valf.objects.filter(\n fm200_azot_id__isnull=False).values_list('is_emri_id', flat=True)))\n return render(request, 'uretim-kontrol.html', {'grup': grup, 'birim':\n birim, 'ip': ip, 'now': 
now, 'server': server, 'acikemirleri':\n acikemirleri, 'fm200kurlenmes': fm200kurlenmesi, 'kurlenmes':\n montajkurlenmesi, 'aktifemirler': aktifemirler, 'govde_emir':\n govde_emir, 'fm200_emir': fm200_emir})\n\n\n@csrf_exempt\ndef acikisemirleri(request):\n emirler = Emir.objects.filter(durum__in=('Aktif', 'Başlanmamış'))\n temp = []\n for o in emirler.values():\n temp.append(o['is_emri'])\n veri = list(temp)\n\n\n@login_required\n@csrf_exempt\ndef isemri(request):\n mac = request.user_agent.os.family\n grup = request.user.grup\n birim = request.user.birim\n fullname = request.user.first_name + ' ' + request.user.last_name\n emirler = Emir.objects.all()\n form = IsEmri(request.POST)\n if request.method == 'POST':\n if 'tur' in request.POST.dict():\n if request.POST.dict()['tur'] == 'oncelik':\n veri = json.loads(request.POST.dict()['veri'])\n print(veri)\n for key in veri:\n em = Emir.objects.get(is_emri=key)\n em.oncelik = veri[key]\n em.save()\n o = Bildirim(tur='oncelik')\n o.save()\n return HttpResponse('onceliktamam')\n elif form.is_valid():\n if not Emir.objects.all():\n son_oncelik = 1\n else:\n a = Emir.objects.all().order_by('-oncelik').values()[0]\n s = a['oncelik']\n son_oncelik = s + 1\n emir = form.save()\n emir.refresh_from_db()\n emir.is_emri = form.cleaned_data.get('is_emri')\n emir.urun_kodu = form.cleaned_data.get('urun_kodu')\n emir.baslangic = form.cleaned_data.get('baslangic')\n emir.bitis = form.cleaned_data.get('bitis')\n emir.emri_veren = form.cleaned_data.get('emri_veren')\n emir.tup_govde_turu = form.cleaned_data.get('tup_govde_turu')\n emir.valf_turu = form.cleaned_data.get('valf_turu')\n emir.renk = form.cleaned_data.get('renk')\n emir.emniyet_ventil_turu = form.cleaned_data.get(\n 'emniyet_ventil_turu')\n emir.siparis = form.cleaned_data.get('siparis')\n emir.fm200bosagirlikmindeger = form.cleaned_data.get(\n 'fm200bosagirlikmindeger')\n emir.fm200bosagirlikmaxdeger = form.cleaned_data.get(\n 'fm200bosagirlikmaxdeger')\n 
emir.fm200dolummiktarimindeger = form.cleaned_data.get(\n 'fm200dolummiktarimindeger')\n emir.fm200dolummiktarimaxdeger = form.cleaned_data.get(\n 'fm200dolummiktarimaxdeger')\n t = Bildirim(tur='is emri', emri_veren_grup=grup, emri_veren=\n request.user.get_full_name(), is_emri=form.cleaned_data.get\n ('is_emri'))\n t.save(force_insert=True)\n emir.oncelik = son_oncelik\n messages.success(request, 'Emir başarıyla eklendi!')\n emir.save()\n form.full_clean()\n return HttpResponseRedirect(reverse('isemri'))\n else:\n messages.warning(request,\n 'İş emri eklenemedi.Lütfen tekrar deneyin!Hata: {}'.format(\n form.errors))\n else:\n form = IsEmri()\n form.fields['emri_veren'].initial = fullname\n return render(request, 'is-emri.html', {'form': form, 'emirler':\n emirler, 'mac': mac, 'fullname': fullname, 'grup': grup, 'birim':\n birim, 'server': server})\n\n\ndef yetkilendirme(request):\n mac = request.user_agent.os.family\n grup = 'Yönetici'\n birim = 'IT'\n kullanicilar = User.objects.all()\n if (grup == 'Yönetici' and birim == 'IT' or grup == 'Mühendis' and \n birim == 'IT'):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.refresh_from_db()\n user.first_name = form.cleaned_data.get('first_name')\n user.last_name = form.cleaned_data.get('last_name')\n user.grup = form.cleaned_data.get('grup')\n user.save()\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password1')\n messages.success(request,\n '{} isimli kullanıcı {} isimli gruba eklendi!'.format(\n username, user.grup))\n return HttpResponseRedirect(reverse('yetkilendirme'))\n else:\n print(form.errors)\n else:\n form = UserRegisterForm()\n return render(request, 'kullanici-yetkilendirme.html', {'form':\n form, 'kullanicilar': kullanicilar, 'mac': mac, 'grup': grup,\n 'birim': birim, 'server': server})\n else:\n return HttpResponseRedirect(reverse('403'))\n\n\n@login_required\ndef performans(request):\n mac = 
request.user_agent.os.family\n grup = request.user.grup\n birim = request.user.birim\n kullanicilar = User.objects.all()\n return render(request, 'performans.html', {'mac': mac, 'grup': grup,\n 'birim': birim, 'kullanicilar': kullanicilar, 'server': server})\n\n\n<mask token>\n\n\n@login_required\ndef ulogout(request):\n logout(request)\n return HttpResponseRedirect(reverse('ulogin'))\n\n\n@csrf_exempt\ndef ulogin(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n login(request, user)\n print('{} kullanıcısı tarafından başarılı giriş'.format(\n username))\n return redirect('arama')\n else:\n messages.warning(request,\n 'Kullanıcı adınızı yada parolanızı yanlış girdiniz.')\n else:\n print('Birisi login olmayı denedi ve başarısız oldu!')\n messages.warning(request,\n 'Kullanıcı adınızı yada parolanızı yanlış girdiniz.')\n return HttpResponseRedirect(reverse('ulogin'))\n else:\n return render(request, 'login.html', {})\n\n\ndef _403(request):\n return render(request, '403.html', {})\n\n\ndef handler404(request, exception):\n return render(request, '403.html', status=404)\n\n\n@csrf_exempt\ndef kullanicijson(request):\n username = request.POST.get('username')\n b = User.objects.filter(username=username).values('first_name',\n 'last_name', 'username', 'grup')\n veri = list(b)\n return JsonResponse(veri, safe=False)\n\n\n@csrf_exempt\ndef kullanicisil(request):\n username = request.POST.get('username')\n print(username)\n sildi = User.objects.filter(username=username).delete()\n if sildi:\n return HttpResponse('silindi')\n else:\n return HttpResponse('silinemedi')\n\n\n@csrf_exempt\ndef kullaniciduzelt(request):\n veri = request.POST.get('bilgi')\n veri = json.loads(veri)\n a = User.objects.get(username=veri['eskisi'])\n a.username = veri['username']\n a.first_name = veri['first_name']\n a.last_name = 
veri['last_name']\n a.grup = veri['grup']\n a.birim = veri['birim']\n a.save()\n return HttpResponse('duzeltildi')\n\n\n<mask token>\n\n\n@csrf_exempt\ndef pdf(request):\n if request.GET.get('qr'):\n qr = request.GET.get('qr')\n print(qr.split(' ')[0])\n i = qr.split(' ')[0]\n print('---------------------')\n valf_no = request.GET.get('vsn')\n Valf_montaj_Data = Valf_montaj.objects.filter(id=Valf.objects.filter(id\n =valf_no).first().valf_montaj_id).first()\n Valf_fm200_Data = Valf_fm200.objects.filter(id=Valf.objects.filter(id=\n valf_no).first().fm200_azot_id).first()\n Valf_havuz_Data = Valf_havuz.objects.filter(id=Valf.objects.filter(id=\n valf_no).first().havuz_id).first()\n Valf_final_Data = Valf_final_montaj.objects.filter(id=Valf.objects.\n filter(id=valf_no).first().valf_final_montaj_id).first()\n Valf_test_Data = Valf_test.objects.filter(id=Valf.objects.filter(id=\n valf_no).first().valf_test_id).first()\n Valf_govde_Data = Valf_govde.objects.filter(id=Valf.objects.filter(id=\n valf_no).first().valf_govde_id).first()\n Emir_Data = Emir.objects.filter(is_emri=i).first()\n valf_final = Valf.objects.filter(id=valf_no).values_list(\n 'valf_final_montaj_id', flat=True).first()\n urun_seri_no = Valf_final_montaj.objects.filter(id=valf_final).values_list(\n 'urun_seri_no', flat=True).first()\n print('---------------------')\n try:\n valfmontajPersonel = get_first_and_lastname(User.objects.filter(id=\n Valf_montaj_Data.montaj_personel_id).first().username)\n except:\n valfmontajPersonel = ''\n try:\n valfmontajTarih = Valf_montaj_Data.montaj_tarihi\n except:\n valfmontajTarih = ''\n try:\n altnipelno = Valf_montaj_Data.alt_nipel_no\n except:\n altnipelno = ''\n try:\n ustnipelno = Valf_montaj_Data.ust_nipel_no\n except:\n ustnipelno = ''\n try:\n switchno = Valf_montaj_Data.basincanahtari_no\n except:\n switchno = ''\n try:\n manometreno = Valf_montaj_Data.manometre_no\n except:\n manometreno = ''\n try:\n valftestPersonel = 
get_first_and_lastname(User.objects.filter(id=\n Valf_test_Data.test_personel_id).first().username)\n except:\n valftestPersonel = ''\n try:\n valftestTarih = Valf_test_Data.test_tarihi\n except:\n valftestTarih = ''\n try:\n valfTestUygun = ('Uygun' if Valf_test_Data.uygun == True else\n 'Uygun Değil')\n except:\n valfTestUygun = Valf_test_Data.uygun\n try:\n valfgovdePersonel = get_first_and_lastname(User.objects.filter(id=\n Valf_govde_Data.govde_personel_id).first().username)\n except:\n valfgovdePersonel = ''\n try:\n valfgovdeTarih = Valf_govde_Data.govde_tarihi\n except:\n valfgovdeTarih = ''\n try:\n valfGovdeUygun = ('Uygun' if Valf_govde_Data.uygunluk == True else\n 'Uygun Değil')\n except:\n valfGovdeUygun = ''\n try:\n fm200Personel = get_first_and_lastname(User.objects.filter(id=\n Valf_fm200_Data.fm200_personel_id).first().username)\n except:\n fm200Personel = ''\n try:\n fm200Tarih = Valf_fm200_Data.kayit_tarihi\n except:\n fm200Tarih = ''\n try:\n bosAgirlik = Valf_fm200_Data.bos_agirlik\n except:\n bosAgirlik = ''\n try:\n doluAgirlik = Valf_fm200_Data.dolu_agirlik\n except:\n doluAgirlik = ''\n try:\n bar = Valf_fm200_Data.bar\n except:\n bar = ''\n try:\n havuztestPersonel = get_first_and_lastname(User.objects.filter(id=\n Valf_havuz_Data.havuz_personel_id).first().username)\n except:\n havuztestPersonel = ''\n try:\n havuztestTarih = Valf_havuz_Data.kayit_tarihi\n except:\n havuztestTarih = ''\n try:\n havuzTestUygun = ('Uygun' if Valf_havuz_Data.uygunluk == True else\n 'Uygun Değil')\n except:\n havuzTestUygun = ''\n try:\n finalmontajPersonel = get_first_and_lastname(User.objects.filter(id\n =Valf_final_Data.personel_id).first().username)\n except:\n finalmontajPersonel = ''\n try:\n finalmontajTarih = Valf_final_Data.kayit_tarihi\n except:\n finalmontajTarih = ''\n try:\n membranTipi = Emir_Data.valf_turu\n except:\n membranTipi = ''\n try:\n ventilTipi = Emir_Data.emniyet_ventil_turu\n except:\n ventilTipi = ''\n try:\n tugovdetipi = 
Emir_Data.tup_govde_turu\n except:\n tugovdetipi = ''\n try:\n siboplotno = Valf_montaj_Data.sibop\n except:\n siboplotno = ''\n print(valftestPersonel, Emir_Data.emniyet_ventil_turu)\n veri = 'veri'\n html_string = render_to_string('external/pdf-template.html', {'veri':\n veri, 'qr': urun_seri_no, 'valfmontajPersonel': valfmontajPersonel,\n 'valfmontajTarih': valfmontajTarih, 'valfgovdePersonel':\n valfgovdePersonel, 'valftestPersonel': valftestPersonel,\n 'valftestTarih': valftestTarih, 'valfTestUygun': valfTestUygun,\n 'havuzTestUygun': havuzTestUygun, 'valfgovdePersonel':\n valftestPersonel, 'valfgovdeTarih': valfgovdeTarih,\n 'valfGovdeUygun': valfGovdeUygun, 'valfMontajUygun': 'Uygun*',\n 'fm200Uygun': 'Uygun*', 'finalMontajUygun': 'Uygun*',\n 'fm200Personel': fm200Personel, 'fm200Tarih': fm200Tarih,\n 'bosAgirlik': bosAgirlik, 'doluAgirlik': doluAgirlik,\n 'havuztestPersonel': havuztestPersonel, 'havuztestTarih':\n havuztestTarih, 'finalmontajPersonel': finalmontajPersonel,\n 'finalmontajTarih': finalmontajTarih, 'altnipelno': altnipelno,\n 'ustnipelno': ustnipelno, 'switchno': switchno, 'manometreno':\n manometreno, 'is_emri': i, 'membranTipi': membranTipi, 'ventilTipi':\n ventilTipi, 'urunserino': urun_seri_no, 'bar': bar, 'tugovdetipi':\n tugovdetipi, 'siboplotno': siboplotno}, request=request)\n html = HTML(string=html_string, base_url=request.build_absolute_uri())\n html.write_pdf(target='/tmp/' + qr + '.pdf')\n fs = FileSystemStorage('/tmp/')\n with fs.open(qr + '.pdf') as pdf:\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = 'inline; filename=\"pdf.pdf\"'\n return response\n return response\n\n\n@csrf_exempt\ndef dashboard(request):\n bugun = timezone.now()\n print(request.POST.get('gun_sayisi'))\n gun = int(request.POST.get('gun_sayisi'))\n kac_gun = bugun - timezone.timedelta(days=gun)\n veris = Test.objects.filter(test_tarihi__range=[kac_gun, bugun])\n temp = []\n for o in veris.values():\n 
temp.append(o)\n veri = list(temp)\n print('dashboard', veri)\n return JsonResponse(veri, safe=False)\n\n\n<mask token>\n\n\n@csrf_exempt\ndef personeldurum(request):\n p = request.POST.get('personel')\n g = request.POST.get('gun_sayisi')\n print(p, g)\n bugun = timezone.now()\n gun = int(request.POST.get('gun_sayisi'))\n kac_gun = bugun - timezone.timedelta(days=gun)\n veris = Test.objects.filter(test_tarihi__range=[kac_gun, bugun])\n veri = list()\n try:\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='manometre').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='basinc').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='altnipel').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='ustnipel').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='bakirmembran').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='emniyet').filter(testi_yapan=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='kurlenme').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='valftest').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='valfgovde').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='fm200').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='havuztest').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='finalmontaj').filter(personel=p).count())\n except:\n veri = [0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 10]\n print(veri)\n return JsonResponse(veri, safe=False)\n\n\n@csrf_exempt\ndef tupTuru(request):\n if request.method == 'POST':\n try:\n u = Emir.objects.filter(is_emri=request.POST.dict()['is_emri']\n ).first()\n bos_agirlik_miktari = u.bos_agirlik_miktari\n fm200_miktari = u.fm200_miktari\n renk = u.renk\n response = bos_agirlik_miktari + ';' + fm200_miktari + ';' + renk\n return HttpResponse(str(response))\n except e:\n print(e)\n return str('tur')\n\n\n<mask token>\n\n\n@csrf_exempt\ndef kontrolEt(request):\n if request.method == 'POST':\n tur = request.POST['tur']\n veri = request.POST['veri']\n isemri = request.POST['isemri']\n t = Test.objects.filter(tur=tur)\n r = 'NO'\n if tur == 'altnipel':\n t = Test.objects.filter(tur=tur)\n try:\n if int(veri) in t.values_list('lot_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'ustnipel':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('baslangic_seri_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'manometre':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('seri_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except Exception as e:\n print(e)\n r = 'NO'\n if tur == 'basinc':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('seri_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'bakirmembran':\n t = Test.objects.filter(tur=tur)\n try:\n if int(veri) in t.values_list('lot_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'emniyet':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('lot_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'valf_govde':\n try:\n valf_id = Valf.objects.filter(valf_montaj_id=veri).values_list(\n 'valf_test_id', flat=True).first()\n if isinstance(valf_id, int):\n Valf_test.objects.filter(id=valf_id).values_list('uygun',\n flat=True).first()\n if 
Valf_test.objects.filter(id=valf_id).values_list('uygun'\n , flat=True).first():\n r = 'OK'\n else:\n r = 'NO'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'sibop':\n print(tur, veri, t.values_list('lot_no', flat=True))\n t = Test.objects.filter(tur=tur)\n try:\n if int(veri) in t.values_list('lot_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n return HttpResponse(r)\n\n\n@csrf_exempt\ndef kurlenmeKontrol(request):\n if request.method == 'POST':\n r = 'NO'\n tur = request.POST['tur']\n vsn = request.POST['veri']\n print('kurlenmeKontrol', tur, vsn)\n if tur == 'montaj_kurlenme':\n try:\n u = Uretim.objects.filter(vsn=vsn)\n print(u, '---------------')\n if u.values()[0]['montaj_kurlenme_zamani'] < timezone.now():\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n elif tur == 'govde_kurlenme':\n try:\n u = Uretim.objects.filter(vsn=vsn)\n print('govde_kurlenme_zamani', u.values()[0][\n 'govde_kurlenme_zamani'])\n print('now', timezone.now())\n if u.values()[0]['govde_kurlenme_zamani'] < timezone.now():\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n elif tur == 'valf_test':\n print('içerdeyim-----> Valf Test')\n try:\n print(vsn, '----------------------------')\n valf_montaj_id = Valf.objects.filter(id=vsn).first(\n ).valf_montaj_id\n print(valf_montaj_id)\n tarih = Valf_montaj.objects.filter(id=valf_montaj_id).first(\n ).kurlenme_bitis_tarihi\n print(tarih)\n print(type(timezone.now()), timezone.now())\n print(type(tarih), tarih)\n if tarih < timezone.now():\n print('büyüktür')\n r = 'OK'\n else:\n print('küçük')\n r = 'NO'\n except Exception as err:\n print('r', err)\n r = 'NO'\n elif tur == 'pdfkontrol':\n print(vsn)\n try:\n if Valf.objects.filter(valf_montaj_id=vsn).count():\n r = 'OK'\n else:\n r = 'NO'\n except Exception as err:\n r = 'NO'\n print(err)\n return HttpResponse(r)\n\n\n@csrf_exempt\ndef newVSN(request):\n if request.method == 'POST':\n vsn = ''\n if not Uretim.objects.all():\n vsn = 1\n else:\n a = 
Uretim.objects.all().order_by('-vsn').values()[0]\n s = a['vsn']\n print('sssss', s)\n vsn = s + 1\n print(vsn)\n r = str(vsn)\n return HttpResponse(r)\n\n\n@csrf_exempt\ndef hardreset(request):\n print('Hard')\n",
"step-3": "<mask token>\n\n\ndef bildirim(request):\n bugun = timezone.now()\n birGunOnce = bugun - timezone.timedelta(days=14)\n bildirimq = Bildirim.objects.filter(zaman__range=[birGunOnce, bugun])\n temp = []\n for o in bildirimq.values():\n temp.append(o)\n bildirims = list(temp)\n print(bildirims)\n return JsonResponse(bildirims, safe=False)\n\n\n@login_required\ndef index(request):\n grup = request.user.grup\n birim = request.user.birim\n emirler = Emir.objects.filter(durum='Aktif')\n l = list()\n for e in emirler.values():\n data = dict()\n data['is_emri'] = e['is_emri']\n data['valfmontaj'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_montaj_id__isnull=False).values_list('valf_montaj_id',\n flat=True).count() or 0\n data['valftest'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_test_id__isnull=False).values_list('valf_test_id', flat=True\n ).count() or 0\n data['valfgovde'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_govde_id__isnull=False).values_list('valf_govde_id', flat=True\n ).count() or 0\n data['fm200'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n fm200_azot_id__isnull=False).values_list('fm200_azot_id', flat=True\n ).count() or 0\n data['havuztest'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n havuz_id__isnull=False).values_list('havuz_id', flat=True).count(\n ) or 0\n data['finalmontaj'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_final_montaj_id__isnull=False).values_list(\n 'valf_final_montaj_id', flat=True).count() or 0\n l.append(data)\n print(l)\n return render(request, 'index.html', {'grup': grup, 'emirler': emirler,\n 'birim': birim, 'server': server, 'uretims': l})\n\n\n@login_required\ndef arama(request):\n mac = request.user_agent.os.family\n q = request.GET.get('q') or request.GET.get('uretim')\n emir = request.GET.get('emir')\n emirs = Emir.objects.all()\n media_url = settings.MEDIA_URL\n aranan = ''\n if q:\n aranan = q\n elif emir:\n aranan = 'isemri'\n else:\n 
print('bos')\n grup = request.user.grup\n birim = request.user.birim\n testler = Test.objects.filter(tur=q)\n print(q)\n if q == 'valfmontaj':\n uretims = Valf_montaj.objects.all()\n elif q == 'valfgovde':\n uretims = Valf_govde.objects.all()\n elif q == 'fm200':\n uretims = Valf_fm200.objects.all()\n elif q == 'havuztest':\n uretims = Valf_havuz.objects.all()\n elif q == 'finalmontaj':\n uretims = Valf_final_montaj.objects.all()\n else:\n uretims = Uretim.objects.filter(tur=q)\n print(uretims)\n if emir == 'tumu':\n emirler = Emir.objects.all()\n else:\n emirler = Emir.objects.filter(is_emri=emir)\n return render(request, 'arama.html', {'mac': mac, 'testler': testler,\n 'grup': grup, 'emirler': emirler, 'aranan': aranan, 'emirs': emirs,\n 'birim': birim, 'media_url': media_url, 'uretims': uretims,\n 'server': server})\n\n\n@login_required\n@csrf_exempt\ndef giriskalite(request):\n mac = request.user_agent.os.family\n grup = request.user.grup\n birim = request.user.birim\n fullname = request.user.first_name + ' ' + request.user.last_name\n if request.method == 'POST':\n if request.POST.dict()['tur'] == 'basinc':\n veris = json.loads(request.POST.dict()['veri'])\n for veri in veris:\n t = Test(tur='basinc', seri_no=veri[0], acma=veri[1],\n kapatma=veri[2], kabul_durumu=veri[3], testi_yapan=fullname\n )\n t.save(force_insert=True)\n elif request.POST.dict()['tur'] == 'manometre':\n veris = json.loads(request.POST.dict()['veri'])\n for veri in veris:\n t = Test(tur='manometre', seri_no=veri[0], okunan_deger=\n veri[1], kabul_durumu=veri[2], testi_yapan=fullname)\n t.save(force_insert=True)\n elif request.POST.dict()['tur'] == 'altnipel':\n print(request.POST)\n kontrolResult = nipelSeriNoKontrol(request)\n if kontrolResult == True:\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n fs.save(upload_file.name, upload_file)\n next_lot_no = getNextLotNo(request.POST.dict()['tur'])\n t = Test(tur='altnipel', lot_no=next_lot_no, 
pdf_ismi=\n request.POST.get('pdf_ismi'), baslangic_seri_no=request\n .POST.get('baslangic_seri_no'), bitis_seri_no=request.\n POST.get('bitis_seri_no'), kabul_durumu=request.POST.\n get('kabulAlt'), testi_yapan=fullname)\n t.save(force_insert=True)\n messages.success(request,\n 'Alt nipel testi başarıyla kaydedildi.')\n elif request.POST.dict()['tur'] == 'ustnipel':\n print(request.POST)\n kontrolResult = nipelSeriNoKontrol(request)\n if kontrolResult == True:\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n fs.save(upload_file.name, upload_file)\n next_lot_no = getNextLotNo(request.POST.dict()['tur'])\n t = Test(tur='ustnipel', lot_no=next_lot_no, pdf_ismi=\n request.POST.get('pdf_ismi'), baslangic_seri_no=request\n .POST.get('baslangic_seri_no'), bitis_seri_no=request.\n POST.get('bitis_seri_no'), kabul_durumu=request.POST.\n get('kabulUst'), testi_yapan=fullname)\n t.save(force_insert=True)\n messages.success(request,\n 'Üst nipel testi başarıyla kaydedildi.')\n elif request.POST.dict()['tur'] == 'bakirmembran':\n print(request.POST)\n next_lot_no = getNextLotNo(request.POST.get('test_tur'))\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n fs.save(upload_file.name, upload_file)\n t = Test(tur=request.POST.get('test_tur'), lot_no=next_lot_no,\n pdf_ismi=request.POST.get('pdf_ismi'), test_basinci=request\n .POST.get('test_basinci'), patlama_basinci=request.POST.get\n ('patlama_basinci'), kabul_durumu=request.POST.get(\n 'kabulBak'), testi_yapan=fullname)\n t.save(force_insert=True)\n if request.POST.get('test_tur') == 'bakirmembran':\n messages.success(request,\n 'Bakır membran testi başarıyla kaydedildi.')\n else:\n messages.success(request,\n 'Emniyet ventili testi başarıyla kaydedildi.')\n \"\"\"\n elif request.POST.get('tur') == 'emniyet':\n print(request.POST)\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n 
fs.save(upload_file.name,upload_file)\n \n next_lot_no = getNextLotNo( request.POST.dict()['tur'])\n t = Test(tur='emniyet',lot_no =next_lot_no, pdf_ismi = request.POST.get('pdf_ismi') ,test_basinci = request.POST.get('test_basinci'), patlama_basinci = request.POST.get('patlama_basinci'),kabul_durumu = request.POST.get('kabulEmn'),testi_yapan = fullname)\n t.save(force_insert=True)\n messages.success(request,'Emniyet ventili testi başarıyla kaydedildi.')\n \"\"\"\n return render(request, 'giris-kalite-kontrol.html', {'mac': mac, 'grup':\n grup, 'birim': birim, 'server': server})\n\n\ndef getNextLotNo(tur):\n test_with_max_lot_no = Test.objects.filter(tur=tur).order_by('-lot_no'\n ).first()\n if test_with_max_lot_no == None:\n max_lot_no = 0\n else:\n max_lot_no = test_with_max_lot_no.lot_no\n return max_lot_no + 1\n\n\ndef nipelSeriNoKontrol(request):\n baslangic_seri_no = request.POST.get('baslangic_seri_no')\n bitis_seri_no = request.POST.get('bitis_seri_no')\n errorFlag = 0\n if int(baslangic_seri_no) > int(bitis_seri_no):\n errorFlag = 1\n messages.warning(request,\n 'Başlangıç seri numarası, bitiş seri numarasından büyük olamaz!')\n return False\n testler = Test.objects.filter(tur=request.POST.dict()['tur'])\n seri_no_aralık_range = range(int(baslangic_seri_no), int(bitis_seri_no) + 1\n )\n seri_no_aralık_list = set(seri_no_aralık_range)\n for test in testler:\n seri_no_aralık_test_range = range(int(test.baslangic_seri_no), int(\n test.bitis_seri_no) + 1)\n intersection_set = seri_no_aralık_list.intersection(\n seri_no_aralık_test_range)\n if len(intersection_set) != 0:\n messages.warning(request,\n 'Seri numarası aralığı mevcut bir seri numarası aralığı ile çakışmaktadır!'\n )\n return False\n return True\n\n\n@login_required\n@csrf_exempt\ndef uretimkontrol(request):\n mac = request.user_agent.os.family\n ip = get_client_ip(request)\n ip == '192.168.1.36'\n grup = request.user.grup\n birim = request.user.birim\n fullname = request.user.first_name + ' ' + 
request.user.last_name\n if request.method == 'POST':\n if request.POST.dict()['tur'] == 'valfmontaj':\n veris = json.loads(request.POST.dict()['veri'])\n print(veris)\n t = Uretim(tur='valfmontaj', okunan_deger=veris[0], personel=\n request.user.get_full_name())\n t.save(force_insert=True)\n b = Bildirim(tur='baslangic', kisi=request.user.get_full_name())\n b.save(force_insert=True)\n elif request.POST.dict()['tur'] == 'kurlenme':\n veris = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n if not Uretim.objects.all():\n vsn = 1\n else:\n a = Uretim.objects.all().order_by('-vsn').values()[0]\n s = a['vsn']\n vsn = s + 1\n v = Valf(vsn=vsn, is_emri=veris[0])\n v.save(force_insert=True)\n e = Emir.objects.get(is_emri=veris[0])\n e.durum = 'Aktif'\n e.save()\n t = Uretim(tur='montaj_kurlenme' ,vsn = vsn, is_emri = veris[0] ,personel = request.user.get_full_name(),alt_nipel_no = veris[1],bakir_membran_no = veris[2],ust_nipel_no = veris[3],manometre_no = veris[4],basincanahtari_no = veris[5],montaj_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10))\n t.save(force_insert=True)\n return HttpResponse(str(vsn))\n \"\"\"\n print('deneme')\n is_emri_adi = veris[0]\n emir = Emir.objects.get(is_emri=is_emri_adi)\n personel_id = request.user.id\n alt_nipel_no = veris[1]\n bakir_membran_no = veris[2]\n ust_nipel_no = veris[3]\n manometre_no = veris[4]\n basincanahtari_no = veris[5]\n sibop = veris[6]\n print('deneme2')\n try:\n kayit_tarihi = timezone.now()\n valf_montaj = Valf_montaj(montaj_personel_id=personel_id,\n alt_nipel_no=alt_nipel_no, bakir_membran_no=\n bakir_membran_no, ust_nipel_no=ust_nipel_no,\n manometre_no=manometre_no, basincanahtari_no=\n basincanahtari_no, montaj_tarihi=kayit_tarihi, sibop=sibop)\n valf_montaj.save()\n valf = Valf(is_emri=emir, valf_montaj=valf_montaj)\n valf.save()\n return HttpResponse(str(valf.id))\n except Exception as err:\n print(' KAyıt HAstası > ', err)\n elif request.POST.dict()['tur'] == 'valftest':\n try:\n 
valf_seri_no = json.loads(request.POST.dict()['valf_seri_no'])\n uygun = json.loads(request.POST.dict()['uygun'])\n valf = Valf.objects.get(id=valf_seri_no)\n personel_id = User.objects.get(id=request.user.id)\n test_tarihi = timezone.now()\n acma = str(uygun)\n kapama = str(uygun)\n sebep = str(uygun)\n if uygun == True:\n sebep = None\n valf_test = Valf_test(test_personel=personel_id,\n test_tarihi=test_tarihi, uygun=uygun)\n valf_test.save()\n valf.valf_test = valf_test\n valf.save()\n except Exception as err:\n print(err)\n elif request.POST.dict()['tur'] == 'valfgovde':\n veri = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n v = Valf.objects.get(vsn=veri[3])\n is_emri = v.is_emri\n print('veri[5],sodyum miktarı:: ',veri[5] )\n t = Uretim.objects.get(vsn=veri[3])\n t.tur='govde_kurlenme'\n t.tork_degeri = veri[0]\n t.uygunluk = veri[1]\n t.sebep = veri[2]\n t.tsn = veri[4]\n t.personel = request.user.get_full_name()\n t.govde_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10)\n # t = Uretim(tur='valfgovde',tork_degeri = veri[0] ,is_emri=is_emri, uygunluk = veri[1] , sebep = veri[2],\n # vsn = veri[3],tsn = veri[4], personel = request.user.get_full_name(),govde_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10))\n t.save()\n \"\"\"\n valf_seri_no = veri[3]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_govde'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n kurlenme_bitis = timezone.now() + timezone.timedelta(minutes=10)\n tork = veri[0]\n tup_seri_no = veri[4]\n sodyum_miktari = veri[5]\n uygunluk = veri[1]\n sebep = veri[2]\n if uygunluk == 'on':\n sebep = None\n valf_govde = Valf_govde(valf=valf, personel_id=personel_id,\n kayit_tarihi=kayit_tarihi, kurlenme_bitis=kurlenme_bitis,\n tork=tork, tup_seri_no=tup_seri_no, sodyum_miktari=\n sodyum_miktari, uygunluk=uygunluk, sebep=sebep)\n valf_govde.save()\n elif request.POST.dict()['tur'] == 'fm200':\n veri = 
json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n v = Valf.objects.get(vsn=veri[4])\n is_emri = v.is_emri\n print(veri)\n t = Uretim.objects.get(vsn=veri[4])\n t.tur='fm200_kurlenme'\n t.bos_agirlik = veri[0]\n t.rekorlu_agirlik = veri[1]\n t.fm200 = veri[2]\n t.azot = veri[3]\n t.personel = request.user.get_full_name()\n t.fm200_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10) \n t.save()\n \"\"\"\n valf_seri_no = veri[4]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_fm200'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n kurlenme_bitis = timezone.now() + timezone.timedelta(minutes=10)\n bos_agirlik = veri[0]\n rekorlu_agirlik = veri[1]\n fm200 = veri[2]\n azot = veri[3]\n valf_fm200 = Valf_fm200(valf=valf, personel_id=personel_id,\n kayit_tarihi=kayit_tarihi, kurlenme_bitis=kurlenme_bitis,\n bos_agirlik=bos_agirlik, rekorlu_agirlik=rekorlu_agirlik,\n fm200=fm200, azot=azot)\n valf_fm200.save()\n elif request.POST.dict()['tur'] == 'havuztest':\n veri = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n print(veri)\n v = Valf.objects.get(vsn=veri[0])\n is_emri = v.is_emri\n t = Uretim(tur='havuztest',vsn = veri[0],tsn = veri[0],is_emri=is_emri , uygunluk = veri[1] , \n acma = veri[2], kapatma = veri[3],sebep = veri[4], personel = request.user.get_full_name())\n t.save(force_insert=True)\n \"\"\"\n print('veri', veri)\n valf_seri_no = veri[0]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_havuz_test'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n uygunluk = veri[1]\n tup_cidar_sicaklik = veri[2]\n tup_basinc = veri[3]\n sebep = veri[4]\n if uygunluk:\n sebep = None\n valf_havuz = Valf_havuz(valf=valf, personel_id=personel_id,\n kayit_tarihi=kayit_tarihi, tup_cidar_sicaklik=\n tup_cidar_sicaklik, tup_basinc=tup_basinc, uygunluk=\n uygunluk, sebep=sebep)\n valf_havuz.save()\n elif request.POST.dict()['tur'] == 'finalmontaj':\n veri = 
json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n \n print(veri)\n v = Valf.objects.get(vsn=veri[1])\n is_emri = v.is_emri\n t = Uretim.objects.get(vsn=veri[1])\n t.tur='finalmontaj'\n t.etiket_seri_no = veri[0]\n t.fsn = veri[2]\n t.funye_seri_omaj = veri[3]\n t.basinc_anahtari_omaj = veri[4]\n t. personel = request.user.get_full_name()\n #t = Uretim(tur='finalmontaj',etiket_seri_no = veri[0],is_emri=is_emri , vsn = veri[1] , fsn = veri[2],\n # funye_seri_omaj = veri[3],basinc_anahtari_omaj = veri[4], personel = request.user.get_full_name())\n t.save()\n tup_sayisi_str=Emir.objects.filter(is_emri=is_emri).values()[0]['tup_sayisi']\n \"\"\"\n valf_seri_no = veri[1]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_final_montaj'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n etiket_seri_no = veri[0]\n funye_seri_no = veri[2]\n funye_seri_omaj = veri[3]\n basinc_anahtari_omaj = veri[4]\n valf_final_montaj = Valf_final_montaj(valf=valf, personel_id=\n personel_id, kayit_tarihi=kayit_tarihi, etiket_seri_no=\n etiket_seri_no, funye_seri_no=funye_seri_no,\n funye_seri_omaj=funye_seri_omaj, basinc_anahtari_omaj=\n basinc_anahtari_omaj)\n valf_final_montaj.save()\n emir = Emir.objects.get(is_emri=valf.is_emri)\n emir_tup_sayisi = int(emir.tup_sayisi)\n emir_biten_valf_sayi = Valf.objects.filter(is_emri=emir, durum=\n 'valf_final_montaj').count()\n print('emir_biten_valf_sayi', emir_biten_valf_sayi)\n print('emir_tup_sayisi', emir_tup_sayisi)\n if emir_biten_valf_sayi == emir_tup_sayisi:\n emir.durum = 'Bitmiş'\n emir.save()\n b = Bildirim(tur='bitis', kisi=request.user.get_full_name())\n b.save(force_insert=True)\n now = timezone.now()\n montajkurlenmesi = Valf_montaj.objects.all()\n fm200kurlenmesi = Valf_fm200.objects.filter(\n fm200_kurlenme_bitis_tarihi__gte=now)\n acikemirleri = Emir.objects.filter(durum='Aktif').values()\n aktifemirler = Emir.objects.filter(durum='Aktif')\n govde_emir = 
list(dict.fromkeys(Valf.objects.filter(\n valf_govde_id__isnull=False).values_list('is_emri_id', flat=True)))\n fm200_emir = list(dict.fromkeys(Valf.objects.filter(\n fm200_azot_id__isnull=False).values_list('is_emri_id', flat=True)))\n return render(request, 'uretim-kontrol.html', {'grup': grup, 'birim':\n birim, 'ip': ip, 'now': now, 'server': server, 'acikemirleri':\n acikemirleri, 'fm200kurlenmes': fm200kurlenmesi, 'kurlenmes':\n montajkurlenmesi, 'aktifemirler': aktifemirler, 'govde_emir':\n govde_emir, 'fm200_emir': fm200_emir})\n\n\n@csrf_exempt\ndef acikisemirleri(request):\n emirler = Emir.objects.filter(durum__in=('Aktif', 'Başlanmamış'))\n temp = []\n for o in emirler.values():\n temp.append(o['is_emri'])\n veri = list(temp)\n\n\n@login_required\n@csrf_exempt\ndef isemri(request):\n mac = request.user_agent.os.family\n grup = request.user.grup\n birim = request.user.birim\n fullname = request.user.first_name + ' ' + request.user.last_name\n emirler = Emir.objects.all()\n form = IsEmri(request.POST)\n if request.method == 'POST':\n if 'tur' in request.POST.dict():\n if request.POST.dict()['tur'] == 'oncelik':\n veri = json.loads(request.POST.dict()['veri'])\n print(veri)\n for key in veri:\n em = Emir.objects.get(is_emri=key)\n em.oncelik = veri[key]\n em.save()\n o = Bildirim(tur='oncelik')\n o.save()\n return HttpResponse('onceliktamam')\n elif form.is_valid():\n if not Emir.objects.all():\n son_oncelik = 1\n else:\n a = Emir.objects.all().order_by('-oncelik').values()[0]\n s = a['oncelik']\n son_oncelik = s + 1\n emir = form.save()\n emir.refresh_from_db()\n emir.is_emri = form.cleaned_data.get('is_emri')\n emir.urun_kodu = form.cleaned_data.get('urun_kodu')\n emir.baslangic = form.cleaned_data.get('baslangic')\n emir.bitis = form.cleaned_data.get('bitis')\n emir.emri_veren = form.cleaned_data.get('emri_veren')\n emir.tup_govde_turu = form.cleaned_data.get('tup_govde_turu')\n emir.valf_turu = form.cleaned_data.get('valf_turu')\n emir.renk = 
form.cleaned_data.get('renk')\n emir.emniyet_ventil_turu = form.cleaned_data.get(\n 'emniyet_ventil_turu')\n emir.siparis = form.cleaned_data.get('siparis')\n emir.fm200bosagirlikmindeger = form.cleaned_data.get(\n 'fm200bosagirlikmindeger')\n emir.fm200bosagirlikmaxdeger = form.cleaned_data.get(\n 'fm200bosagirlikmaxdeger')\n emir.fm200dolummiktarimindeger = form.cleaned_data.get(\n 'fm200dolummiktarimindeger')\n emir.fm200dolummiktarimaxdeger = form.cleaned_data.get(\n 'fm200dolummiktarimaxdeger')\n t = Bildirim(tur='is emri', emri_veren_grup=grup, emri_veren=\n request.user.get_full_name(), is_emri=form.cleaned_data.get\n ('is_emri'))\n t.save(force_insert=True)\n emir.oncelik = son_oncelik\n messages.success(request, 'Emir başarıyla eklendi!')\n emir.save()\n form.full_clean()\n return HttpResponseRedirect(reverse('isemri'))\n else:\n messages.warning(request,\n 'İş emri eklenemedi.Lütfen tekrar deneyin!Hata: {}'.format(\n form.errors))\n else:\n form = IsEmri()\n form.fields['emri_veren'].initial = fullname\n return render(request, 'is-emri.html', {'form': form, 'emirler':\n emirler, 'mac': mac, 'fullname': fullname, 'grup': grup, 'birim':\n birim, 'server': server})\n\n\ndef yetkilendirme(request):\n mac = request.user_agent.os.family\n grup = 'Yönetici'\n birim = 'IT'\n kullanicilar = User.objects.all()\n if (grup == 'Yönetici' and birim == 'IT' or grup == 'Mühendis' and \n birim == 'IT'):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.refresh_from_db()\n user.first_name = form.cleaned_data.get('first_name')\n user.last_name = form.cleaned_data.get('last_name')\n user.grup = form.cleaned_data.get('grup')\n user.save()\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password1')\n messages.success(request,\n '{} isimli kullanıcı {} isimli gruba eklendi!'.format(\n username, user.grup))\n return HttpResponseRedirect(reverse('yetkilendirme'))\n else:\n 
print(form.errors)\n else:\n form = UserRegisterForm()\n return render(request, 'kullanici-yetkilendirme.html', {'form':\n form, 'kullanicilar': kullanicilar, 'mac': mac, 'grup': grup,\n 'birim': birim, 'server': server})\n else:\n return HttpResponseRedirect(reverse('403'))\n\n\n@login_required\ndef performans(request):\n mac = request.user_agent.os.family\n grup = request.user.grup\n birim = request.user.birim\n kullanicilar = User.objects.all()\n return render(request, 'performans.html', {'mac': mac, 'grup': grup,\n 'birim': birim, 'kullanicilar': kullanicilar, 'server': server})\n\n\n<mask token>\n\n\n@login_required\ndef ulogout(request):\n logout(request)\n return HttpResponseRedirect(reverse('ulogin'))\n\n\n@csrf_exempt\ndef ulogin(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n login(request, user)\n print('{} kullanıcısı tarafından başarılı giriş'.format(\n username))\n return redirect('arama')\n else:\n messages.warning(request,\n 'Kullanıcı adınızı yada parolanızı yanlış girdiniz.')\n else:\n print('Birisi login olmayı denedi ve başarısız oldu!')\n messages.warning(request,\n 'Kullanıcı adınızı yada parolanızı yanlış girdiniz.')\n return HttpResponseRedirect(reverse('ulogin'))\n else:\n return render(request, 'login.html', {})\n\n\ndef _403(request):\n return render(request, '403.html', {})\n\n\ndef handler404(request, exception):\n return render(request, '403.html', status=404)\n\n\n@csrf_exempt\ndef kullanicijson(request):\n username = request.POST.get('username')\n b = User.objects.filter(username=username).values('first_name',\n 'last_name', 'username', 'grup')\n veri = list(b)\n return JsonResponse(veri, safe=False)\n\n\n@csrf_exempt\ndef kullanicisil(request):\n username = request.POST.get('username')\n print(username)\n sildi = User.objects.filter(username=username).delete()\n if 
sildi:\n return HttpResponse('silindi')\n else:\n return HttpResponse('silinemedi')\n\n\n@csrf_exempt\ndef kullaniciduzelt(request):\n veri = request.POST.get('bilgi')\n veri = json.loads(veri)\n a = User.objects.get(username=veri['eskisi'])\n a.username = veri['username']\n a.first_name = veri['first_name']\n a.last_name = veri['last_name']\n a.grup = veri['grup']\n a.birim = veri['birim']\n a.save()\n return HttpResponse('duzeltildi')\n\n\n<mask token>\n\n\n@csrf_exempt\ndef pdf(request):\n if request.GET.get('qr'):\n qr = request.GET.get('qr')\n print(qr.split(' ')[0])\n i = qr.split(' ')[0]\n print('---------------------')\n valf_no = request.GET.get('vsn')\n Valf_montaj_Data = Valf_montaj.objects.filter(id=Valf.objects.filter(id\n =valf_no).first().valf_montaj_id).first()\n Valf_fm200_Data = Valf_fm200.objects.filter(id=Valf.objects.filter(id=\n valf_no).first().fm200_azot_id).first()\n Valf_havuz_Data = Valf_havuz.objects.filter(id=Valf.objects.filter(id=\n valf_no).first().havuz_id).first()\n Valf_final_Data = Valf_final_montaj.objects.filter(id=Valf.objects.\n filter(id=valf_no).first().valf_final_montaj_id).first()\n Valf_test_Data = Valf_test.objects.filter(id=Valf.objects.filter(id=\n valf_no).first().valf_test_id).first()\n Valf_govde_Data = Valf_govde.objects.filter(id=Valf.objects.filter(id=\n valf_no).first().valf_govde_id).first()\n Emir_Data = Emir.objects.filter(is_emri=i).first()\n valf_final = Valf.objects.filter(id=valf_no).values_list(\n 'valf_final_montaj_id', flat=True).first()\n urun_seri_no = Valf_final_montaj.objects.filter(id=valf_final).values_list(\n 'urun_seri_no', flat=True).first()\n print('---------------------')\n try:\n valfmontajPersonel = get_first_and_lastname(User.objects.filter(id=\n Valf_montaj_Data.montaj_personel_id).first().username)\n except:\n valfmontajPersonel = ''\n try:\n valfmontajTarih = Valf_montaj_Data.montaj_tarihi\n except:\n valfmontajTarih = ''\n try:\n altnipelno = Valf_montaj_Data.alt_nipel_no\n except:\n 
altnipelno = ''\n try:\n ustnipelno = Valf_montaj_Data.ust_nipel_no\n except:\n ustnipelno = ''\n try:\n switchno = Valf_montaj_Data.basincanahtari_no\n except:\n switchno = ''\n try:\n manometreno = Valf_montaj_Data.manometre_no\n except:\n manometreno = ''\n try:\n valftestPersonel = get_first_and_lastname(User.objects.filter(id=\n Valf_test_Data.test_personel_id).first().username)\n except:\n valftestPersonel = ''\n try:\n valftestTarih = Valf_test_Data.test_tarihi\n except:\n valftestTarih = ''\n try:\n valfTestUygun = ('Uygun' if Valf_test_Data.uygun == True else\n 'Uygun Değil')\n except:\n valfTestUygun = Valf_test_Data.uygun\n try:\n valfgovdePersonel = get_first_and_lastname(User.objects.filter(id=\n Valf_govde_Data.govde_personel_id).first().username)\n except:\n valfgovdePersonel = ''\n try:\n valfgovdeTarih = Valf_govde_Data.govde_tarihi\n except:\n valfgovdeTarih = ''\n try:\n valfGovdeUygun = ('Uygun' if Valf_govde_Data.uygunluk == True else\n 'Uygun Değil')\n except:\n valfGovdeUygun = ''\n try:\n fm200Personel = get_first_and_lastname(User.objects.filter(id=\n Valf_fm200_Data.fm200_personel_id).first().username)\n except:\n fm200Personel = ''\n try:\n fm200Tarih = Valf_fm200_Data.kayit_tarihi\n except:\n fm200Tarih = ''\n try:\n bosAgirlik = Valf_fm200_Data.bos_agirlik\n except:\n bosAgirlik = ''\n try:\n doluAgirlik = Valf_fm200_Data.dolu_agirlik\n except:\n doluAgirlik = ''\n try:\n bar = Valf_fm200_Data.bar\n except:\n bar = ''\n try:\n havuztestPersonel = get_first_and_lastname(User.objects.filter(id=\n Valf_havuz_Data.havuz_personel_id).first().username)\n except:\n havuztestPersonel = ''\n try:\n havuztestTarih = Valf_havuz_Data.kayit_tarihi\n except:\n havuztestTarih = ''\n try:\n havuzTestUygun = ('Uygun' if Valf_havuz_Data.uygunluk == True else\n 'Uygun Değil')\n except:\n havuzTestUygun = ''\n try:\n finalmontajPersonel = get_first_and_lastname(User.objects.filter(id\n =Valf_final_Data.personel_id).first().username)\n except:\n 
finalmontajPersonel = ''\n try:\n finalmontajTarih = Valf_final_Data.kayit_tarihi\n except:\n finalmontajTarih = ''\n try:\n membranTipi = Emir_Data.valf_turu\n except:\n membranTipi = ''\n try:\n ventilTipi = Emir_Data.emniyet_ventil_turu\n except:\n ventilTipi = ''\n try:\n tugovdetipi = Emir_Data.tup_govde_turu\n except:\n tugovdetipi = ''\n try:\n siboplotno = Valf_montaj_Data.sibop\n except:\n siboplotno = ''\n print(valftestPersonel, Emir_Data.emniyet_ventil_turu)\n veri = 'veri'\n html_string = render_to_string('external/pdf-template.html', {'veri':\n veri, 'qr': urun_seri_no, 'valfmontajPersonel': valfmontajPersonel,\n 'valfmontajTarih': valfmontajTarih, 'valfgovdePersonel':\n valfgovdePersonel, 'valftestPersonel': valftestPersonel,\n 'valftestTarih': valftestTarih, 'valfTestUygun': valfTestUygun,\n 'havuzTestUygun': havuzTestUygun, 'valfgovdePersonel':\n valftestPersonel, 'valfgovdeTarih': valfgovdeTarih,\n 'valfGovdeUygun': valfGovdeUygun, 'valfMontajUygun': 'Uygun*',\n 'fm200Uygun': 'Uygun*', 'finalMontajUygun': 'Uygun*',\n 'fm200Personel': fm200Personel, 'fm200Tarih': fm200Tarih,\n 'bosAgirlik': bosAgirlik, 'doluAgirlik': doluAgirlik,\n 'havuztestPersonel': havuztestPersonel, 'havuztestTarih':\n havuztestTarih, 'finalmontajPersonel': finalmontajPersonel,\n 'finalmontajTarih': finalmontajTarih, 'altnipelno': altnipelno,\n 'ustnipelno': ustnipelno, 'switchno': switchno, 'manometreno':\n manometreno, 'is_emri': i, 'membranTipi': membranTipi, 'ventilTipi':\n ventilTipi, 'urunserino': urun_seri_no, 'bar': bar, 'tugovdetipi':\n tugovdetipi, 'siboplotno': siboplotno}, request=request)\n html = HTML(string=html_string, base_url=request.build_absolute_uri())\n html.write_pdf(target='/tmp/' + qr + '.pdf')\n fs = FileSystemStorage('/tmp/')\n with fs.open(qr + '.pdf') as pdf:\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = 'inline; filename=\"pdf.pdf\"'\n return response\n return response\n\n\n@csrf_exempt\ndef 
dashboard(request):\n bugun = timezone.now()\n print(request.POST.get('gun_sayisi'))\n gun = int(request.POST.get('gun_sayisi'))\n kac_gun = bugun - timezone.timedelta(days=gun)\n veris = Test.objects.filter(test_tarihi__range=[kac_gun, bugun])\n temp = []\n for o in veris.values():\n temp.append(o)\n veri = list(temp)\n print('dashboard', veri)\n return JsonResponse(veri, safe=False)\n\n\n<mask token>\n\n\n@csrf_exempt\ndef personeldurum(request):\n p = request.POST.get('personel')\n g = request.POST.get('gun_sayisi')\n print(p, g)\n bugun = timezone.now()\n gun = int(request.POST.get('gun_sayisi'))\n kac_gun = bugun - timezone.timedelta(days=gun)\n veris = Test.objects.filter(test_tarihi__range=[kac_gun, bugun])\n veri = list()\n try:\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='manometre').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='basinc').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='altnipel').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='ustnipel').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='bakirmembran').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='emniyet').filter(testi_yapan=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='kurlenme').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='valftest').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='valfgovde').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='fm200').filter(personel=p).count())\n 
veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='havuztest').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='finalmontaj').filter(personel=p).count())\n except:\n veri = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10]\n print(veri)\n return JsonResponse(veri, safe=False)\n\n\n@csrf_exempt\ndef tupTuru(request):\n if request.method == 'POST':\n try:\n u = Emir.objects.filter(is_emri=request.POST.dict()['is_emri']\n ).first()\n bos_agirlik_miktari = u.bos_agirlik_miktari\n fm200_miktari = u.fm200_miktari\n renk = u.renk\n response = bos_agirlik_miktari + ';' + fm200_miktari + ';' + renk\n return HttpResponse(str(response))\n except e:\n print(e)\n return str('tur')\n\n\n<mask token>\n\n\n@csrf_exempt\ndef kontrolEt(request):\n if request.method == 'POST':\n tur = request.POST['tur']\n veri = request.POST['veri']\n isemri = request.POST['isemri']\n t = Test.objects.filter(tur=tur)\n r = 'NO'\n if tur == 'altnipel':\n t = Test.objects.filter(tur=tur)\n try:\n if int(veri) in t.values_list('lot_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'ustnipel':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('baslangic_seri_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'manometre':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('seri_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except Exception as e:\n print(e)\n r = 'NO'\n if tur == 'basinc':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('seri_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'bakirmembran':\n t = Test.objects.filter(tur=tur)\n try:\n if int(veri) in t.values_list('lot_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'emniyet':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('lot_no', flat=True):\n r = 'OK'\n else:\n 
r = 'NO'\n except:\n r = 'NO'\n if tur == 'valf_govde':\n try:\n valf_id = Valf.objects.filter(valf_montaj_id=veri).values_list(\n 'valf_test_id', flat=True).first()\n if isinstance(valf_id, int):\n Valf_test.objects.filter(id=valf_id).values_list('uygun',\n flat=True).first()\n if Valf_test.objects.filter(id=valf_id).values_list('uygun'\n , flat=True).first():\n r = 'OK'\n else:\n r = 'NO'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'sibop':\n print(tur, veri, t.values_list('lot_no', flat=True))\n t = Test.objects.filter(tur=tur)\n try:\n if int(veri) in t.values_list('lot_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n return HttpResponse(r)\n\n\n@csrf_exempt\ndef kurlenmeKontrol(request):\n if request.method == 'POST':\n r = 'NO'\n tur = request.POST['tur']\n vsn = request.POST['veri']\n print('kurlenmeKontrol', tur, vsn)\n if tur == 'montaj_kurlenme':\n try:\n u = Uretim.objects.filter(vsn=vsn)\n print(u, '---------------')\n if u.values()[0]['montaj_kurlenme_zamani'] < timezone.now():\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n elif tur == 'govde_kurlenme':\n try:\n u = Uretim.objects.filter(vsn=vsn)\n print('govde_kurlenme_zamani', u.values()[0][\n 'govde_kurlenme_zamani'])\n print('now', timezone.now())\n if u.values()[0]['govde_kurlenme_zamani'] < timezone.now():\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n elif tur == 'valf_test':\n print('içerdeyim-----> Valf Test')\n try:\n print(vsn, '----------------------------')\n valf_montaj_id = Valf.objects.filter(id=vsn).first(\n ).valf_montaj_id\n print(valf_montaj_id)\n tarih = Valf_montaj.objects.filter(id=valf_montaj_id).first(\n ).kurlenme_bitis_tarihi\n print(tarih)\n print(type(timezone.now()), timezone.now())\n print(type(tarih), tarih)\n if tarih < timezone.now():\n print('büyüktür')\n r = 'OK'\n else:\n print('küçük')\n r = 'NO'\n except Exception as err:\n print('r', err)\n r = 'NO'\n elif tur == 'pdfkontrol':\n print(vsn)\n try:\n if 
Valf.objects.filter(valf_montaj_id=vsn).count():\n r = 'OK'\n else:\n r = 'NO'\n except Exception as err:\n r = 'NO'\n print(err)\n return HttpResponse(r)\n\n\n@csrf_exempt\ndef newVSN(request):\n if request.method == 'POST':\n vsn = ''\n if not Uretim.objects.all():\n vsn = 1\n else:\n a = Uretim.objects.all().order_by('-vsn').values()[0]\n s = a['vsn']\n print('sssss', s)\n vsn = s + 1\n print(vsn)\n r = str(vsn)\n return HttpResponse(r)\n\n\n@csrf_exempt\ndef hardreset(request):\n print('Hard')\n",
"step-4": "<mask token>\nserver = '192.168.1.38:8000'\n\n\ndef get_client_ip(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n print(ip)\n return ip\n\n\ndef bildirim(request):\n bugun = timezone.now()\n birGunOnce = bugun - timezone.timedelta(days=14)\n bildirimq = Bildirim.objects.filter(zaman__range=[birGunOnce, bugun])\n temp = []\n for o in bildirimq.values():\n temp.append(o)\n bildirims = list(temp)\n print(bildirims)\n return JsonResponse(bildirims, safe=False)\n\n\n@login_required\ndef index(request):\n grup = request.user.grup\n birim = request.user.birim\n emirler = Emir.objects.filter(durum='Aktif')\n l = list()\n for e in emirler.values():\n data = dict()\n data['is_emri'] = e['is_emri']\n data['valfmontaj'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_montaj_id__isnull=False).values_list('valf_montaj_id',\n flat=True).count() or 0\n data['valftest'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_test_id__isnull=False).values_list('valf_test_id', flat=True\n ).count() or 0\n data['valfgovde'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_govde_id__isnull=False).values_list('valf_govde_id', flat=True\n ).count() or 0\n data['fm200'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n fm200_azot_id__isnull=False).values_list('fm200_azot_id', flat=True\n ).count() or 0\n data['havuztest'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n havuz_id__isnull=False).values_list('havuz_id', flat=True).count(\n ) or 0\n data['finalmontaj'] = Valf.objects.filter(is_emri_id=e['id']).filter(\n valf_final_montaj_id__isnull=False).values_list(\n 'valf_final_montaj_id', flat=True).count() or 0\n l.append(data)\n print(l)\n return render(request, 'index.html', {'grup': grup, 'emirler': emirler,\n 'birim': birim, 'server': server, 'uretims': l})\n\n\n@login_required\ndef arama(request):\n mac = 
request.user_agent.os.family\n q = request.GET.get('q') or request.GET.get('uretim')\n emir = request.GET.get('emir')\n emirs = Emir.objects.all()\n media_url = settings.MEDIA_URL\n aranan = ''\n if q:\n aranan = q\n elif emir:\n aranan = 'isemri'\n else:\n print('bos')\n grup = request.user.grup\n birim = request.user.birim\n testler = Test.objects.filter(tur=q)\n print(q)\n if q == 'valfmontaj':\n uretims = Valf_montaj.objects.all()\n elif q == 'valfgovde':\n uretims = Valf_govde.objects.all()\n elif q == 'fm200':\n uretims = Valf_fm200.objects.all()\n elif q == 'havuztest':\n uretims = Valf_havuz.objects.all()\n elif q == 'finalmontaj':\n uretims = Valf_final_montaj.objects.all()\n else:\n uretims = Uretim.objects.filter(tur=q)\n print(uretims)\n if emir == 'tumu':\n emirler = Emir.objects.all()\n else:\n emirler = Emir.objects.filter(is_emri=emir)\n return render(request, 'arama.html', {'mac': mac, 'testler': testler,\n 'grup': grup, 'emirler': emirler, 'aranan': aranan, 'emirs': emirs,\n 'birim': birim, 'media_url': media_url, 'uretims': uretims,\n 'server': server})\n\n\n@login_required\n@csrf_exempt\ndef giriskalite(request):\n mac = request.user_agent.os.family\n grup = request.user.grup\n birim = request.user.birim\n fullname = request.user.first_name + ' ' + request.user.last_name\n if request.method == 'POST':\n if request.POST.dict()['tur'] == 'basinc':\n veris = json.loads(request.POST.dict()['veri'])\n for veri in veris:\n t = Test(tur='basinc', seri_no=veri[0], acma=veri[1],\n kapatma=veri[2], kabul_durumu=veri[3], testi_yapan=fullname\n )\n t.save(force_insert=True)\n elif request.POST.dict()['tur'] == 'manometre':\n veris = json.loads(request.POST.dict()['veri'])\n for veri in veris:\n t = Test(tur='manometre', seri_no=veri[0], okunan_deger=\n veri[1], kabul_durumu=veri[2], testi_yapan=fullname)\n t.save(force_insert=True)\n elif request.POST.dict()['tur'] == 'altnipel':\n print(request.POST)\n kontrolResult = nipelSeriNoKontrol(request)\n if 
kontrolResult == True:\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n fs.save(upload_file.name, upload_file)\n next_lot_no = getNextLotNo(request.POST.dict()['tur'])\n t = Test(tur='altnipel', lot_no=next_lot_no, pdf_ismi=\n request.POST.get('pdf_ismi'), baslangic_seri_no=request\n .POST.get('baslangic_seri_no'), bitis_seri_no=request.\n POST.get('bitis_seri_no'), kabul_durumu=request.POST.\n get('kabulAlt'), testi_yapan=fullname)\n t.save(force_insert=True)\n messages.success(request,\n 'Alt nipel testi başarıyla kaydedildi.')\n elif request.POST.dict()['tur'] == 'ustnipel':\n print(request.POST)\n kontrolResult = nipelSeriNoKontrol(request)\n if kontrolResult == True:\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n fs.save(upload_file.name, upload_file)\n next_lot_no = getNextLotNo(request.POST.dict()['tur'])\n t = Test(tur='ustnipel', lot_no=next_lot_no, pdf_ismi=\n request.POST.get('pdf_ismi'), baslangic_seri_no=request\n .POST.get('baslangic_seri_no'), bitis_seri_no=request.\n POST.get('bitis_seri_no'), kabul_durumu=request.POST.\n get('kabulUst'), testi_yapan=fullname)\n t.save(force_insert=True)\n messages.success(request,\n 'Üst nipel testi başarıyla kaydedildi.')\n elif request.POST.dict()['tur'] == 'bakirmembran':\n print(request.POST)\n next_lot_no = getNextLotNo(request.POST.get('test_tur'))\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n fs.save(upload_file.name, upload_file)\n t = Test(tur=request.POST.get('test_tur'), lot_no=next_lot_no,\n pdf_ismi=request.POST.get('pdf_ismi'), test_basinci=request\n .POST.get('test_basinci'), patlama_basinci=request.POST.get\n ('patlama_basinci'), kabul_durumu=request.POST.get(\n 'kabulBak'), testi_yapan=fullname)\n t.save(force_insert=True)\n if request.POST.get('test_tur') == 'bakirmembran':\n messages.success(request,\n 'Bakır membran testi başarıyla kaydedildi.')\n else:\n 
messages.success(request,\n 'Emniyet ventili testi başarıyla kaydedildi.')\n \"\"\"\n elif request.POST.get('tur') == 'emniyet':\n print(request.POST)\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n fs.save(upload_file.name,upload_file)\n \n next_lot_no = getNextLotNo( request.POST.dict()['tur'])\n t = Test(tur='emniyet',lot_no =next_lot_no, pdf_ismi = request.POST.get('pdf_ismi') ,test_basinci = request.POST.get('test_basinci'), patlama_basinci = request.POST.get('patlama_basinci'),kabul_durumu = request.POST.get('kabulEmn'),testi_yapan = fullname)\n t.save(force_insert=True)\n messages.success(request,'Emniyet ventili testi başarıyla kaydedildi.')\n \"\"\"\n return render(request, 'giris-kalite-kontrol.html', {'mac': mac, 'grup':\n grup, 'birim': birim, 'server': server})\n\n\ndef getNextLotNo(tur):\n test_with_max_lot_no = Test.objects.filter(tur=tur).order_by('-lot_no'\n ).first()\n if test_with_max_lot_no == None:\n max_lot_no = 0\n else:\n max_lot_no = test_with_max_lot_no.lot_no\n return max_lot_no + 1\n\n\ndef nipelSeriNoKontrol(request):\n baslangic_seri_no = request.POST.get('baslangic_seri_no')\n bitis_seri_no = request.POST.get('bitis_seri_no')\n errorFlag = 0\n if int(baslangic_seri_no) > int(bitis_seri_no):\n errorFlag = 1\n messages.warning(request,\n 'Başlangıç seri numarası, bitiş seri numarasından büyük olamaz!')\n return False\n testler = Test.objects.filter(tur=request.POST.dict()['tur'])\n seri_no_aralık_range = range(int(baslangic_seri_no), int(bitis_seri_no) + 1\n )\n seri_no_aralık_list = set(seri_no_aralık_range)\n for test in testler:\n seri_no_aralık_test_range = range(int(test.baslangic_seri_no), int(\n test.bitis_seri_no) + 1)\n intersection_set = seri_no_aralık_list.intersection(\n seri_no_aralık_test_range)\n if len(intersection_set) != 0:\n messages.warning(request,\n 'Seri numarası aralığı mevcut bir seri numarası aralığı ile çakışmaktadır!'\n )\n return False\n return 
True\n\n\n@login_required\n@csrf_exempt\ndef uretimkontrol(request):\n mac = request.user_agent.os.family\n ip = get_client_ip(request)\n ip == '192.168.1.36'\n grup = request.user.grup\n birim = request.user.birim\n fullname = request.user.first_name + ' ' + request.user.last_name\n if request.method == 'POST':\n if request.POST.dict()['tur'] == 'valfmontaj':\n veris = json.loads(request.POST.dict()['veri'])\n print(veris)\n t = Uretim(tur='valfmontaj', okunan_deger=veris[0], personel=\n request.user.get_full_name())\n t.save(force_insert=True)\n b = Bildirim(tur='baslangic', kisi=request.user.get_full_name())\n b.save(force_insert=True)\n elif request.POST.dict()['tur'] == 'kurlenme':\n veris = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n if not Uretim.objects.all():\n vsn = 1\n else:\n a = Uretim.objects.all().order_by('-vsn').values()[0]\n s = a['vsn']\n vsn = s + 1\n v = Valf(vsn=vsn, is_emri=veris[0])\n v.save(force_insert=True)\n e = Emir.objects.get(is_emri=veris[0])\n e.durum = 'Aktif'\n e.save()\n t = Uretim(tur='montaj_kurlenme' ,vsn = vsn, is_emri = veris[0] ,personel = request.user.get_full_name(),alt_nipel_no = veris[1],bakir_membran_no = veris[2],ust_nipel_no = veris[3],manometre_no = veris[4],basincanahtari_no = veris[5],montaj_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10))\n t.save(force_insert=True)\n return HttpResponse(str(vsn))\n \"\"\"\n print('deneme')\n is_emri_adi = veris[0]\n emir = Emir.objects.get(is_emri=is_emri_adi)\n personel_id = request.user.id\n alt_nipel_no = veris[1]\n bakir_membran_no = veris[2]\n ust_nipel_no = veris[3]\n manometre_no = veris[4]\n basincanahtari_no = veris[5]\n sibop = veris[6]\n print('deneme2')\n try:\n kayit_tarihi = timezone.now()\n valf_montaj = Valf_montaj(montaj_personel_id=personel_id,\n alt_nipel_no=alt_nipel_no, bakir_membran_no=\n bakir_membran_no, ust_nipel_no=ust_nipel_no,\n manometre_no=manometre_no, basincanahtari_no=\n basincanahtari_no, montaj_tarihi=kayit_tarihi, 
sibop=sibop)\n valf_montaj.save()\n valf = Valf(is_emri=emir, valf_montaj=valf_montaj)\n valf.save()\n return HttpResponse(str(valf.id))\n except Exception as err:\n print(' KAyıt HAstası > ', err)\n elif request.POST.dict()['tur'] == 'valftest':\n try:\n valf_seri_no = json.loads(request.POST.dict()['valf_seri_no'])\n uygun = json.loads(request.POST.dict()['uygun'])\n valf = Valf.objects.get(id=valf_seri_no)\n personel_id = User.objects.get(id=request.user.id)\n test_tarihi = timezone.now()\n acma = str(uygun)\n kapama = str(uygun)\n sebep = str(uygun)\n if uygun == True:\n sebep = None\n valf_test = Valf_test(test_personel=personel_id,\n test_tarihi=test_tarihi, uygun=uygun)\n valf_test.save()\n valf.valf_test = valf_test\n valf.save()\n except Exception as err:\n print(err)\n elif request.POST.dict()['tur'] == 'valfgovde':\n veri = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n v = Valf.objects.get(vsn=veri[3])\n is_emri = v.is_emri\n print('veri[5],sodyum miktarı:: ',veri[5] )\n t = Uretim.objects.get(vsn=veri[3])\n t.tur='govde_kurlenme'\n t.tork_degeri = veri[0]\n t.uygunluk = veri[1]\n t.sebep = veri[2]\n t.tsn = veri[4]\n t.personel = request.user.get_full_name()\n t.govde_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10)\n # t = Uretim(tur='valfgovde',tork_degeri = veri[0] ,is_emri=is_emri, uygunluk = veri[1] , sebep = veri[2],\n # vsn = veri[3],tsn = veri[4], personel = request.user.get_full_name(),govde_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10))\n t.save()\n \"\"\"\n valf_seri_no = veri[3]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_govde'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n kurlenme_bitis = timezone.now() + timezone.timedelta(minutes=10)\n tork = veri[0]\n tup_seri_no = veri[4]\n sodyum_miktari = veri[5]\n uygunluk = veri[1]\n sebep = veri[2]\n if uygunluk == 'on':\n sebep = None\n valf_govde = Valf_govde(valf=valf, personel_id=personel_id,\n 
kayit_tarihi=kayit_tarihi, kurlenme_bitis=kurlenme_bitis,\n tork=tork, tup_seri_no=tup_seri_no, sodyum_miktari=\n sodyum_miktari, uygunluk=uygunluk, sebep=sebep)\n valf_govde.save()\n elif request.POST.dict()['tur'] == 'fm200':\n veri = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n v = Valf.objects.get(vsn=veri[4])\n is_emri = v.is_emri\n print(veri)\n t = Uretim.objects.get(vsn=veri[4])\n t.tur='fm200_kurlenme'\n t.bos_agirlik = veri[0]\n t.rekorlu_agirlik = veri[1]\n t.fm200 = veri[2]\n t.azot = veri[3]\n t.personel = request.user.get_full_name()\n t.fm200_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10) \n t.save()\n \"\"\"\n valf_seri_no = veri[4]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_fm200'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n kurlenme_bitis = timezone.now() + timezone.timedelta(minutes=10)\n bos_agirlik = veri[0]\n rekorlu_agirlik = veri[1]\n fm200 = veri[2]\n azot = veri[3]\n valf_fm200 = Valf_fm200(valf=valf, personel_id=personel_id,\n kayit_tarihi=kayit_tarihi, kurlenme_bitis=kurlenme_bitis,\n bos_agirlik=bos_agirlik, rekorlu_agirlik=rekorlu_agirlik,\n fm200=fm200, azot=azot)\n valf_fm200.save()\n elif request.POST.dict()['tur'] == 'havuztest':\n veri = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n print(veri)\n v = Valf.objects.get(vsn=veri[0])\n is_emri = v.is_emri\n t = Uretim(tur='havuztest',vsn = veri[0],tsn = veri[0],is_emri=is_emri , uygunluk = veri[1] , \n acma = veri[2], kapatma = veri[3],sebep = veri[4], personel = request.user.get_full_name())\n t.save(force_insert=True)\n \"\"\"\n print('veri', veri)\n valf_seri_no = veri[0]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_havuz_test'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n uygunluk = veri[1]\n tup_cidar_sicaklik = veri[2]\n tup_basinc = veri[3]\n sebep = veri[4]\n if uygunluk:\n sebep = None\n valf_havuz = Valf_havuz(valf=valf, 
personel_id=personel_id,\n kayit_tarihi=kayit_tarihi, tup_cidar_sicaklik=\n tup_cidar_sicaklik, tup_basinc=tup_basinc, uygunluk=\n uygunluk, sebep=sebep)\n valf_havuz.save()\n elif request.POST.dict()['tur'] == 'finalmontaj':\n veri = json.loads(request.POST.dict()['veri'])\n \"\"\"neval\n \n print(veri)\n v = Valf.objects.get(vsn=veri[1])\n is_emri = v.is_emri\n t = Uretim.objects.get(vsn=veri[1])\n t.tur='finalmontaj'\n t.etiket_seri_no = veri[0]\n t.fsn = veri[2]\n t.funye_seri_omaj = veri[3]\n t.basinc_anahtari_omaj = veri[4]\n t. personel = request.user.get_full_name()\n #t = Uretim(tur='finalmontaj',etiket_seri_no = veri[0],is_emri=is_emri , vsn = veri[1] , fsn = veri[2],\n # funye_seri_omaj = veri[3],basinc_anahtari_omaj = veri[4], personel = request.user.get_full_name())\n t.save()\n tup_sayisi_str=Emir.objects.filter(is_emri=is_emri).values()[0]['tup_sayisi']\n \"\"\"\n valf_seri_no = veri[1]\n valf = Valf.objects.get(id=valf_seri_no)\n valf.durum = 'valf_final_montaj'\n valf.save()\n personel_id = request.user.id\n kayit_tarihi = timezone.now()\n etiket_seri_no = veri[0]\n funye_seri_no = veri[2]\n funye_seri_omaj = veri[3]\n basinc_anahtari_omaj = veri[4]\n valf_final_montaj = Valf_final_montaj(valf=valf, personel_id=\n personel_id, kayit_tarihi=kayit_tarihi, etiket_seri_no=\n etiket_seri_no, funye_seri_no=funye_seri_no,\n funye_seri_omaj=funye_seri_omaj, basinc_anahtari_omaj=\n basinc_anahtari_omaj)\n valf_final_montaj.save()\n emir = Emir.objects.get(is_emri=valf.is_emri)\n emir_tup_sayisi = int(emir.tup_sayisi)\n emir_biten_valf_sayi = Valf.objects.filter(is_emri=emir, durum=\n 'valf_final_montaj').count()\n print('emir_biten_valf_sayi', emir_biten_valf_sayi)\n print('emir_tup_sayisi', emir_tup_sayisi)\n if emir_biten_valf_sayi == emir_tup_sayisi:\n emir.durum = 'Bitmiş'\n emir.save()\n b = Bildirim(tur='bitis', kisi=request.user.get_full_name())\n b.save(force_insert=True)\n now = timezone.now()\n montajkurlenmesi = Valf_montaj.objects.all()\n 
fm200kurlenmesi = Valf_fm200.objects.filter(\n fm200_kurlenme_bitis_tarihi__gte=now)\n acikemirleri = Emir.objects.filter(durum='Aktif').values()\n aktifemirler = Emir.objects.filter(durum='Aktif')\n govde_emir = list(dict.fromkeys(Valf.objects.filter(\n valf_govde_id__isnull=False).values_list('is_emri_id', flat=True)))\n fm200_emir = list(dict.fromkeys(Valf.objects.filter(\n fm200_azot_id__isnull=False).values_list('is_emri_id', flat=True)))\n return render(request, 'uretim-kontrol.html', {'grup': grup, 'birim':\n birim, 'ip': ip, 'now': now, 'server': server, 'acikemirleri':\n acikemirleri, 'fm200kurlenmes': fm200kurlenmesi, 'kurlenmes':\n montajkurlenmesi, 'aktifemirler': aktifemirler, 'govde_emir':\n govde_emir, 'fm200_emir': fm200_emir})\n\n\n@csrf_exempt\ndef acikisemirleri(request):\n emirler = Emir.objects.filter(durum__in=('Aktif', 'Başlanmamış'))\n temp = []\n for o in emirler.values():\n temp.append(o['is_emri'])\n veri = list(temp)\n\n\n@login_required\n@csrf_exempt\ndef isemri(request):\n mac = request.user_agent.os.family\n grup = request.user.grup\n birim = request.user.birim\n fullname = request.user.first_name + ' ' + request.user.last_name\n emirler = Emir.objects.all()\n form = IsEmri(request.POST)\n if request.method == 'POST':\n if 'tur' in request.POST.dict():\n if request.POST.dict()['tur'] == 'oncelik':\n veri = json.loads(request.POST.dict()['veri'])\n print(veri)\n for key in veri:\n em = Emir.objects.get(is_emri=key)\n em.oncelik = veri[key]\n em.save()\n o = Bildirim(tur='oncelik')\n o.save()\n return HttpResponse('onceliktamam')\n elif form.is_valid():\n if not Emir.objects.all():\n son_oncelik = 1\n else:\n a = Emir.objects.all().order_by('-oncelik').values()[0]\n s = a['oncelik']\n son_oncelik = s + 1\n emir = form.save()\n emir.refresh_from_db()\n emir.is_emri = form.cleaned_data.get('is_emri')\n emir.urun_kodu = form.cleaned_data.get('urun_kodu')\n emir.baslangic = form.cleaned_data.get('baslangic')\n emir.bitis = 
form.cleaned_data.get('bitis')\n emir.emri_veren = form.cleaned_data.get('emri_veren')\n emir.tup_govde_turu = form.cleaned_data.get('tup_govde_turu')\n emir.valf_turu = form.cleaned_data.get('valf_turu')\n emir.renk = form.cleaned_data.get('renk')\n emir.emniyet_ventil_turu = form.cleaned_data.get(\n 'emniyet_ventil_turu')\n emir.siparis = form.cleaned_data.get('siparis')\n emir.fm200bosagirlikmindeger = form.cleaned_data.get(\n 'fm200bosagirlikmindeger')\n emir.fm200bosagirlikmaxdeger = form.cleaned_data.get(\n 'fm200bosagirlikmaxdeger')\n emir.fm200dolummiktarimindeger = form.cleaned_data.get(\n 'fm200dolummiktarimindeger')\n emir.fm200dolummiktarimaxdeger = form.cleaned_data.get(\n 'fm200dolummiktarimaxdeger')\n t = Bildirim(tur='is emri', emri_veren_grup=grup, emri_veren=\n request.user.get_full_name(), is_emri=form.cleaned_data.get\n ('is_emri'))\n t.save(force_insert=True)\n emir.oncelik = son_oncelik\n messages.success(request, 'Emir başarıyla eklendi!')\n emir.save()\n form.full_clean()\n return HttpResponseRedirect(reverse('isemri'))\n else:\n messages.warning(request,\n 'İş emri eklenemedi.Lütfen tekrar deneyin!Hata: {}'.format(\n form.errors))\n else:\n form = IsEmri()\n form.fields['emri_veren'].initial = fullname\n return render(request, 'is-emri.html', {'form': form, 'emirler':\n emirler, 'mac': mac, 'fullname': fullname, 'grup': grup, 'birim':\n birim, 'server': server})\n\n\ndef yetkilendirme(request):\n mac = request.user_agent.os.family\n grup = 'Yönetici'\n birim = 'IT'\n kullanicilar = User.objects.all()\n if (grup == 'Yönetici' and birim == 'IT' or grup == 'Mühendis' and \n birim == 'IT'):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.refresh_from_db()\n user.first_name = form.cleaned_data.get('first_name')\n user.last_name = form.cleaned_data.get('last_name')\n user.grup = form.cleaned_data.get('grup')\n user.save()\n username = form.cleaned_data.get('username')\n 
password = form.cleaned_data.get('password1')\n messages.success(request,\n '{} isimli kullanıcı {} isimli gruba eklendi!'.format(\n username, user.grup))\n return HttpResponseRedirect(reverse('yetkilendirme'))\n else:\n print(form.errors)\n else:\n form = UserRegisterForm()\n return render(request, 'kullanici-yetkilendirme.html', {'form':\n form, 'kullanicilar': kullanicilar, 'mac': mac, 'grup': grup,\n 'birim': birim, 'server': server})\n else:\n return HttpResponseRedirect(reverse('403'))\n\n\n@login_required\ndef performans(request):\n mac = request.user_agent.os.family\n grup = request.user.grup\n birim = request.user.birim\n kullanicilar = User.objects.all()\n return render(request, 'performans.html', {'mac': mac, 'grup': grup,\n 'birim': birim, 'kullanicilar': kullanicilar, 'server': server})\n\n\n@login_required\n@csrf_exempt\ndef yazdir(request):\n mac = request.user_agent.os.family\n grup = request.user.grup\n birim = request.user.birim\n if True:\n if request.method == 'POST':\n i = Emir.objects.filter(durum=request.POST['durum'])\n temp = []\n for obj in i.values():\n times = obj['emir_zamani'].strftime('%d %B %Y (%H:%M:%S)')\n temp.append(obj['is_emri'] + ' ' + times)\n veri = list(temp)\n return JsonResponse(veri, safe=False)\n return render(request, 'yazdir.html', {'mac': mac, 'grup': grup,\n 'birim': birim, 'server': server})\n else:\n return HttpResponseRedirect(reverse('403'))\n\n\n@login_required\ndef ulogout(request):\n logout(request)\n return HttpResponseRedirect(reverse('ulogin'))\n\n\n@csrf_exempt\ndef ulogin(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n login(request, user)\n print('{} kullanıcısı tarafından başarılı giriş'.format(\n username))\n return redirect('arama')\n else:\n messages.warning(request,\n 'Kullanıcı adınızı yada parolanızı yanlış girdiniz.')\n else:\n 
print('Birisi login olmayı denedi ve başarısız oldu!')\n messages.warning(request,\n 'Kullanıcı adınızı yada parolanızı yanlış girdiniz.')\n return HttpResponseRedirect(reverse('ulogin'))\n else:\n return render(request, 'login.html', {})\n\n\ndef _403(request):\n return render(request, '403.html', {})\n\n\ndef handler404(request, exception):\n return render(request, '403.html', status=404)\n\n\n@csrf_exempt\ndef kullanicijson(request):\n username = request.POST.get('username')\n b = User.objects.filter(username=username).values('first_name',\n 'last_name', 'username', 'grup')\n veri = list(b)\n return JsonResponse(veri, safe=False)\n\n\n@csrf_exempt\ndef kullanicisil(request):\n username = request.POST.get('username')\n print(username)\n sildi = User.objects.filter(username=username).delete()\n if sildi:\n return HttpResponse('silindi')\n else:\n return HttpResponse('silinemedi')\n\n\n@csrf_exempt\ndef kullaniciduzelt(request):\n veri = request.POST.get('bilgi')\n veri = json.loads(veri)\n a = User.objects.get(username=veri['eskisi'])\n a.username = veri['username']\n a.first_name = veri['first_name']\n a.last_name = veri['last_name']\n a.grup = veri['grup']\n a.birim = veri['birim']\n a.save()\n return HttpResponse('duzeltildi')\n\n\n@csrf_exempt\ndef passwordreset(request):\n ps = request.POST.get('ps1')\n if request.POST.get('username'):\n u = User.objects.get(username=request.POST.get('username'))\n u.set_password(ps)\n u.save()\n return HttpResponse('parola değiştirildi')\n return HttpResponse('bir hata var')\n\n\ndef get_first_and_lastname(username):\n try:\n first_name = User.objects.filter(username=username).first().first_name\n last_name = User.objects.filter(username=username).first().last_name\n return '{} {}'.format(first_name, last_name)\n except:\n return 'isim soyisim'\n\n\n@csrf_exempt\ndef pdf(request):\n if request.GET.get('qr'):\n qr = request.GET.get('qr')\n print(qr.split(' ')[0])\n i = qr.split(' ')[0]\n print('---------------------')\n 
valf_no = request.GET.get('vsn')\n Valf_montaj_Data = Valf_montaj.objects.filter(id=Valf.objects.filter(id\n =valf_no).first().valf_montaj_id).first()\n Valf_fm200_Data = Valf_fm200.objects.filter(id=Valf.objects.filter(id=\n valf_no).first().fm200_azot_id).first()\n Valf_havuz_Data = Valf_havuz.objects.filter(id=Valf.objects.filter(id=\n valf_no).first().havuz_id).first()\n Valf_final_Data = Valf_final_montaj.objects.filter(id=Valf.objects.\n filter(id=valf_no).first().valf_final_montaj_id).first()\n Valf_test_Data = Valf_test.objects.filter(id=Valf.objects.filter(id=\n valf_no).first().valf_test_id).first()\n Valf_govde_Data = Valf_govde.objects.filter(id=Valf.objects.filter(id=\n valf_no).first().valf_govde_id).first()\n Emir_Data = Emir.objects.filter(is_emri=i).first()\n valf_final = Valf.objects.filter(id=valf_no).values_list(\n 'valf_final_montaj_id', flat=True).first()\n urun_seri_no = Valf_final_montaj.objects.filter(id=valf_final).values_list(\n 'urun_seri_no', flat=True).first()\n print('---------------------')\n try:\n valfmontajPersonel = get_first_and_lastname(User.objects.filter(id=\n Valf_montaj_Data.montaj_personel_id).first().username)\n except:\n valfmontajPersonel = ''\n try:\n valfmontajTarih = Valf_montaj_Data.montaj_tarihi\n except:\n valfmontajTarih = ''\n try:\n altnipelno = Valf_montaj_Data.alt_nipel_no\n except:\n altnipelno = ''\n try:\n ustnipelno = Valf_montaj_Data.ust_nipel_no\n except:\n ustnipelno = ''\n try:\n switchno = Valf_montaj_Data.basincanahtari_no\n except:\n switchno = ''\n try:\n manometreno = Valf_montaj_Data.manometre_no\n except:\n manometreno = ''\n try:\n valftestPersonel = get_first_and_lastname(User.objects.filter(id=\n Valf_test_Data.test_personel_id).first().username)\n except:\n valftestPersonel = ''\n try:\n valftestTarih = Valf_test_Data.test_tarihi\n except:\n valftestTarih = ''\n try:\n valfTestUygun = ('Uygun' if Valf_test_Data.uygun == True else\n 'Uygun Değil')\n except:\n valfTestUygun = 
Valf_test_Data.uygun\n try:\n valfgovdePersonel = get_first_and_lastname(User.objects.filter(id=\n Valf_govde_Data.govde_personel_id).first().username)\n except:\n valfgovdePersonel = ''\n try:\n valfgovdeTarih = Valf_govde_Data.govde_tarihi\n except:\n valfgovdeTarih = ''\n try:\n valfGovdeUygun = ('Uygun' if Valf_govde_Data.uygunluk == True else\n 'Uygun Değil')\n except:\n valfGovdeUygun = ''\n try:\n fm200Personel = get_first_and_lastname(User.objects.filter(id=\n Valf_fm200_Data.fm200_personel_id).first().username)\n except:\n fm200Personel = ''\n try:\n fm200Tarih = Valf_fm200_Data.kayit_tarihi\n except:\n fm200Tarih = ''\n try:\n bosAgirlik = Valf_fm200_Data.bos_agirlik\n except:\n bosAgirlik = ''\n try:\n doluAgirlik = Valf_fm200_Data.dolu_agirlik\n except:\n doluAgirlik = ''\n try:\n bar = Valf_fm200_Data.bar\n except:\n bar = ''\n try:\n havuztestPersonel = get_first_and_lastname(User.objects.filter(id=\n Valf_havuz_Data.havuz_personel_id).first().username)\n except:\n havuztestPersonel = ''\n try:\n havuztestTarih = Valf_havuz_Data.kayit_tarihi\n except:\n havuztestTarih = ''\n try:\n havuzTestUygun = ('Uygun' if Valf_havuz_Data.uygunluk == True else\n 'Uygun Değil')\n except:\n havuzTestUygun = ''\n try:\n finalmontajPersonel = get_first_and_lastname(User.objects.filter(id\n =Valf_final_Data.personel_id).first().username)\n except:\n finalmontajPersonel = ''\n try:\n finalmontajTarih = Valf_final_Data.kayit_tarihi\n except:\n finalmontajTarih = ''\n try:\n membranTipi = Emir_Data.valf_turu\n except:\n membranTipi = ''\n try:\n ventilTipi = Emir_Data.emniyet_ventil_turu\n except:\n ventilTipi = ''\n try:\n tugovdetipi = Emir_Data.tup_govde_turu\n except:\n tugovdetipi = ''\n try:\n siboplotno = Valf_montaj_Data.sibop\n except:\n siboplotno = ''\n print(valftestPersonel, Emir_Data.emniyet_ventil_turu)\n veri = 'veri'\n html_string = render_to_string('external/pdf-template.html', {'veri':\n veri, 'qr': urun_seri_no, 'valfmontajPersonel': 
valfmontajPersonel,\n 'valfmontajTarih': valfmontajTarih, 'valfgovdePersonel':\n valfgovdePersonel, 'valftestPersonel': valftestPersonel,\n 'valftestTarih': valftestTarih, 'valfTestUygun': valfTestUygun,\n 'havuzTestUygun': havuzTestUygun, 'valfgovdePersonel':\n valftestPersonel, 'valfgovdeTarih': valfgovdeTarih,\n 'valfGovdeUygun': valfGovdeUygun, 'valfMontajUygun': 'Uygun*',\n 'fm200Uygun': 'Uygun*', 'finalMontajUygun': 'Uygun*',\n 'fm200Personel': fm200Personel, 'fm200Tarih': fm200Tarih,\n 'bosAgirlik': bosAgirlik, 'doluAgirlik': doluAgirlik,\n 'havuztestPersonel': havuztestPersonel, 'havuztestTarih':\n havuztestTarih, 'finalmontajPersonel': finalmontajPersonel,\n 'finalmontajTarih': finalmontajTarih, 'altnipelno': altnipelno,\n 'ustnipelno': ustnipelno, 'switchno': switchno, 'manometreno':\n manometreno, 'is_emri': i, 'membranTipi': membranTipi, 'ventilTipi':\n ventilTipi, 'urunserino': urun_seri_no, 'bar': bar, 'tugovdetipi':\n tugovdetipi, 'siboplotno': siboplotno}, request=request)\n html = HTML(string=html_string, base_url=request.build_absolute_uri())\n html.write_pdf(target='/tmp/' + qr + '.pdf')\n fs = FileSystemStorage('/tmp/')\n with fs.open(qr + '.pdf') as pdf:\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = 'inline; filename=\"pdf.pdf\"'\n return response\n return response\n\n\n@csrf_exempt\ndef dashboard(request):\n bugun = timezone.now()\n print(request.POST.get('gun_sayisi'))\n gun = int(request.POST.get('gun_sayisi'))\n kac_gun = bugun - timezone.timedelta(days=gun)\n veris = Test.objects.filter(test_tarihi__range=[kac_gun, bugun])\n temp = []\n for o in veris.values():\n temp.append(o)\n veri = list(temp)\n print('dashboard', veri)\n return JsonResponse(veri, safe=False)\n\n\n@csrf_exempt\ndef uretimdurum(request):\n i = request.POST.get('is_emri')\n print(i)\n veri = list()\n print(Valf.objects.filter(is_emri_id=i).values_list('valf_montaj_id',\n flat=True).count(), 
Valf.objects.filter(is_emri_id=i).filter(\n valf_test_id__isnull=False).values_list('valf_test_id', flat=True).\n count())\n try:\n veri.append(Valf.objects.filter(is_emri_id=i).filter(\n valf_montaj_id__isnull=False).values_list('valf_montaj_id',\n flat=True).count())\n veri.append(Valf.objects.filter(is_emri_id=i).filter(\n valf_test_id__isnull=False).values_list('valf_test_id', flat=\n True).count())\n veri.append(Valf.objects.filter(is_emri_id=i).filter(\n valf_govde_id__isnull=False).values_list('valf_govde_id', flat=\n True).count())\n veri.append(Valf.objects.filter(is_emri_id=i).filter(\n fm200_azot_id__isnull=False).values_list('fm200_azot_id', flat=\n True).count())\n veri.append(Valf.objects.filter(is_emri_id=i).filter(\n havuz_id__isnull=False).values_list('havuz_id', flat=True).count())\n veri.append(Valf.objects.filter(is_emri_id=i).filter(\n valf_final_montaj_id__isnull=False).values_list(\n 'valf_final_montaj_id', flat=True).count())\n veri.append(Emir.objects.filter(id=i).values()[0]['tup_sayisi'])\n except Exception as err:\n print(err)\n veri = [0, 0, 0, 0, 0, 0, 10]\n print(veri)\n return JsonResponse(veri, safe=False)\n\n\n@csrf_exempt\ndef personeldurum(request):\n p = request.POST.get('personel')\n g = request.POST.get('gun_sayisi')\n print(p, g)\n bugun = timezone.now()\n gun = int(request.POST.get('gun_sayisi'))\n kac_gun = bugun - timezone.timedelta(days=gun)\n veris = Test.objects.filter(test_tarihi__range=[kac_gun, bugun])\n veri = list()\n try:\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='manometre').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='basinc').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='altnipel').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n 
).filter(tur='ustnipel').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='bakirmembran').filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun, bugun]\n ).filter(tur='emniyet').filter(testi_yapan=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='kurlenme').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='valftest').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='valfgovde').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='fm200').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='havuztest').filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun, bugun]).\n filter(tur='finalmontaj').filter(personel=p).count())\n except:\n veri = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10]\n print(veri)\n return JsonResponse(veri, safe=False)\n\n\n@csrf_exempt\ndef tupTuru(request):\n if request.method == 'POST':\n try:\n u = Emir.objects.filter(is_emri=request.POST.dict()['is_emri']\n ).first()\n bos_agirlik_miktari = u.bos_agirlik_miktari\n fm200_miktari = u.fm200_miktari\n renk = u.renk\n response = bos_agirlik_miktari + ';' + fm200_miktari + ';' + renk\n return HttpResponse(str(response))\n except e:\n print(e)\n return str('tur')\n\n\n@csrf_exempt\ndef getEmirNo(request):\n if request.method == 'POST':\n vsn = request.POST.dict()['veri']\n print('getEmirNo', vsn)\n try:\n is_emri = Emir.objects.filter(id=vsn).values_list('is_emri',\n flat=True).first()\n return HttpResponse(str(is_emri))\n except:\n return HttpResponse(str('NO'))\n return str('is_emri')\n\n\n@csrf_exempt\ndef kontrolEt(request):\n if request.method == 'POST':\n tur = request.POST['tur']\n veri = 
request.POST['veri']\n isemri = request.POST['isemri']\n t = Test.objects.filter(tur=tur)\n r = 'NO'\n if tur == 'altnipel':\n t = Test.objects.filter(tur=tur)\n try:\n if int(veri) in t.values_list('lot_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'ustnipel':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('baslangic_seri_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'manometre':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('seri_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except Exception as e:\n print(e)\n r = 'NO'\n if tur == 'basinc':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('seri_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'bakirmembran':\n t = Test.objects.filter(tur=tur)\n try:\n if int(veri) in t.values_list('lot_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'emniyet':\n t = Test.objects.filter(tur=tur)\n try:\n if veri in t.values_list('lot_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'valf_govde':\n try:\n valf_id = Valf.objects.filter(valf_montaj_id=veri).values_list(\n 'valf_test_id', flat=True).first()\n if isinstance(valf_id, int):\n Valf_test.objects.filter(id=valf_id).values_list('uygun',\n flat=True).first()\n if Valf_test.objects.filter(id=valf_id).values_list('uygun'\n , flat=True).first():\n r = 'OK'\n else:\n r = 'NO'\n else:\n r = 'NO'\n except:\n r = 'NO'\n if tur == 'sibop':\n print(tur, veri, t.values_list('lot_no', flat=True))\n t = Test.objects.filter(tur=tur)\n try:\n if int(veri) in t.values_list('lot_no', flat=True):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n return HttpResponse(r)\n\n\n@csrf_exempt\ndef kurlenmeKontrol(request):\n if request.method == 'POST':\n r = 'NO'\n tur = request.POST['tur']\n vsn = request.POST['veri']\n print('kurlenmeKontrol', tur, vsn)\n if tur == 
'montaj_kurlenme':\n try:\n u = Uretim.objects.filter(vsn=vsn)\n print(u, '---------------')\n if u.values()[0]['montaj_kurlenme_zamani'] < timezone.now():\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n elif tur == 'govde_kurlenme':\n try:\n u = Uretim.objects.filter(vsn=vsn)\n print('govde_kurlenme_zamani', u.values()[0][\n 'govde_kurlenme_zamani'])\n print('now', timezone.now())\n if u.values()[0]['govde_kurlenme_zamani'] < timezone.now():\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n elif tur == 'valf_test':\n print('içerdeyim-----> Valf Test')\n try:\n print(vsn, '----------------------------')\n valf_montaj_id = Valf.objects.filter(id=vsn).first(\n ).valf_montaj_id\n print(valf_montaj_id)\n tarih = Valf_montaj.objects.filter(id=valf_montaj_id).first(\n ).kurlenme_bitis_tarihi\n print(tarih)\n print(type(timezone.now()), timezone.now())\n print(type(tarih), tarih)\n if tarih < timezone.now():\n print('büyüktür')\n r = 'OK'\n else:\n print('küçük')\n r = 'NO'\n except Exception as err:\n print('r', err)\n r = 'NO'\n elif tur == 'pdfkontrol':\n print(vsn)\n try:\n if Valf.objects.filter(valf_montaj_id=vsn).count():\n r = 'OK'\n else:\n r = 'NO'\n except Exception as err:\n r = 'NO'\n print(err)\n return HttpResponse(r)\n\n\n@csrf_exempt\ndef newVSN(request):\n if request.method == 'POST':\n vsn = ''\n if not Uretim.objects.all():\n vsn = 1\n else:\n a = Uretim.objects.all().order_by('-vsn').values()[0]\n s = a['vsn']\n print('sssss', s)\n vsn = s + 1\n print(vsn)\n r = str(vsn)\n return HttpResponse(r)\n\n\n@csrf_exempt\ndef hardreset(request):\n print('Hard')\n",
"step-5": "from django.shortcuts import render,redirect\nfrom .forms import UserRegisterForm, IsEmri ,TestForm,PDF_Rapor\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login ,logout\nfrom django.http import HttpResponseRedirect, HttpResponse ,JsonResponse\nfrom django.urls import reverse\nfrom django.db.models import Max\nfrom django.contrib.auth.models import User\nfrom .models import Emir , Test, Bildirim, Uretim, Valf\nfrom .models import Valf_montaj,Valf_test,Valf_govde,Valf_fm200,Valf_havuz,Valf_final_montaj\nfrom django.contrib.auth.decorators import login_required\nimport json, platform, base64, datetime, os\nfrom django.utils import timezone\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.core.files.storage import FileSystemStorage\nfrom django.template.loader import render_to_string\nfrom weasyprint import HTML\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nfrom base64 import b64decode\n\n# Create your views here.\n\n#mac = platform.machine()[:3] # eğer device ras pi ise 'arm' döner\nserver = '192.168.1.38:8000'\ndef get_client_ip(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n print(ip)\n return ip\n\n\n\ndef bildirim(request):\n bugun = timezone.now()\n birGunOnce = bugun - timezone.timedelta(days=14)\n bildirimq = Bildirim.objects.filter(zaman__range=[birGunOnce,bugun])\n temp = []\n for o in bildirimq.values():\n temp.append(o)\n bildirims = list(temp)\n print(bildirims)\n return JsonResponse(bildirims,safe=False)\n\n@login_required\ndef index(request):\n #Bildirim.objects.all().delete()\n \n grup = request.user.grup\n birim = request.user.birim\n emirler = Emir.objects.filter(durum=\"Aktif\")\n l = list()\n for e in emirler.values():\n \n data = dict()\n data['is_emri'] 
= e['is_emri']\n data['valfmontaj'] = Valf.objects.filter(is_emri_id=e['id']).filter(valf_montaj_id__isnull=False).values_list('valf_montaj_id',flat=True).count()or 0\n data['valftest'] = Valf.objects.filter(is_emri_id=e['id']).filter(valf_test_id__isnull=False).values_list('valf_test_id',flat=True).count()or 0\n data['valfgovde'] =Valf.objects.filter(is_emri_id=e['id']).filter(valf_govde_id__isnull=False).values_list('valf_govde_id',flat=True).count()or 0\n data['fm200'] = Valf.objects.filter(is_emri_id=e['id']).filter(fm200_azot_id__isnull=False).values_list('fm200_azot_id',flat=True).count()or 0\n data['havuztest'] = Valf.objects.filter(is_emri_id=e['id']).filter(havuz_id__isnull=False).values_list('havuz_id',flat=True).count()or 0\n data['finalmontaj'] = Valf.objects.filter(is_emri_id=e['id']).filter(valf_final_montaj_id__isnull=False).values_list('valf_final_montaj_id',flat=True).count()or 0\n l.append(data)\n print(l)\n\n return render(request,'index.html', { 'grup' : grup, \"emirler\" : emirler, 'birim': birim,'server' : server,'uretims':l})\n@login_required\ndef arama(request):\n mac = request.user_agent.os.family\n q = request.GET.get('q') or request.GET.get('uretim')\n emir = request.GET.get('emir')\n emirs = Emir.objects.all()\n media_url = settings.MEDIA_URL\n aranan = \"\"\n if q:\n aranan = q\n elif emir:\n aranan = \"isemri\"\n else:\n print('bos')\n grup = request.user.grup\n birim = request.user.birim\n testler = Test.objects.filter(tur=q)\n # valfmontaj=Valf_montaj.objects.all()\n # valfgovde=Valf_govde.objects.all()\n # finalmontaj=Valf_final_montaj.objects.all()\n # fm200=Valf_fm200.objectsobjects.all()\n print(q)\n # if q == \"valfmontaj\":\n # uretims = Uretim.objects.filter(tur=\"kurlenme\")\n # else:\n # uretims = Uretim.objects.filter(tur=q)\n # print(uretims)\n if q == \"valfmontaj\":\n uretims = Valf_montaj.objects.all()\n elif q == \"valfgovde\":\n uretims = Valf_govde.objects.all()\n elif q == \"fm200\":\n uretims = 
Valf_fm200.objects.all()\n elif q == \"havuztest\":\n uretims = Valf_havuz.objects.all()\n elif q == \"finalmontaj\":\n uretims = Valf_final_montaj.objects.all()\n else:\n uretims = Uretim.objects.filter(tur=q)\n print(uretims)\n if emir == \"tumu\":\n emirler = Emir.objects.all()\n else:\n emirler = Emir.objects.filter(is_emri=emir)\n return render(request,'arama.html',{ 'mac' : mac , 'testler' : testler , 'grup': grup,\"emirler\": emirler, \"aranan\": aranan, \"emirs\":emirs, 'birim': birim,'media_url':media_url,\"uretims\":uretims,'server' : server})\n@login_required\n@csrf_exempt\ndef giriskalite(request):\n mac = request.user_agent.os.family\n grup = request.user.grup\n birim = request.user.birim\n #Test.objects.all().delete() #Test sonuçlarını silmek için\n fullname = request.user.first_name + ' ' + request.user.last_name\n if request.method == 'POST':\n if request.POST.dict()['tur'] == 'basinc':\n veris = json.loads(request.POST.dict()['veri'])\n for veri in veris:\n t = Test(tur='basinc',seri_no = veri[0] , acma = veri[1] , kapatma = veri[2], kabul_durumu = veri[3], testi_yapan = fullname)\n t.save(force_insert=True)\n elif request.POST.dict()['tur'] == 'manometre':\n veris = json.loads(request.POST.dict()['veri'])\n for veri in veris:\n t = Test(tur='manometre',seri_no = veri[0] , okunan_deger = veri[1], kabul_durumu = veri[2] ,testi_yapan = fullname)\n t.save(force_insert=True)\n elif request.POST.dict()['tur'] == 'altnipel':\n print(request.POST)\n\n kontrolResult= nipelSeriNoKontrol(request)\n if kontrolResult == True :\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n fs.save(upload_file.name,upload_file)\n next_lot_no = getNextLotNo( request.POST.dict()['tur'])\n\n t = Test(tur='altnipel',lot_no = next_lot_no , pdf_ismi = request.POST.get('pdf_ismi') ,baslangic_seri_no = request.POST.get('baslangic_seri_no'),bitis_seri_no = request.POST.get('bitis_seri_no'), kabul_durumu = request.POST.get('kabulAlt'),testi_yapan = 
fullname)\n t.save(force_insert=True)\n messages.success(request,'Alt nipel testi başarıyla kaydedildi.')\n elif request.POST.dict()['tur'] == 'ustnipel':\n print(request.POST)\n\n kontrolResult= nipelSeriNoKontrol(request)\n if kontrolResult == True :\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n fs.save(upload_file.name,upload_file)\n next_lot_no = getNextLotNo( request.POST.dict()['tur'])\n t = Test(tur='ustnipel',lot_no = next_lot_no , pdf_ismi = request.POST.get('pdf_ismi') ,baslangic_seri_no = request.POST.get('baslangic_seri_no'),bitis_seri_no = request.POST.get('bitis_seri_no'), kabul_durumu = request.POST.get('kabulUst'),testi_yapan = fullname)\n t.save(force_insert=True)\n messages.success(request,'Üst nipel testi başarıyla kaydedildi.')\n elif request.POST.dict()['tur'] == 'bakirmembran':\n \n print(request.POST) \n \n next_lot_no = getNextLotNo( request.POST.get('test_tur') )\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n fs.save(upload_file.name,upload_file)\n t = Test(tur=request.POST.get('test_tur'), lot_no = next_lot_no, pdf_ismi = request.POST.get('pdf_ismi') ,test_basinci = request.POST.get('test_basinci'),\n patlama_basinci = request.POST.get('patlama_basinci'), kabul_durumu = request.POST.get('kabulBak'),testi_yapan = fullname)\n t.save(force_insert=True)\n if(request.POST.get('test_tur') =='bakirmembran'):\n messages.success(request,'Bakır membran testi başarıyla kaydedildi.')\n else:\n messages.success(request,'Emniyet ventili testi başarıyla kaydedildi.')\n\n\n \"\"\"\n elif request.POST.get('tur') == 'emniyet':\n print(request.POST)\n if request.FILES:\n upload_file = request.FILES['file']\n fs = FileSystemStorage()\n fs.save(upload_file.name,upload_file)\n \n next_lot_no = getNextLotNo( request.POST.dict()['tur'])\n t = Test(tur='emniyet',lot_no =next_lot_no, pdf_ismi = request.POST.get('pdf_ismi') ,test_basinci = request.POST.get('test_basinci'), 
patlama_basinci = request.POST.get('patlama_basinci'),kabul_durumu = request.POST.get('kabulEmn'),testi_yapan = fullname)\n t.save(force_insert=True)\n messages.success(request,'Emniyet ventili testi başarıyla kaydedildi.')\n \"\"\"\n \n return render(request,'giris-kalite-kontrol.html',{ 'mac' : mac , 'grup': grup, 'birim': birim,'server' : server})\n\n\ndef getNextLotNo(tur):\n test_with_max_lot_no = Test.objects.filter(tur=tur).order_by('-lot_no').first()\n if(test_with_max_lot_no == None):\n max_lot_no=0\n else:\n max_lot_no=test_with_max_lot_no.lot_no\n return max_lot_no + 1 \n\ndef nipelSeriNoKontrol(request):\n baslangic_seri_no = request.POST.get('baslangic_seri_no')\n bitis_seri_no = request.POST.get('bitis_seri_no')\n errorFlag=0\n if(int(baslangic_seri_no) > int(bitis_seri_no)):\n errorFlag=1\n messages.warning(request,'Başlangıç seri numarası, bitiş seri numarasından büyük olamaz!') \n return False \n \n testler = Test.objects.filter(tur=request.POST.dict()['tur'] )\n seri_no_aralık_range= range(int(baslangic_seri_no),int(bitis_seri_no)+1)\n seri_no_aralık_list= set(seri_no_aralık_range)\n for test in testler:\n seri_no_aralık_test_range= range(int(test.baslangic_seri_no),int(test.bitis_seri_no)+1)\n intersection_set= seri_no_aralık_list.intersection(seri_no_aralık_test_range)\n if len(intersection_set) != 0 :\n messages.warning(request,'Seri numarası aralığı mevcut bir seri numarası aralığı ile çakışmaktadır!')\n return False\n\n return True\n\n@login_required\n@csrf_exempt\ndef uretimkontrol(request):\n mac = request.user_agent.os.family\n\n ip = get_client_ip(request)\n ip == '192.168.1.36'\n\n grup = request.user.grup\n birim = request.user.birim\n\n #Uretim.objects.all().delete() #Test sonuçlarını silmek için bu yorumu açabilirsiniz\n fullname = request.user.first_name + ' ' + request.user.last_name\n if request.method == 'POST':\n if request.POST.dict()['tur'] == 'valfmontaj':\n veris = json.loads(request.POST.dict()['veri'])\n print(veris)\n t = 
Uretim(tur='valfmontaj' , okunan_deger = veris[0] ,personel = request.user.get_full_name())\n t.save(force_insert=True)\n b = Bildirim(tur=\"baslangic\",kisi = request.user.get_full_name())\n b.save(force_insert=True)\n elif request.POST.dict()['tur'] == 'kurlenme':\n veris = json.loads(request.POST.dict()['veri'])\n '''neval\n if not Uretim.objects.all():\n vsn = 1\n else:\n a = Uretim.objects.all().order_by('-vsn').values()[0]\n s = a['vsn']\n vsn = s + 1\n v = Valf(vsn=vsn, is_emri=veris[0])\n v.save(force_insert=True)\n e = Emir.objects.get(is_emri=veris[0])\n e.durum = 'Aktif'\n e.save()\n t = Uretim(tur='montaj_kurlenme' ,vsn = vsn, is_emri = veris[0] ,personel = request.user.get_full_name(),alt_nipel_no = veris[1],bakir_membran_no = veris[2],ust_nipel_no = veris[3],manometre_no = veris[4],basincanahtari_no = veris[5],montaj_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10))\n t.save(force_insert=True)\n return HttpResponse(str(vsn))\n '''\n print(\"deneme\")\n #burası sonradan düzenlenecek Berker\n # e = Emir.objects.get(is_emri=veris[0])\n # e.durum = 'Aktif'\n # e.save()\n is_emri_adi=veris[0] \n emir=Emir.objects.get(is_emri= is_emri_adi)\n personel_id=request.user.id\n\n alt_nipel_no = veris[1]\n bakir_membran_no = veris[2]\n ust_nipel_no = veris[3]\n manometre_no = veris[4]\n basincanahtari_no = veris[5]\n sibop = veris[6]\n print(\"deneme2\")\n \n \n try:\n kayit_tarihi=timezone.now()\n #kurlenme_bitis=timezone.now()+timezone.timedelta(minutes=10)\n\n valf_montaj = Valf_montaj(montaj_personel_id= personel_id, alt_nipel_no=alt_nipel_no,bakir_membran_no=bakir_membran_no,ust_nipel_no=ust_nipel_no,manometre_no=manometre_no,basincanahtari_no=basincanahtari_no,montaj_tarihi=kayit_tarihi,sibop=sibop)\n valf_montaj.save() \n \n\n valf = Valf(is_emri=emir,valf_montaj=valf_montaj)\n valf.save()\n\n return HttpResponse(str(valf.id))\n except Exception as err:\n print(\" KAyıt HAstası > \", err)\n\n \n elif request.POST.dict()['tur'] == 'valftest':\n 
try:\n\n valf_seri_no = json.loads(request.POST.dict()['valf_seri_no'])\n uygun = json.loads(request.POST.dict()['uygun'])\n \n\n valf = Valf.objects.get(id=valf_seri_no ) \n\n\n personel_id=User.objects.get(id=request.user.id)\n test_tarihi=timezone.now()\n \n\n acma = str(uygun)\n kapama = str(uygun)\n sebep = str(uygun)\n if (uygun==True): \n sebep=None\n valf_test= Valf_test( test_personel=personel_id,test_tarihi=test_tarihi,uygun=uygun)\n\n valf_test.save()\n valf.valf_test=valf_test\n valf.save()\n except Exception as err:\n print(err)\n \n \n elif request.POST.dict()['tur'] == 'valfgovde':\n \n veri = json.loads(request.POST.dict()['veri'])\n \n '''neval\n v = Valf.objects.get(vsn=veri[3])\n is_emri = v.is_emri\n print('veri[5],sodyum miktarı:: ',veri[5] )\n t = Uretim.objects.get(vsn=veri[3])\n t.tur='govde_kurlenme'\n t.tork_degeri = veri[0]\n t.uygunluk = veri[1]\n t.sebep = veri[2]\n t.tsn = veri[4]\n t.personel = request.user.get_full_name()\n t.govde_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10)\n # t = Uretim(tur='valfgovde',tork_degeri = veri[0] ,is_emri=is_emri, uygunluk = veri[1] , sebep = veri[2],\n # vsn = veri[3],tsn = veri[4], personel = request.user.get_full_name(),govde_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10))\n t.save()\n '''\n valf_seri_no=veri[3]\n valf = Valf.objects.get(id=valf_seri_no ) \n valf.durum='valf_govde'\n valf.save()\n\n \n personel_id=request.user.id\n kayit_tarihi=timezone.now()\n kurlenme_bitis=timezone.now()+timezone.timedelta(minutes=10)\n tork=veri[0]\n tup_seri_no=veri[4]\n sodyum_miktari=veri[5]\n uygunluk=veri[1]\n sebep=veri[2]\n if (uygunluk=='on'): \n sebep=None\n\n valf_govde= Valf_govde(valf=valf, personel_id=personel_id,kayit_tarihi=kayit_tarihi,kurlenme_bitis=kurlenme_bitis,tork=tork,tup_seri_no=tup_seri_no,sodyum_miktari=sodyum_miktari,uygunluk=uygunluk,sebep=sebep)\n valf_govde.save()\n\n elif request.POST.dict()['tur'] == 'fm200':\n veri = 
json.loads(request.POST.dict()['veri'])\n '''neval\n v = Valf.objects.get(vsn=veri[4])\n is_emri = v.is_emri\n print(veri)\n t = Uretim.objects.get(vsn=veri[4])\n t.tur='fm200_kurlenme'\n t.bos_agirlik = veri[0]\n t.rekorlu_agirlik = veri[1]\n t.fm200 = veri[2]\n t.azot = veri[3]\n t.personel = request.user.get_full_name()\n t.fm200_kurlenme_zamani=timezone.now()+timezone.timedelta(minutes=10) \n t.save()\n '''\n\n valf_seri_no=veri[4]\n valf = Valf.objects.get(id=valf_seri_no ) \n valf.durum='valf_fm200'\n valf.save()\n\n \n personel_id=request.user.id\n kayit_tarihi=timezone.now()\n kurlenme_bitis=timezone.now()+timezone.timedelta(minutes=10) \n bos_agirlik =veri[0]\n rekorlu_agirlik=veri[1]\n fm200 = veri[2]\n azot = veri[3]\n valf_fm200= Valf_fm200(valf=valf, personel_id=personel_id,kayit_tarihi=kayit_tarihi,kurlenme_bitis=kurlenme_bitis, bos_agirlik =bos_agirlik,rekorlu_agirlik=rekorlu_agirlik, fm200 = fm200,azot = azot)\n valf_fm200.save()\n\n elif request.POST.dict()['tur'] == 'havuztest':\n veri = json.loads(request.POST.dict()['veri'])\n '''neval\n print(veri)\n v = Valf.objects.get(vsn=veri[0])\n is_emri = v.is_emri\n t = Uretim(tur='havuztest',vsn = veri[0],tsn = veri[0],is_emri=is_emri , uygunluk = veri[1] , \n acma = veri[2], kapatma = veri[3],sebep = veri[4], personel = request.user.get_full_name())\n t.save(force_insert=True)\n '''\n print(\"veri\",veri)\n\n valf_seri_no=veri[0]\n valf = Valf.objects.get(id=valf_seri_no ) \n valf.durum='valf_havuz_test'\n valf.save()\n\n \n personel_id=request.user.id\n kayit_tarihi=timezone.now()\n uygunluk= veri[1]\n tup_cidar_sicaklik =veri[2]\n tup_basinc = veri[3]\n sebep=veri[4]\n if (uygunluk):\n sebep=None\n \n valf_havuz= Valf_havuz(valf=valf, personel_id=personel_id,kayit_tarihi=kayit_tarihi,tup_cidar_sicaklik=tup_cidar_sicaklik, tup_basinc =tup_basinc,uygunluk=uygunluk, sebep = sebep)\n valf_havuz.save()\n \n\n elif request.POST.dict()['tur'] == 'finalmontaj':\n veri = 
json.loads(request.POST.dict()['veri'])\n '''neval\n \n print(veri)\n v = Valf.objects.get(vsn=veri[1])\n is_emri = v.is_emri\n t = Uretim.objects.get(vsn=veri[1])\n t.tur='finalmontaj'\n t.etiket_seri_no = veri[0]\n t.fsn = veri[2]\n t.funye_seri_omaj = veri[3]\n t.basinc_anahtari_omaj = veri[4]\n t. personel = request.user.get_full_name()\n #t = Uretim(tur='finalmontaj',etiket_seri_no = veri[0],is_emri=is_emri , vsn = veri[1] , fsn = veri[2],\n # funye_seri_omaj = veri[3],basinc_anahtari_omaj = veri[4], personel = request.user.get_full_name())\n t.save()\n tup_sayisi_str=Emir.objects.filter(is_emri=is_emri).values()[0]['tup_sayisi']\n '''\n\n valf_seri_no=veri[1]\n valf = Valf.objects.get(id=valf_seri_no ) \n valf.durum='valf_final_montaj'\n valf.save()\n\n \n personel_id=request.user.id\n kayit_tarihi=timezone.now()\n etiket_seri_no = veri[0]\n funye_seri_no = veri[2]\n funye_seri_omaj = veri[3]\n basinc_anahtari_omaj = veri[4]\n valf_final_montaj= Valf_final_montaj(valf=valf, personel_id=personel_id,kayit_tarihi=kayit_tarihi,etiket_seri_no = etiket_seri_no,funye_seri_no = funye_seri_no ,funye_seri_omaj = funye_seri_omaj,basinc_anahtari_omaj = basinc_anahtari_omaj)\n valf_final_montaj.save()\n\n\n\n emir = Emir.objects.get(is_emri=valf.is_emri)\n emir_tup_sayisi = int(emir.tup_sayisi )\n emir_biten_valf_sayi = Valf.objects.filter(is_emri=emir,durum='valf_final_montaj').count()\n print('emir_biten_valf_sayi',emir_biten_valf_sayi)\n print('emir_tup_sayisi',emir_tup_sayisi)\n if(emir_biten_valf_sayi == emir_tup_sayisi):\n\n emir.durum = 'Bitmiş'\n emir.save()\n\n b = Bildirim(tur = \"bitis\" , kisi = request.user.get_full_name())\n b.save(force_insert=True)\n\n now = timezone.now()\n\n\n #montajkurlenmesi=Valf_montaj.objects.filter(kurlenme_bitis_tarihi__gte=now)\n montajkurlenmesi=Valf_montaj.objects.all()\n # govdekurlenmesi=Valf_govde.objects.filter(kurlenme_bitis__gte=now)\n fm200kurlenmesi=Valf_fm200.objects.filter(fm200_kurlenme_bitis_tarihi__gte=now)\n 
#acikemirleri= Emir.objects.filter(durum__in=(\"Aktif\",\"Başlanmamış\"))\n acikemirleri=Emir.objects.filter(durum='Aktif').values()\n aktifemirler= Emir.objects.filter(durum=\"Aktif\")\n ####Duplikasyonu önlemek için yaptık ###############\n govde_emir = list(dict.fromkeys(Valf.objects.filter(valf_govde_id__isnull=False).values_list('is_emri_id',flat=True)))\n fm200_emir = list(dict.fromkeys(Valf.objects.filter(fm200_azot_id__isnull=False).values_list('is_emri_id',flat=True)))\n ###################################################\n \n #return render(request,'uretim-kontrol.html',{ 'acikemirleri':acikemirleri, 'grup': grup, 'birim': birim, 'ip': ip,'now':now, 'kurlenmes':montajkurlenmesi,'fm200kurlenmes':fm200kurlenmesi, 'govdekurlenmes': govdekurlenmesi ,'server' : server})\n return render(request,'uretim-kontrol.html',{'grup': grup, 'birim': birim, 'ip': ip,'now':now,'server':server, 'acikemirleri':acikemirleri,'fm200kurlenmes':fm200kurlenmesi,'kurlenmes':montajkurlenmesi,'aktifemirler':aktifemirler,'govde_emir':govde_emir,'fm200_emir':fm200_emir})\n@csrf_exempt\ndef acikisemirleri(request):\n emirler = Emir.objects.filter(durum__in=(\"Aktif\",\"Başlanmamış\"))\n temp = []\n for o in emirler.values():\n temp.append(o['is_emri'])\n veri = list(temp)\n\n\n\n\n\n\n@login_required\n@csrf_exempt\ndef isemri(request):\n mac = request.user_agent.os.family\n grup = request.user.grup\n birim = request.user.birim\n #Emir.objects.all().delete()\n fullname = request.user.first_name + ' ' + request.user.last_name\n emirler = Emir.objects.all()\n form = IsEmri(request.POST)\n if request.method == 'POST':\n if 'tur' in request.POST.dict():\n\n if request.POST.dict()['tur'] == 'oncelik':\n veri = json.loads(request.POST.dict()['veri'])\n print(veri)\n for key in veri:\n em = Emir.objects.get(is_emri=key)\n em.oncelik = veri[key]\n em.save()\n\n o = Bildirim(tur=\"oncelik\")\n o.save()\n return HttpResponse('onceliktamam')\n else:\n if form.is_valid():\n if not 
Emir.objects.all():\n son_oncelik = 1\n else:\n a = Emir.objects.all().order_by('-oncelik').values()[0]\n s = a['oncelik']\n son_oncelik = s + 1\n emir = form.save()\n emir.refresh_from_db()\n emir.is_emri = form.cleaned_data.get('is_emri')\n emir.urun_kodu = form.cleaned_data.get('urun_kodu')\n emir.baslangic = form.cleaned_data.get('baslangic')\n emir.bitis = form.cleaned_data.get('bitis')\n emir.emri_veren = form.cleaned_data.get('emri_veren')\n emir.tup_govde_turu = form.cleaned_data.get('tup_govde_turu')\n emir.valf_turu = form.cleaned_data.get('valf_turu')\n emir.renk = form.cleaned_data.get('renk')\n emir.emniyet_ventil_turu = form.cleaned_data.get('emniyet_ventil_turu')\n emir.siparis = form.cleaned_data.get('siparis')\n emir.fm200bosagirlikmindeger= form.cleaned_data.get('fm200bosagirlikmindeger')\n emir.fm200bosagirlikmaxdeger = form.cleaned_data.get('fm200bosagirlikmaxdeger')\n emir.fm200dolummiktarimindeger= form.cleaned_data.get('fm200dolummiktarimindeger')\n emir.fm200dolummiktarimaxdeger = form.cleaned_data.get('fm200dolummiktarimaxdeger')\n #if(request.user.grup == \"planlama\"):\n t = Bildirim(tur = \"is emri\",emri_veren_grup = grup, emri_veren = request.user.get_full_name(), is_emri = form.cleaned_data.get('is_emri'))\n t.save(force_insert=True)\n emir.oncelik = son_oncelik\n messages.success(request,'Emir başarıyla eklendi!')\n emir.save()\n form.full_clean()\n return(HttpResponseRedirect(reverse('isemri')))\n\n else:\n messages.warning(request,'İş emri eklenemedi.Lütfen tekrar deneyin!Hata: {}'.format(form.errors))\n else:\n form = IsEmri()\n form.fields[\"emri_veren\"].initial = fullname\n return render(request,'is-emri.html', { 'form' : form , 'emirler': emirler , 'mac' : mac , 'fullname' : fullname ,'grup' : grup , 'birim': birim,'server' : server})\n\n#@login_required\ndef yetkilendirme(request):\n mac = request.user_agent.os.family\n #grup = \"yonetici\"#request.user.grup\n #birim = request.user.birim\n grup = \"Yönetici\"\n birim = 
\"IT\"\n kullanicilar = User.objects.all()\n if grup == 'Yönetici' and birim == 'IT' or grup == 'Mühendis' and birim == 'IT':\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid(): #and profile_form.is_valid():\n user = form.save()\n user.refresh_from_db()\n user.first_name = form.cleaned_data.get('first_name')\n user.last_name = form.cleaned_data.get('last_name')\n user.grup = form.cleaned_data.get('grup')\n user.save()\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password1')\n messages.success(request,'{} isimli kullanıcı {} isimli gruba eklendi!'.format(username,user.grup))\n return(HttpResponseRedirect(reverse('yetkilendirme')))\n else:\n print(form.errors)\n else:\n form = UserRegisterForm()\n return render(request,'kullanici-yetkilendirme.html',{'form':form,'kullanicilar':kullanicilar , 'mac' : mac , 'grup' : grup, 'birim': birim,'server' : server})\n else:\n return(HttpResponseRedirect(reverse('403')))\n\n\n@login_required\ndef performans(request):\n mac = request.user_agent.os.family\n grup = request.user.grup\n birim = request.user.birim\n kullanicilar = User.objects.all()\n return render(request,'performans.html',{ 'mac' : mac , 'grup':grup, 'birim': birim, 'kullanicilar': kullanicilar,'server' : server})\n\n\n@login_required\n@csrf_exempt\ndef yazdir(request):\n mac = request.user_agent.os.family\n grup = request.user.grup\n birim = request.user.birim\n if True:#grup == 'Yönetici' and birim == 'IT':\n if request.method == 'POST':\n i = Emir.objects.filter(durum=request.POST['durum'])\n temp = []\n for obj in i.values():\n times = obj['emir_zamani'].strftime(\"%d %B %Y (%H:%M:%S)\")\n temp.append(obj['is_emri'] + \" \" + times)\n veri = list(temp)\n return JsonResponse(veri,safe=False)\n return render(request,'yazdir.html',{ 'mac' : mac , 'grup':grup, 'birim': birim,'server' : server})\n else:\n return(HttpResponseRedirect(reverse('403')))\n\n@login_required\ndef 
ulogout(request):\n logout(request)\n return(HttpResponseRedirect(reverse('ulogin')))\n\n@csrf_exempt\ndef ulogin(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username,password=password)\n if user:\n if user.is_active:\n login(request,user)\n print('{} kullanıcısı tarafından başarılı giriş'.format(username))\n return redirect('arama')\n else:\n messages.warning(request,'Kullanıcı adınızı yada parolanızı yanlış girdiniz.')\n else:\n print(\"Birisi login olmayı denedi ve başarısız oldu!\")\n messages.warning(request,'Kullanıcı adınızı yada parolanızı yanlış girdiniz.')\n return(HttpResponseRedirect(reverse('ulogin')))\n else:\n return render(request,'login.html',{})\n\n\ndef _403(request):\n return render(request,'403.html',{})\n\ndef handler404(request,exception):\n return render(request, '403.html', status=404)\n\n@csrf_exempt\ndef kullanicijson(request):\n username = request.POST.get('username')\n b = User.objects.filter(username=username).values('first_name','last_name','username','grup')\n veri = list(b)\n return JsonResponse(veri,safe=False)\n@csrf_exempt\ndef kullanicisil(request):\n username = request.POST.get('username')\n print(username)\n sildi = User.objects.filter(username=username).delete()\n if sildi:\n return HttpResponse('silindi')\n else:\n return HttpResponse('silinemedi')\n@csrf_exempt\ndef kullaniciduzelt(request):\n veri = request.POST.get('bilgi')\n veri = json.loads(veri)\n a = User.objects.get(username=veri[\"eskisi\"])\n a.username = veri[\"username\"]\n a.first_name = veri[\"first_name\"]\n a.last_name = veri[\"last_name\"]\n a.grup = veri[\"grup\"]\n a.birim = veri[\"birim\"]\n a.save()\n return HttpResponse('duzeltildi')\n\n@csrf_exempt\ndef passwordreset(request):\n ps = request.POST.get('ps1')\n if request.POST.get('username'):\n u = User.objects.get(username=request.POST.get('username'))\n u.set_password(ps)\n u.save()\n 
return HttpResponse('parola değiştirildi')\n return HttpResponse('bir hata var')\n\ndef get_first_and_lastname(username):\n try:\n first_name=User.objects.filter(username=username).first().first_name\n last_name=User.objects.filter(username=username).first().last_name\n return \"{} {}\".format(first_name,last_name)\n except:\n return 'isim soyisim'\n\n\n@csrf_exempt\ndef pdf(request):\n if request.GET.get('qr'):\n qr = request.GET.get('qr')\n print(qr.split(\" \")[0])\n i = qr.split(\" \")[0]\n # elif request.GET.get('valfqr'):\n # qr = request.GET.get('valfqr')\n # v = Valf.objects.get(vsn=qr)\n # i = v.is_emri\n print(\"---------------------\")\n valf_no = request.GET.get('vsn')\n Valf_montaj_Data=Valf_montaj.objects.filter(id=Valf.objects.filter(id=valf_no).first().valf_montaj_id).first()\n Valf_fm200_Data=Valf_fm200.objects.filter(id=Valf.objects.filter(id=valf_no).first().fm200_azot_id).first()\n Valf_havuz_Data=Valf_havuz.objects.filter(id=Valf.objects.filter(id=valf_no).first().havuz_id).first()\n Valf_final_Data=Valf_final_montaj.objects.filter(id=Valf.objects.filter(id=valf_no).first().valf_final_montaj_id).first()\n Valf_test_Data=Valf_test.objects.filter(id=Valf.objects.filter(id=valf_no).first().valf_test_id).first()\n Valf_govde_Data=Valf_govde.objects.filter(id=Valf.objects.filter(id=valf_no).first().valf_govde_id).first()\n Emir_Data=Emir.objects.filter(is_emri=i).first()\n valf_final = Valf.objects.filter(id=valf_no).values_list('valf_final_montaj_id',flat=True).first()\n urun_seri_no = Valf_final_montaj.objects.filter(id=valf_final).values_list('urun_seri_no',flat=True).first()\n print(\"---------------------\") \n try:\n valfmontajPersonel = get_first_and_lastname(User.objects.filter(id=Valf_montaj_Data.montaj_personel_id).first().username)\n except:\n valfmontajPersonel = ''\n\n try:\n valfmontajTarih = Valf_montaj_Data.montaj_tarihi\n except:\n valfmontajTarih = ''\n try:\n altnipelno = Valf_montaj_Data.alt_nipel_no\n except:\n altnipelno = ''\n 
try:\n ustnipelno = Valf_montaj_Data.ust_nipel_no\n except:\n ustnipelno = ''\n try:\n switchno = Valf_montaj_Data.basincanahtari_no\n except:\n switchno = ''\n try:\n manometreno = Valf_montaj_Data.manometre_no\n except:\n manometreno = ''\n\n \n try:\n valftestPersonel = get_first_and_lastname(User.objects.filter(id=Valf_test_Data.test_personel_id).first().username)\n except:\n valftestPersonel = ''\n try:\n valftestTarih = Valf_test_Data.test_tarihi\n except:\n valftestTarih = ''\n try:\n valfTestUygun = 'Uygun' if Valf_test_Data.uygun == True else 'Uygun Değil'\n except:\n valfTestUygun = Valf_test_Data.uygun\n\n \n try:\n valfgovdePersonel = get_first_and_lastname(User.objects.filter(id=Valf_govde_Data.govde_personel_id).first().username)\n except:\n valfgovdePersonel = ''\n try:\n valfgovdeTarih = Valf_govde_Data.govde_tarihi\n except:\n valfgovdeTarih = ''\n try:\n valfGovdeUygun = 'Uygun' if Valf_govde_Data.uygunluk == True else 'Uygun Değil'\n except:\n valfGovdeUygun = ''\n\n try:\n fm200Personel = get_first_and_lastname(User.objects.filter(id=Valf_fm200_Data.fm200_personel_id).first().username)\n except:\n fm200Personel = ''\n try:\n fm200Tarih = Valf_fm200_Data.kayit_tarihi\n except:\n fm200Tarih = ''\n try:\n bosAgirlik = Valf_fm200_Data.bos_agirlik\n except:\n bosAgirlik = ''\n try:\n doluAgirlik = Valf_fm200_Data.dolu_agirlik\n except:\n doluAgirlik = ''\n # try: Duruma Göre sonradan eklenebilir diye silmiyoruz!\n # azot = fm200[0]['azot']\n # except:\n # azot = ''\n try:\n bar = Valf_fm200_Data.bar\n except:\n bar = ''\n \n try:\n havuztestPersonel = get_first_and_lastname(User.objects.filter(id=Valf_havuz_Data.havuz_personel_id).first().username)\n except:\n havuztestPersonel = ''\n try:\n havuztestTarih = Valf_havuz_Data.kayit_tarihi\n except:\n havuztestTarih = ''\n try:\n havuzTestUygun = 'Uygun' if Valf_havuz_Data.uygunluk == True else 'Uygun Değil' \n except:\n havuzTestUygun = ''\n\n \n try:\n finalmontajPersonel = 
get_first_and_lastname(User.objects.filter(id=Valf_final_Data.personel_id).first().username)\n except:\n finalmontajPersonel = ''\n try:\n finalmontajTarih = Valf_final_Data.kayit_tarihi\n except:\n finalmontajTarih = ''\n\n try:\n membranTipi = Emir_Data.valf_turu\n except:\n membranTipi = ''\n try:\n ventilTipi = Emir_Data.emniyet_ventil_turu\n except:\n ventilTipi = ''\n try:\n tugovdetipi= Emir_Data.tup_govde_turu\n except:\n tugovdetipi= ''\n try:\n siboplotno = Valf_montaj_Data.sibop\n except:\n siboplotno = ''\n\n print(valftestPersonel,Emir_Data.emniyet_ventil_turu)\n veri = \"veri\"\n html_string = render_to_string('external/pdf-template.html', {'veri': veri, \"qr\": urun_seri_no,\n 'valfmontajPersonel': valfmontajPersonel, 'valfmontajTarih':valfmontajTarih,'valfgovdePersonel':valfgovdePersonel,\n 'valftestPersonel': valftestPersonel, 'valftestTarih': valftestTarih,'valfTestUygun':valfTestUygun,'havuzTestUygun':havuzTestUygun,\n 'valfgovdePersonel': valftestPersonel, 'valfgovdeTarih': valfgovdeTarih,'valfGovdeUygun':valfGovdeUygun,'valfMontajUygun':\"Uygun*\",'fm200Uygun':\"Uygun*\",'finalMontajUygun':\"Uygun*\",\n 'fm200Personel': fm200Personel, 'fm200Tarih': fm200Tarih,\n 'bosAgirlik' : bosAgirlik, 'doluAgirlik' : doluAgirlik,\n 'havuztestPersonel': havuztestPersonel, 'havuztestTarih': havuztestTarih,\n 'finalmontajPersonel': finalmontajPersonel, 'finalmontajTarih': finalmontajTarih,\n 'altnipelno': altnipelno, 'ustnipelno': ustnipelno, 'switchno': switchno,'manometreno': manometreno,\n 'is_emri': i,'membranTipi': membranTipi,'ventilTipi': ventilTipi,'urunserino':urun_seri_no,'bar':bar,'tugovdetipi':tugovdetipi,'siboplotno':siboplotno\n }, request=request)\n\n html = HTML(string=html_string, base_url=request.build_absolute_uri())\n html.write_pdf(target='/tmp/' + qr + '.pdf');\n\n fs = FileSystemStorage('/tmp/')\n with fs.open(qr + '.pdf') as pdf:\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = 'inline; 
filename=\"pdf.pdf\"'\n return response\n\n return response\n\n#Test sonuçları\n@csrf_exempt\ndef dashboard(request):\n bugun = timezone.now()\n print(request.POST.get('gun_sayisi'))\n gun = int(request.POST.get('gun_sayisi'))\n kac_gun = bugun - timezone.timedelta(days=gun)\n veris = Test.objects.filter(test_tarihi__range=[kac_gun,bugun])\n temp = []\n for o in veris.values():\n temp.append(o)\n veri = list(temp)\n print(\"dashboard\", veri)\n return JsonResponse(veri,safe=False)\n\n@csrf_exempt\ndef uretimdurum(request):\n i = request.POST.get('is_emri')\n print(i)\n veri = list()\n print(Valf.objects.filter(is_emri_id=i).values_list('valf_montaj_id',flat=True).count(),Valf.objects.filter(is_emri_id=i).filter(valf_test_id__isnull=False).values_list('valf_test_id',flat=True).count())\n try:\n veri.append(Valf.objects.filter(is_emri_id=i).filter(valf_montaj_id__isnull=False).values_list('valf_montaj_id',flat=True).count())\n veri.append(Valf.objects.filter(is_emri_id=i).filter(valf_test_id__isnull=False).values_list('valf_test_id',flat=True).count())\n veri.append(Valf.objects.filter(is_emri_id=i).filter(valf_govde_id__isnull=False).values_list('valf_govde_id',flat=True).count())\n veri.append(Valf.objects.filter(is_emri_id=i).filter(fm200_azot_id__isnull=False).values_list('fm200_azot_id',flat=True).count())\n veri.append(Valf.objects.filter(is_emri_id=i).filter(havuz_id__isnull=False).values_list('havuz_id',flat=True).count())\n veri.append(Valf.objects.filter(is_emri_id=i).filter(valf_final_montaj_id__isnull=False).values_list('valf_final_montaj_id',flat=True).count())\n veri.append(Emir.objects.filter(id=i).values()[0]['tup_sayisi'])\n except Exception as err:\n print(err)\n veri = [0,0,0,0,0,0,10]\n print(veri)\n return JsonResponse(veri,safe=False)\n\n@csrf_exempt\ndef personeldurum(request):\n p = request.POST.get('personel')\n g = request.POST.get('gun_sayisi')\n print(p,g)\n bugun = timezone.now()\n gun = int(request.POST.get('gun_sayisi'))\n kac_gun = 
bugun - timezone.timedelta(days=gun)\n veris = Test.objects.filter(test_tarihi__range=[kac_gun,bugun])\n veri = list()\n try:\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun,bugun]).filter(tur=\"manometre\").filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun,bugun]).filter(tur=\"basinc\").filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun,bugun]).filter(tur=\"altnipel\").filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun,bugun]).filter(tur=\"ustnipel\").filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun,bugun]).filter(tur=\"bakirmembran\").filter(testi_yapan=p).count())\n veri.append(Test.objects.filter(test_tarihi__range=[kac_gun,bugun]).filter(tur=\"emniyet\").filter(testi_yapan=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun,bugun]).filter(tur=\"kurlenme\").filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun,bugun]).filter(tur=\"valftest\").filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun,bugun]).filter(tur=\"valfgovde\").filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun,bugun]).filter(tur=\"fm200\").filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun,bugun]).filter(tur=\"havuztest\").filter(personel=p).count())\n veri.append(Uretim.objects.filter(date__range=[kac_gun,bugun]).filter(tur=\"finalmontaj\").filter(personel=p).count())\n except:\n veri = [0,0,0,0,0,0,0,0,0,0,0,10]\n print(veri)\n return JsonResponse(veri,safe=False)\n\n@csrf_exempt\ndef tupTuru(request):\n if request.method == 'POST':\n try:\n u = Emir.objects.filter(is_emri=request.POST.dict()['is_emri']).first()\n bos_agirlik_miktari= u.bos_agirlik_miktari\n fm200_miktari= u.fm200_miktari\n renk= u.renk\n response= bos_agirlik_miktari + ';'+ fm200_miktari 
+';'+renk\n return HttpResponse(str(response))\n except e :\n print(e)\n return str('tur')\n\n\n@csrf_exempt\ndef getEmirNo(request):\n if request.method == 'POST':\n vsn=request.POST.dict()['veri']\n print('getEmirNo',vsn)\n\n try:\n is_emri = Emir.objects.filter(id=vsn).values_list('is_emri',flat=True).first()\n return HttpResponse(str(is_emri))\n except:\n return HttpResponse(str('NO'))\n return str('is_emri')\n\n\n\n@csrf_exempt\ndef kontrolEt(request):\n if request.method == 'POST':\n tur = request.POST['tur']\n veri = request.POST['veri']\n isemri = request.POST['isemri']\n t = Test.objects.filter(tur=tur)\n r = \"NO\"\n if(tur == 'altnipel'):\n t = Test.objects.filter(tur=tur)\n try:\n if(int(veri) in t.values_list('lot_no',flat=True)):\n r = ('OK')\n else:\n r = ('NO')\n except:\n r = 'NO'\n if(tur == 'ustnipel'):\n t = Test.objects.filter(tur=tur)\n try:\n if(veri in t.values_list('baslangic_seri_no',flat=True)):\n r = ('OK')\n else:\n r = ('NO')\n except:\n r = \"NO\"\n if(tur == 'manometre'):\n t = Test.objects.filter(tur=tur)\n try:\n if(veri in t.values_list('seri_no',flat=True)):\n r = ('OK')\n else:\n r = ('NO')\n except Exception as e:\n print(e)\n r = \"NO\"\n if(tur == 'basinc'):\n t = Test.objects.filter(tur=tur)\n try:\n if(veri in t.values_list('seri_no',flat=True)):\n r = ('OK')\n else:\n r = ('NO')\n except:\n r = \"NO\"\n if(tur == 'bakirmembran'):\n t = Test.objects.filter(tur=tur)\n try:\n if(int(veri) in t.values_list('lot_no',flat=True)):\n r = ('OK')\n else:\n r = ('NO')\n except:\n r = \"NO\"\n if(tur == 'emniyet'):\n t = Test.objects.filter(tur=tur)\n try:\n if(veri in t.values_list('lot_no',flat=True)):\n r = ('OK')\n else:\n r = ('NO')\n except:\n r = \"NO\"\n if(tur == 'valf_govde'):\n try:\n #Valf.objects.filter(valf_montaj_id=veri).values_list('valf_test_id',flat = True).first()\n #a = isinstance(Valf.objects.filter(valf_montaj_id=veri).values_list('valf_test_id',flat = True).first(),int)\n 
valf_id=Valf.objects.filter(valf_montaj_id=veri).values_list('valf_test_id',flat = True).first() \n if isinstance(valf_id,int):\n Valf_test.objects.filter(id=valf_id).values_list('uygun',flat = True).first()\n if (Valf_test.objects.filter(id=valf_id).values_list('uygun',flat = True).first()):\n r = ('OK')\n else:\n r = ('NO')\n else:\n r = ('NO')\n except:\n r = \"NO\"\n if(tur == 'sibop'):\n print(tur,veri,t.values_list('lot_no',flat=True))\n t = Test.objects.filter(tur=tur)\n try:\n if(int(veri) in t.values_list('lot_no',flat=True)):\n r = ('OK')\n else:\n r = ('NO')\n except:\n r = \"NO\"\n return HttpResponse(r)\n\n@csrf_exempt\ndef kurlenmeKontrol(request):\n if request.method == 'POST':\n r = \"NO\"\n tur = request.POST['tur']\n vsn = request.POST['veri']\n print('kurlenmeKontrol',tur,vsn)\n if(tur == 'montaj_kurlenme'):\n try:\n u = Uretim.objects.filter(vsn=vsn)\n print(u,\"---------------\")\n if(u.values()[0]['montaj_kurlenme_zamani']<timezone.now()):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n elif (tur=='govde_kurlenme'):\n\n try:\n u = Uretim.objects.filter(vsn=vsn)\n print('govde_kurlenme_zamani',u.values()[0]['govde_kurlenme_zamani'])\n print('now',timezone.now())\n if(u.values()[0]['govde_kurlenme_zamani']<timezone.now()):\n r = 'OK'\n else:\n r = 'NO'\n except:\n r = 'NO'\n elif (tur=='valf_test'):\n print(\"içerdeyim-----> Valf Test\")\n try:\n print(vsn,\"----------------------------\")\n valf_montaj_id = Valf.objects.filter(id=vsn).first().valf_montaj_id\n print(valf_montaj_id)\n tarih = Valf_montaj.objects.filter(id=valf_montaj_id).first().kurlenme_bitis_tarihi\n print(tarih)\n print(type(timezone.now()),timezone.now())\n print(type(tarih),tarih)\n if(tarih<timezone.now()):\n print(\"büyüktür\")\n r='OK'\n else:\n print(\"küçük\")\n r='NO'\n except Exception as err:\n \n print('r',err)\n r='NO'\n elif (tur=='pdfkontrol'):\n print(vsn)\n try:\n if Valf.objects.filter(valf_montaj_id=vsn).count():\n r='OK'\n else:\n r='NO'\n except 
Exception as err:\n \n r='NO'\n print(err)\n\n return HttpResponse(r)\n\n@csrf_exempt\ndef newVSN(request):\n if request.method == 'POST':\n\n vsn = \"\"\n if not Uretim.objects.all():\n vsn = 1\n else:\n a = Uretim.objects.all().order_by('-vsn').values()[0]\n s = a['vsn']\n print('sssss',s)\n\n vsn = s + 1\n \n print(vsn)\n r = (str(vsn))\n return HttpResponse(r)\n #return HttpResponse(str(vsn))\n #return JsonResponse({'vsn':vsn})\n\n\n@csrf_exempt\ndef hardreset(request):\n print('Hard')\n",
"step-ids": [
10,
25,
26,
33,
35
]
}
|
[
10,
25,
26,
33,
35
] |
/Users/linhly/anaconda/lib/python3.6/reprlib.py
|
normal
|
{
"blob_id": "127ca34d3fae3af4506258388a28c539ccc7c33b",
"index": 4120,
"step-1": "/Users/linhly/anaconda/lib/python3.6/reprlib.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def plot_grad_flow(named_parameters):
"""Plots the gradients flowing through different layers in the net during training.
Can be used for checking for possible gradient vanishing / exploding problems.
Usage: Plug this function in Trainer class after loss.backwards() as
"plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow"""
ave_grads = []
max_grads = []
layers = []
for n, p in named_parameters:
if p.requires_grad and 'bias' not in n:
layers.append(n)
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')
plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')
plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')
plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')
plt.xlim(left=0, right=len(ave_grads))
plt.ylim(bottom=-0.001, top=0.02)
plt.xlabel('Layers')
plt.ylabel('average gradient')
plt.title('Gradient flow')
plt.grid(True)
plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=
'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',
'mean-gradient', 'zero-gradient'])
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def plot_grad_flow(named_parameters):
"""Plots the gradients flowing through different layers in the net during training.
Can be used for checking for possible gradient vanishing / exploding problems.
Usage: Plug this function in Trainer class after loss.backwards() as
"plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow"""
ave_grads = []
max_grads = []
layers = []
for n, p in named_parameters:
if p.requires_grad and 'bias' not in n:
layers.append(n)
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')
plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')
plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')
plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')
plt.xlim(left=0, right=len(ave_grads))
plt.ylim(bottom=-0.001, top=0.02)
plt.xlabel('Layers')
plt.ylabel('average gradient')
plt.title('Gradient flow')
plt.grid(True)
plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=
'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',
'mean-gradient', 'zero-gradient'])
plt.show()
<|reserved_special_token_0|>
loss.backward()
plot_grad_flow(model.named_parameters())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dtype = torch.float
device = torch.device('cpu')
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in, device=device, dtype=dtype)
y = torch.randn(N, D_out, device=device, dtype=dtype)
model = torch.nn.Sequential(torch.nn.Linear(D_in, H), torch.nn.ReLU(),
torch.nn.Linear(H, D_out))
def plot_grad_flow(named_parameters):
"""Plots the gradients flowing through different layers in the net during training.
Can be used for checking for possible gradient vanishing / exploding problems.
Usage: Plug this function in Trainer class after loss.backwards() as
"plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow"""
ave_grads = []
max_grads = []
layers = []
for n, p in named_parameters:
if p.requires_grad and 'bias' not in n:
layers.append(n)
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')
plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')
plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')
plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')
plt.xlim(left=0, right=len(ave_grads))
plt.ylim(bottom=-0.001, top=0.02)
plt.xlabel('Layers')
plt.ylabel('average gradient')
plt.title('Gradient flow')
plt.grid(True)
plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=
'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',
'mean-gradient', 'zero-gradient'])
plt.show()
learning_rate = 1e-06
y_pred = model(x)
loss = (y_pred - y).pow(2).sum()
loss.backward()
plot_grad_flow(model.named_parameters())
<|reserved_special_token_1|>
import torch
import torch.nn.functional as f
import time
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
dtype = torch.float
device = torch.device('cpu')
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in, device=device, dtype=dtype)
y = torch.randn(N, D_out, device=device, dtype=dtype)
model = torch.nn.Sequential(torch.nn.Linear(D_in, H), torch.nn.ReLU(),
torch.nn.Linear(H, D_out))
def plot_grad_flow(named_parameters):
"""Plots the gradients flowing through different layers in the net during training.
Can be used for checking for possible gradient vanishing / exploding problems.
Usage: Plug this function in Trainer class after loss.backwards() as
"plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow"""
ave_grads = []
max_grads = []
layers = []
for n, p in named_parameters:
if p.requires_grad and 'bias' not in n:
layers.append(n)
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')
plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')
plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')
plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')
plt.xlim(left=0, right=len(ave_grads))
plt.ylim(bottom=-0.001, top=0.02)
plt.xlabel('Layers')
plt.ylabel('average gradient')
plt.title('Gradient flow')
plt.grid(True)
plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=
'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',
'mean-gradient', 'zero-gradient'])
plt.show()
learning_rate = 1e-06
y_pred = model(x)
loss = (y_pred - y).pow(2).sum()
loss.backward()
plot_grad_flow(model.named_parameters())
<|reserved_special_token_1|>
import torch
import torch.nn.functional as f
import time
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0") # Uncomment this to run on GPU
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in, device=device, dtype=dtype)
y = torch.randn(N, D_out, device=device, dtype=dtype)
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
def plot_grad_flow(named_parameters):
'''Plots the gradients flowing through different layers in the net during training.
Can be used for checking for possible gradient vanishing / exploding problems.
Usage: Plug this function in Trainer class after loss.backwards() as
"plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow'''
ave_grads = []
max_grads = []
layers = []
for n, p in named_parameters:
if (p.requires_grad) and ("bias" not in n):
layers.append(n)
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k")
plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
plt.xlim(left=0, right=len(ave_grads))
plt.ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions
plt.xlabel("Layers")
plt.ylabel("average gradient")
plt.title("Gradient flow")
plt.grid(True)
plt.legend([Line2D([0], [0], color="c", lw=4),
Line2D([0], [0], color="b", lw=4),
Line2D([0], [0], color="k", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])
plt.show()
learning_rate = 1e-6
y_pred = model(x)
loss = (y_pred - y).pow(2).sum()
loss.backward()
plot_grad_flow(model.named_parameters())
|
flexible
|
{
"blob_id": "0fb424dafaac184882ea56f36265e0b19b5a4c50",
"index": 9758,
"step-1": "<mask token>\n\n\ndef plot_grad_flow(named_parameters):\n \"\"\"Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow\"\"\"\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if p.requires_grad and 'bias' not in n:\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')\n plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02)\n plt.xlabel('Layers')\n plt.ylabel('average gradient')\n plt.title('Gradient flow')\n plt.grid(True)\n plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=\n 'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',\n 'mean-gradient', 'zero-gradient'])\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot_grad_flow(named_parameters):\n \"\"\"Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow\"\"\"\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if p.requires_grad and 'bias' not in n:\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')\n plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02)\n plt.xlabel('Layers')\n plt.ylabel('average gradient')\n plt.title('Gradient flow')\n plt.grid(True)\n plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=\n 'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',\n 'mean-gradient', 'zero-gradient'])\n plt.show()\n\n\n<mask token>\nloss.backward()\nplot_grad_flow(model.named_parameters())\n",
"step-3": "<mask token>\ndtype = torch.float\ndevice = torch.device('cpu')\nN, D_in, H, D_out = 64, 1000, 100, 10\nx = torch.randn(N, D_in, device=device, dtype=dtype)\ny = torch.randn(N, D_out, device=device, dtype=dtype)\nmodel = torch.nn.Sequential(torch.nn.Linear(D_in, H), torch.nn.ReLU(),\n torch.nn.Linear(H, D_out))\n\n\ndef plot_grad_flow(named_parameters):\n \"\"\"Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow\"\"\"\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if p.requires_grad and 'bias' not in n:\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')\n plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02)\n plt.xlabel('Layers')\n plt.ylabel('average gradient')\n plt.title('Gradient flow')\n plt.grid(True)\n plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=\n 'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',\n 'mean-gradient', 'zero-gradient'])\n plt.show()\n\n\nlearning_rate = 1e-06\ny_pred = model(x)\nloss = (y_pred - y).pow(2).sum()\nloss.backward()\nplot_grad_flow(model.named_parameters())\n",
"step-4": "import torch\nimport torch.nn.functional as f\nimport time\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport numpy as np\ndtype = torch.float\ndevice = torch.device('cpu')\nN, D_in, H, D_out = 64, 1000, 100, 10\nx = torch.randn(N, D_in, device=device, dtype=dtype)\ny = torch.randn(N, D_out, device=device, dtype=dtype)\nmodel = torch.nn.Sequential(torch.nn.Linear(D_in, H), torch.nn.ReLU(),\n torch.nn.Linear(H, D_out))\n\n\ndef plot_grad_flow(named_parameters):\n \"\"\"Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow\"\"\"\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if p.requires_grad and 'bias' not in n:\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')\n plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02)\n plt.xlabel('Layers')\n plt.ylabel('average gradient')\n plt.title('Gradient flow')\n plt.grid(True)\n plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=\n 'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',\n 'mean-gradient', 'zero-gradient'])\n plt.show()\n\n\nlearning_rate = 1e-06\ny_pred = model(x)\nloss = (y_pred - y).pow(2).sum()\nloss.backward()\nplot_grad_flow(model.named_parameters())\n",
"step-5": "\nimport torch\nimport torch.nn.functional as f\nimport time\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport numpy as np\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n# device = torch.device(\"cuda:0\") # Uncomment this to run on GPU\n\nN, D_in, H, D_out = 64, 1000, 100, 10\n\nx = torch.randn(N, D_in, device=device, dtype=dtype)\ny = torch.randn(N, D_out, device=device, dtype=dtype)\n\nmodel = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out),\n)\n\n\ndef plot_grad_flow(named_parameters):\n '''Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow'''\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if (p.requires_grad) and (\"bias\" not in n):\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color=\"c\")\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color=\"b\")\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color=\"k\")\n plt.xticks(range(0, len(ave_grads), 1), layers, rotation=\"vertical\")\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions\n plt.xlabel(\"Layers\")\n plt.ylabel(\"average gradient\")\n plt.title(\"Gradient flow\")\n plt.grid(True)\n plt.legend([Line2D([0], [0], color=\"c\", lw=4),\n Line2D([0], [0], color=\"b\", lw=4),\n Line2D([0], [0], color=\"k\", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])\n\n plt.show()\n\nlearning_rate = 1e-6\ny_pred = model(x)\nloss = (y_pred - y).pow(2).sum()\nloss.backward()\nplot_grad_flow(model.named_parameters())\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.