code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ax.set_xlim([-0.007, 1.0])
ax.set_ylim([0.0, 1.01])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))
ax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')
ax.plot(fpr, tpr, color='yellow', label='IArt')
ax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label=
'Perfect model')
ax.legend(loc='lower right')
<|reserved_special_token_0|>
ax.axis('off')
get_figure('confusion_matrix').colorbar(im)
save_fig()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Evaluation of the "IArt" binary classifier: ROC curve + row-normalized
# confusion matrix.
#
# `y` holds one row per sample: column 0 is the model score for the
# positive class, column 1 is the ground-truth label (1.0 = positive,
# 0.0 = negative).  Negative samples were recorded as (1 - raw_score).
y = np.array([
    [0.8869, 1.0], [1.0 - 0.578, 0.0], [0.7959, 1.0], [0.8618, 1.0],
    [1.0 - 0.2278, 0.0], [0.6607, 1.0], [0.7006, 1.0], [1.0 - 0.4859, 0.0],
    [0.6935, 1.0], [0.9048, 1.0], [0.6681, 1.0], [0.7585, 1.0],
    [1.0 - 0.5063, 0.0], [1.0 - 0.4516, 0.0], [1.0 - 0.5158, 0.0],
    [1.0 - 0.5873, 0.0], [1.0 - 0.7682, 0.0], [0.862, 1.0],
    [1 - 0.7337, 0.0], [0.9412, 1.0], [1.0 - 0.5819, 0.0], [0.2738, 1.0],
    [1.0 - 0.5136, 0.0], [0.8819, 1.0], [1.0 - 0.4387, 0.0],
    [1.0 - 0.6257, 0.0], [0.7857, 1.0], [1.0 - 0.3722, 0.0],
    [1.0 - 0.8049, 0.0], [0.7864, 1.0], [1.0 - 0.2372, 0.0], [0.7934, 1.0],
    [0.9583, 1.0], [0.9739, 1.0], [1.0 - 0.3556, 0.0], [1.0 - 0.2551, 0.0],
    [1.0 - 0.4532, 0.0], [0.4605, 1.0], [0.7572, 1.0], [0.9496, 1.0],
    [0.8268, 1.0], [1.0 - 0.4876, 0.0], [0.8523, 1.0], [1.0 - 0.2629, 0.0],
    [1.0 - 0.9021, 0.0], [0.6977, 1.0], [0.9142, 1.0], [1.0 - 0.8175, 0.0],
    [1.0 - 0.4865, 0.0], [0.911, 1.0], [1.0 - 0.2159, 0.0],
    [1.0 - 0.6943, 0.0], [1.0 - 0.2753, 0.0], [0.859, 1.0], [0.8273, 1.0],
    [1.0 - 0.5169, 0.0], [1.0 - 0.7412, 0.0],
])

fpr, tpr, thresholds = roc_curve(y[:, 1], y[:, 0], pos_label=1)

# NOTE(review): `plt` here is a named-figure helper (called with a figure
# name), not matplotlib.pyplot -- presumably from datascience.visu.util;
# confirm against the file's imports.
ax = plt('roc_curve').gca()
ax.set_xlim([-0.007, 1.0])
ax.set_ylim([0.0, 1.01])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))
ax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')
ax.plot(fpr, tpr, color='yellow', label='IArt')
ax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label='Perfect model')
ax.legend(loc='lower right')

# Confusion matrix of the hard predictions obtained at threshold 0.7.
ax = plt('confusion_matrix').gca()
y_threshold = (y[:, 0] > 0.7).astype(int)
matrix = confusion_matrix(y[:, 1], y_threshold)
# Row-normalize so each true class sums to 1.
# Fix 1: `np.float` was removed in NumPy 1.24; use the builtin `float`.
# Fix 2: `keepdims=True` is required -- without it the (2,) row-sum vector
# broadcasts along the last axis and divides *columns* by the row sums.
matrix = matrix / matrix.astype(float).sum(axis=1, keepdims=True)
im = ax.imshow(matrix, cmap=cm.Greys_r, extent=(-3, 3, 3, -3))
ax.axis('off')
get_figure('confusion_matrix').colorbar(im)
save_fig()
<|reserved_special_token_1|>
from matplotlib import cm
from datascience.visu.util import plt, save_fig, get_figure
from sklearn.metrics import roc_curve, auc, confusion_matrix
import numpy as np
# Evaluation of the "IArt" binary classifier: ROC curve + row-normalized
# confusion matrix.
#
# `y` holds one row per sample: column 0 is the model score for the
# positive class, column 1 is the ground-truth label (1.0 = positive,
# 0.0 = negative).  Negative samples were recorded as (1 - raw_score).
y = np.array([
    [0.8869, 1.0], [1.0 - 0.578, 0.0], [0.7959, 1.0], [0.8618, 1.0],
    [1.0 - 0.2278, 0.0], [0.6607, 1.0], [0.7006, 1.0], [1.0 - 0.4859, 0.0],
    [0.6935, 1.0], [0.9048, 1.0], [0.6681, 1.0], [0.7585, 1.0],
    [1.0 - 0.5063, 0.0], [1.0 - 0.4516, 0.0], [1.0 - 0.5158, 0.0],
    [1.0 - 0.5873, 0.0], [1.0 - 0.7682, 0.0], [0.862, 1.0],
    [1 - 0.7337, 0.0], [0.9412, 1.0], [1.0 - 0.5819, 0.0], [0.2738, 1.0],
    [1.0 - 0.5136, 0.0], [0.8819, 1.0], [1.0 - 0.4387, 0.0],
    [1.0 - 0.6257, 0.0], [0.7857, 1.0], [1.0 - 0.3722, 0.0],
    [1.0 - 0.8049, 0.0], [0.7864, 1.0], [1.0 - 0.2372, 0.0], [0.7934, 1.0],
    [0.9583, 1.0], [0.9739, 1.0], [1.0 - 0.3556, 0.0], [1.0 - 0.2551, 0.0],
    [1.0 - 0.4532, 0.0], [0.4605, 1.0], [0.7572, 1.0], [0.9496, 1.0],
    [0.8268, 1.0], [1.0 - 0.4876, 0.0], [0.8523, 1.0], [1.0 - 0.2629, 0.0],
    [1.0 - 0.9021, 0.0], [0.6977, 1.0], [0.9142, 1.0], [1.0 - 0.8175, 0.0],
    [1.0 - 0.4865, 0.0], [0.911, 1.0], [1.0 - 0.2159, 0.0],
    [1.0 - 0.6943, 0.0], [1.0 - 0.2753, 0.0], [0.859, 1.0], [0.8273, 1.0],
    [1.0 - 0.5169, 0.0], [1.0 - 0.7412, 0.0],
])

fpr, tpr, thresholds = roc_curve(y[:, 1], y[:, 0], pos_label=1)

# NOTE: `plt` is the named-figure helper from datascience.visu.util
# (called with a figure name), not matplotlib.pyplot.
ax = plt('roc_curve').gca()
ax.set_xlim([-0.007, 1.0])
ax.set_ylim([0.0, 1.01])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))
ax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')
ax.plot(fpr, tpr, color='yellow', label='IArt')
ax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label='Perfect model')
ax.legend(loc='lower right')

# Confusion matrix of the hard predictions obtained at threshold 0.7.
ax = plt('confusion_matrix').gca()
y_threshold = (y[:, 0] > 0.7).astype(int)
matrix = confusion_matrix(y[:, 1], y_threshold)
# Row-normalize so each true class sums to 1.
# Fix 1: `np.float` was removed in NumPy 1.24; use the builtin `float`.
# Fix 2: `keepdims=True` is required -- without it the (2,) row-sum vector
# broadcasts along the last axis and divides *columns* by the row sums.
matrix = matrix / matrix.astype(float).sum(axis=1, keepdims=True)
im = ax.imshow(matrix, cmap=cm.Greys_r, extent=(-3, 3, 3, -3))
ax.axis('off')
get_figure('confusion_matrix').colorbar(im)
save_fig()
<|reserved_special_token_1|>
from matplotlib import cm
from datascience.visu.util import plt, save_fig, get_figure
from sklearn.metrics import roc_curve, auc, confusion_matrix
import numpy as np
# Evaluation of the "IArt" binary classifier: ROC curve + row-normalized
# confusion matrix.
#
# `y` holds one row per sample: column 0 is the model score for the
# positive class, column 1 is the ground-truth label (1.0 = positive,
# 0.0 = negative).  Negative samples were recorded as (1 - raw_score).
y = np.array([
    [0.8869, 1.0], [1.0 - 0.578, 0.0], [0.7959, 1.0], [0.8618, 1.0],
    [1.0 - 0.2278, 0.0], [0.6607, 1.0], [0.7006, 1.0], [1.0 - 0.4859, 0.0],
    [0.6935, 1.0], [0.9048, 1.0], [0.6681, 1.0], [0.7585, 1.0],
    [1.0 - 0.5063, 0.0], [1.0 - 0.4516, 0.0], [1.0 - 0.5158, 0.0],
    [1.0 - 0.5873, 0.0], [1.0 - 0.7682, 0.0], [0.862, 1.0],
    [1 - 0.7337, 0.0], [0.9412, 1.0], [1.0 - 0.5819, 0.0], [0.2738, 1.0],
    [1.0 - 0.5136, 0.0], [0.8819, 1.0], [1.0 - 0.4387, 0.0],
    [1.0 - 0.6257, 0.0], [0.7857, 1.0], [1.0 - 0.3722, 0.0],
    [1.0 - 0.8049, 0.0], [0.7864, 1.0], [1.0 - 0.2372, 0.0], [0.7934, 1.0],
    [0.9583, 1.0], [0.9739, 1.0], [1.0 - 0.3556, 0.0], [1.0 - 0.2551, 0.0],
    [1.0 - 0.4532, 0.0], [0.4605, 1.0], [0.7572, 1.0], [0.9496, 1.0],
    [0.8268, 1.0], [1.0 - 0.4876, 0.0], [0.8523, 1.0], [1.0 - 0.2629, 0.0],
    [1.0 - 0.9021, 0.0], [0.6977, 1.0], [0.9142, 1.0], [1.0 - 0.8175, 0.0],
    [1.0 - 0.4865, 0.0], [0.911, 1.0], [1.0 - 0.2159, 0.0],
    [1.0 - 0.6943, 0.0], [1.0 - 0.2753, 0.0], [0.859, 1.0], [0.8273, 1.0],
    [1.0 - 0.5169, 0.0], [1.0 - 0.7412, 0.0],
])

fpr, tpr, thresholds = roc_curve(y[:, 1], y[:, 0], pos_label=1)

# NOTE: `plt` is the named-figure helper from datascience.visu.util
# (called with a figure name), not matplotlib.pyplot.
ax = plt('roc_curve').gca()
ax.set_xlim([-0.007, 1.0])
ax.set_ylim([0.0, 1.01])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))
ax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')
ax.plot(fpr, tpr, color='yellow', label='IArt')
ax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label='Perfect model')
ax.legend(loc='lower right')

# Confusion matrix of the hard predictions obtained at threshold 0.7.
ax = plt('confusion_matrix').gca()
y_threshold = (y[:, 0] > 0.7).astype(int)
matrix = confusion_matrix(y[:, 1], y_threshold)
# Row-normalize so each true class sums to 1.
# Fix 1: `np.float` was removed in NumPy 1.24; use the builtin `float`.
# Fix 2: `keepdims=True` is required -- without it the (2,) row-sum vector
# broadcasts along the last axis and divides *columns* by the row sums.
matrix = matrix / matrix.astype(float).sum(axis=1, keepdims=True)
im = ax.imshow(matrix, cmap=cm.Greys_r, extent=(-3, 3, 3, -3))
ax.axis('off')
get_figure('confusion_matrix').colorbar(im)
save_fig()
|
flexible
|
{
"blob_id": "5b3514af839c132fda9a2e6e178ae62f780f291e",
"index": 3388,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nax.set_xlim([-0.007, 1.0])\nax.set_ylim([0.0, 1.01])\nax.set_xlabel('False Positive Rate')\nax.set_ylabel('True Positive Rate')\nax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))\nax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')\nax.plot(fpr, tpr, color='yellow', label='IArt')\nax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label=\n 'Perfect model')\nax.legend(loc='lower right')\n<mask token>\nax.axis('off')\nget_figure('confusion_matrix').colorbar(im)\nsave_fig()\n",
"step-3": "<mask token>\ny = np.array([[0.8869, 1.0], [1.0 - 0.578, 0.0], [0.7959, 1.0], [0.8618, \n 1.0], [1.0 - 0.2278, 0.0], [0.6607, 1.0], [0.7006, 1.0], [1.0 - 0.4859,\n 0.0], [0.6935, 1.0], [0.9048, 1.0], [0.6681, 1.0], [0.7585, 1.0], [1.0 -\n 0.5063, 0.0], [1.0 - 0.4516, 0.0], [1.0 - 0.5158, 0.0], [1.0 - 0.5873, \n 0.0], [1.0 - 0.7682, 0.0], [0.862, 1.0], [1 - 0.7337, 0.0], [0.9412, \n 1.0], [1.0 - 0.5819, 0.0], [0.2738, 1.0], [1.0 - 0.5136, 0.0], [0.8819,\n 1.0], [1.0 - 0.4387, 0.0], [1.0 - 0.6257, 0.0], [0.7857, 1.0], [1.0 - \n 0.3722, 0.0], [1.0 - 0.8049, 0.0], [0.7864, 1.0], [1.0 - 0.2372, 0.0],\n [0.7934, 1.0], [0.9583, 1.0], [0.9739, 1.0], [1.0 - 0.3556, 0.0], [1.0 -\n 0.2551, 0.0], [1.0 - 0.4532, 0.0], [0.4605, 1.0], [0.7572, 1.0], [\n 0.9496, 1.0], [0.8268, 1.0], [1.0 - 0.4876, 0.0], [0.8523, 1.0], [1.0 -\n 0.2629, 0.0], [1.0 - 0.9021, 0.0], [0.6977, 1.0], [0.9142, 1.0], [1.0 -\n 0.8175, 0.0], [1.0 - 0.4865, 0.0], [0.911, 1.0], [1.0 - 0.2159, 0.0], [\n 1.0 - 0.6943, 0.0], [1.0 - 0.2753, 0.0], [0.859, 1.0], [0.8273, 1.0], [\n 1.0 - 0.5169, 0.0], [1.0 - 0.7412, 0.0]])\nfpr, tpr, thresholds = roc_curve(y[:, 1], y[:, 0], pos_label=1)\nax = plt('roc_curve').gca()\nax.set_xlim([-0.007, 1.0])\nax.set_ylim([0.0, 1.01])\nax.set_xlabel('False Positive Rate')\nax.set_ylabel('True Positive Rate')\nax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))\nax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')\nax.plot(fpr, tpr, color='yellow', label='IArt')\nax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label=\n 'Perfect model')\nax.legend(loc='lower right')\nax = plt('confusion_matrix').gca()\ny_threshold = (y > 0.7).astype(int)\nmatrix = confusion_matrix(y[:, 1], y_threshold[:, 0])\nmatrix = matrix / matrix.astype(np.float).sum(axis=1)\nim = ax.imshow(matrix, cmap=cm.Greys_r, extent=(-3, 3, 3, -3))\nax.axis('off')\nget_figure('confusion_matrix').colorbar(im)\nsave_fig()\n",
"step-4": "from matplotlib import cm\nfrom datascience.visu.util import plt, save_fig, get_figure\nfrom sklearn.metrics import roc_curve, auc, confusion_matrix\nimport numpy as np\ny = np.array([[0.8869, 1.0], [1.0 - 0.578, 0.0], [0.7959, 1.0], [0.8618, \n 1.0], [1.0 - 0.2278, 0.0], [0.6607, 1.0], [0.7006, 1.0], [1.0 - 0.4859,\n 0.0], [0.6935, 1.0], [0.9048, 1.0], [0.6681, 1.0], [0.7585, 1.0], [1.0 -\n 0.5063, 0.0], [1.0 - 0.4516, 0.0], [1.0 - 0.5158, 0.0], [1.0 - 0.5873, \n 0.0], [1.0 - 0.7682, 0.0], [0.862, 1.0], [1 - 0.7337, 0.0], [0.9412, \n 1.0], [1.0 - 0.5819, 0.0], [0.2738, 1.0], [1.0 - 0.5136, 0.0], [0.8819,\n 1.0], [1.0 - 0.4387, 0.0], [1.0 - 0.6257, 0.0], [0.7857, 1.0], [1.0 - \n 0.3722, 0.0], [1.0 - 0.8049, 0.0], [0.7864, 1.0], [1.0 - 0.2372, 0.0],\n [0.7934, 1.0], [0.9583, 1.0], [0.9739, 1.0], [1.0 - 0.3556, 0.0], [1.0 -\n 0.2551, 0.0], [1.0 - 0.4532, 0.0], [0.4605, 1.0], [0.7572, 1.0], [\n 0.9496, 1.0], [0.8268, 1.0], [1.0 - 0.4876, 0.0], [0.8523, 1.0], [1.0 -\n 0.2629, 0.0], [1.0 - 0.9021, 0.0], [0.6977, 1.0], [0.9142, 1.0], [1.0 -\n 0.8175, 0.0], [1.0 - 0.4865, 0.0], [0.911, 1.0], [1.0 - 0.2159, 0.0], [\n 1.0 - 0.6943, 0.0], [1.0 - 0.2753, 0.0], [0.859, 1.0], [0.8273, 1.0], [\n 1.0 - 0.5169, 0.0], [1.0 - 0.7412, 0.0]])\nfpr, tpr, thresholds = roc_curve(y[:, 1], y[:, 0], pos_label=1)\nax = plt('roc_curve').gca()\nax.set_xlim([-0.007, 1.0])\nax.set_ylim([0.0, 1.01])\nax.set_xlabel('False Positive Rate')\nax.set_ylabel('True Positive Rate')\nax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))\nax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')\nax.plot(fpr, tpr, color='yellow', label='IArt')\nax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label=\n 'Perfect model')\nax.legend(loc='lower right')\nax = plt('confusion_matrix').gca()\ny_threshold = (y > 0.7).astype(int)\nmatrix = confusion_matrix(y[:, 1], y_threshold[:, 0])\nmatrix = matrix / matrix.astype(np.float).sum(axis=1)\nim = 
ax.imshow(matrix, cmap=cm.Greys_r, extent=(-3, 3, 3, -3))\nax.axis('off')\nget_figure('confusion_matrix').colorbar(im)\nsave_fig()\n",
"step-5": "from matplotlib import cm\n\nfrom datascience.visu.util import plt, save_fig, get_figure\n\nfrom sklearn.metrics import roc_curve, auc, confusion_matrix\n\nimport numpy as np\n\ny = np.array([\n [0.8869, 1.],\n [1.-0.578, 0.],\n [0.7959, 1.],\n [0.8618, 1.],\n [1.-0.2278, 0.],\n [0.6607, 1.],\n [0.7006, 1.],\n [1.-0.4859, 0.],\n [0.6935, 1.],\n [0.9048, 1.],\n [0.6681, 1.],\n [0.7585, 1.],\n [1.-0.5063, 0.],\n [1.-0.4516, 0.],\n [1.-0.5158, 0.],\n [1.-0.5873, 0.],\n [1.-0.7682, 0.],\n [0.8620, 1.],\n [1-0.7337, 0.],\n [0.9412, 1.],\n [1.-0.5819, 0.],\n [.2738, 1.],\n [1.-.5136, 0.],\n [.8819, 1.],\n [1.-.4387, 0.],\n [1.-.6257, 0.],\n [.7857, 1.],\n [1.-.3722, 0.],\n [1.-0.8049, 0.],\n [0.7864, 1.],\n [1.-0.2372, 0.],\n [0.7934, 1.],\n [0.9583, 1.],\n [0.9739, 1.],\n [1.-0.3556, 0.],\n [1.-0.2551, 0.],\n [1.-0.4532, 0.],\n [0.4605, 1.],\n [0.7572, 1.],\n [0.9496, 1.],\n [0.8268, 1.],\n [1.-0.4876, 0.],\n [0.8523, 1.],\n [1.-0.2629, 0.],\n [1.-0.9021, 0.],\n [0.6977, 1.],\n [0.9142, 1.],\n [1.-0.8175, 0.],\n [1.-0.4865, 0.],\n [0.9110, 1.],\n [1.-0.2159, 0.],\n [1.-0.6943, 0.],\n [1.-0.2753, 0.],\n [0.8590, 1.],\n [0.8273, 1.],\n [1.-0.5169, 0.],\n [1.-0.7412, 0.]\n])\n\nfpr, tpr, thresholds = roc_curve(y[:, 1], y[:, 0], pos_label=1)\n\nax = plt('roc_curve').gca()\n\nax.set_xlim([-0.007, 1.0])\nax.set_ylim([0.0, 1.01])\nax.set_xlabel('False Positive Rate')\nax.set_ylabel('True Positive Rate')\nax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))\n\nax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')\nax.plot(fpr, tpr, color='yellow', label='IArt')\nax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label='Perfect model')\n\nax.legend(loc=\"lower right\")\n\nax = plt('confusion_matrix').gca()\ny_threshold = (y > 0.7).astype(int)\n\nmatrix = confusion_matrix(y[:, 1], y_threshold[:, 0])\n\nmatrix = matrix / matrix.astype(np.float).sum(axis=1)\n\nim = ax.imshow(matrix, cmap=cm.Greys_r, extent=(-3, 3, 
3, -3))\nax.axis('off')\nget_figure('confusion_matrix').colorbar(im)\n\nsave_fig()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
import argparse
import logging
import tango
def delete_devices():
    """Print the classes and servers registered in the Tango database.

    NOTE(review): despite its name, this function only *lists* entries;
    the actual ``delete_server``/``delete_device`` calls were commented
    out in the original and are not performed.
    """
    database = tango.Database()
    print('class list = ', database.get_class_list('*'))
    print('server list = ', database.get_server_list('*'))
def delete_server():
    """Delete one server entry from the Tango database.

    NOTE(review): the server name passed is the empty string, so this
    looks like an unfinished stub -- confirm the intended server
    identifier before relying on it.
    """
    tango.Database().delete_server('')
# Script entry point: only lists devices; delete_server() is never invoked.
if __name__ == '__main__':
    delete_devices()
|
normal
|
{
"blob_id": "f3dad6a474d5882beaac7d98f8f60c347730ee55",
"index": 8428,
"step-1": "<mask token>\n\n\ndef delete_devices():\n \"\"\".\"\"\"\n db = tango.Database()\n class_list = db.get_class_list('*')\n print('class list = ', class_list)\n server_list = db.get_server_list('*')\n print('server list = ', server_list)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef delete_devices():\n \"\"\".\"\"\"\n db = tango.Database()\n class_list = db.get_class_list('*')\n print('class list = ', class_list)\n server_list = db.get_server_list('*')\n print('server list = ', server_list)\n\n\ndef delete_server():\n \"\"\".\"\"\"\n db = tango.Database()\n db.delete_server('')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef delete_devices():\n \"\"\".\"\"\"\n db = tango.Database()\n class_list = db.get_class_list('*')\n print('class list = ', class_list)\n server_list = db.get_server_list('*')\n print('server list = ', server_list)\n\n\ndef delete_server():\n \"\"\".\"\"\"\n db = tango.Database()\n db.delete_server('')\n\n\nif __name__ == '__main__':\n delete_devices()\n",
"step-4": "import argparse\nimport logging\nimport tango\n\n\ndef delete_devices():\n \"\"\".\"\"\"\n db = tango.Database()\n class_list = db.get_class_list('*')\n print('class list = ', class_list)\n server_list = db.get_server_list('*')\n print('server list = ', server_list)\n\n\ndef delete_server():\n \"\"\".\"\"\"\n db = tango.Database()\n db.delete_server('')\n\n\nif __name__ == '__main__':\n delete_devices()\n",
"step-5": "#!/usr/bin/env python3\nimport argparse\nimport logging\n\nimport tango\n\n\ndef delete_devices():\n \"\"\".\"\"\"\n db = tango.Database()\n class_list = db.get_class_list('*')\n print('class list = ', class_list)\n server_list = db.get_server_list('*')\n print('server list = ', server_list)\n\n # for index in range(num_devices):\n # name = 'low_sdp/elt/test_device_{:05d}'.format(index)\n\n # db.delete_server('TestDevice/test1')\n # db.delete_device('tango/test1/000')\n\n\ndef delete_server():\n \"\"\".\"\"\"\n db = tango.Database()\n db.delete_server('')\n\n\nif __name__ == '__main__':\n delete_devices()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/python
# This IDAPython code can be used to de-obfuscate strings generated by
# CryptoWall version 3, as well as any other malware samples that make use of
# this technique.
'''
Example disassembly:
.text:00403EC8 mov ecx, 'V'
.text:00403ECD mov [ebp+var_1C], cx
.text:00403ED1 mov edx, 'e'
.text:00403ED6 mov [ebp+var_1A], dx
.text:00403EDA mov eax, 'r'
.text:00403EDF mov [ebp+var_18], ax
.text:00403EE3 mov ecx, 's'
.text:00403EE8 mov [ebp+var_16], cx
.text:00403EEC mov edx, 'i'
.text:00403EF1 mov [ebp+var_14], dx
.text:00403EF5 mov eax, 'o'
.text:00403EFA mov [ebp+var_12], ax
.text:00403EFE mov ecx, 'n'
'''
# NOTE(review): legacy Python 2 script (print statement) using the old
# IDAPython API (GetMnem/GetOpnd/MakeComm/NextHead); it will not run on
# IDA 7+/Python 3 without porting to idc.print_insn_mnem etc.
pos = here()            # start scanning at the current cursor position
original_pos = pos      # first instruction of the string being rebuilt
out = ""
while True:
	# `mov e?x, imm` loads one de-obfuscated character into a 32-bit
	# register (see the example disassembly in the module docstring).
	if GetMnem(pos) == "mov" and GetOpnd(pos, 0)[0] == "e" and GetOpnd(pos, 0)[2] == "x":
		out += chr(GetOperandValue(pos,1))
	# The paired spill `mov [ebp+var_xx], ?x` carries no new data: skip.
	elif GetMnem(pos) == "mov" and "[ebp" in GetOpnd(pos, 0):
		None
	# An `xor` marks the end of one string: write it as a comment at the
	# string's first instruction and start accumulating the next one.
	elif GetMnem(pos) == "xor":
		MakeComm(original_pos, out)
		print "Making String: %s" % out
		out = ""
		original_pos = pos
	# Any other mnemonic terminates the scan.
	else:
		break
	pos = NextHead(pos)
|
normal
|
{
"blob_id": "e38149f0d421a43f6aa34a977eee89fe29021b85",
"index": 7451,
"step-1": "#!/usr/bin/python\n# This IDAPython code can be used to de-obfuscate strings generated by\n# CryptoWall version 3, as well as any other malware samples that make use of\n# this technique. \n\n'''\nExample disassembly:\n\n\t.text:00403EC8 mov ecx, 'V'\n\t.text:00403ECD mov [ebp+var_1C], cx\n\t.text:00403ED1 mov edx, 'e'\n\t.text:00403ED6 mov [ebp+var_1A], dx\n\t.text:00403EDA mov eax, 'r'\n\t.text:00403EDF mov [ebp+var_18], ax\n\t.text:00403EE3 mov ecx, 's'\n\t.text:00403EE8 mov [ebp+var_16], cx\n\t.text:00403EEC mov edx, 'i'\n\t.text:00403EF1 mov [ebp+var_14], dx\n\t.text:00403EF5 mov eax, 'o'\n\t.text:00403EFA mov [ebp+var_12], ax\n\t.text:00403EFE mov ecx, 'n'\n'''\n\npos = here()\noriginal_pos = pos\nout = \"\"\nwhile True:\n\tif GetMnem(pos) == \"mov\" and GetOpnd(pos, 0)[0] == \"e\" and GetOpnd(pos, 0)[2] == \"x\":\n\t\tout += chr(GetOperandValue(pos,1))\n\telif GetMnem(pos) == \"mov\" and \"[ebp\" in GetOpnd(pos, 0):\n\t\tNone\n\telif GetMnem(pos) == \"xor\":\n\t\tMakeComm(original_pos, out)\n\t\tprint \"Making String: %s\" % out\n\t\tout = \"\"\n\t\toriginal_pos = pos\n\telse:\n\t\tbreak\n\tpos = NextHead(pos)\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import unittest
from unittest.mock import patch
from fsqlfly.db_helper import *
from fsqlfly.tests.base_test import FSQLFlyTestCase
class MyTestCase(FSQLFlyTestCase):
    """Cascade-delete and config-lookup tests for the fsqlfly DB models."""

    def test_positive_delete(self):
        """Deleting a Namespace should cascade-delete its Transforms."""
        namespace = Namespace(name='iii')
        self.session.add(namespace)
        self.session.commit()
        t = Transform(name='test', sql='select 1;', namespace=namespace)
        self.session.add(t)
        self.session.commit()
        self.session.delete(namespace)
        self.session.commit()
        self.assertEqual(self.session.query(Transform).count(), 0)

    def get_create_object(self):
        """Build one linked Connection/Schema/Resource* object graph.

        Returns (connection, schema, schema2, r_name, t_name, v_name);
        nothing is added to the session here -- callers add and commit.
        """
        connection = Connection(name='a', url='#', type='hive', connector='text')
        schema = SchemaEvent(name='test', connection=connection, version=1)
        schema2 = SchemaEvent(name='test2', connection=connection, version=2)
        r_name = ResourceName(name='b', full_name='a.b', connection=connection, schema_version=schema)
        t_name = ResourceTemplate(name='c', resource_name=r_name, type='both', full_name='a.b.c', connection=connection,
                                  schema_version=schema)
        v_name = ResourceVersion(name='d', template=t_name, full_name='a.b.c.d', connection=connection,
                                 resource_name=r_name, schema_version=schema)
        return connection, schema, schema2, r_name, t_name, v_name

    def test_positive_delete_connection(self):
        """Deleting a Connection cascades to every dependent object."""
        connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()
        self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])
        self.session.commit()
        self.session.delete(connection)
        self.session.commit()
        self.assertEqual(self.session.query(ResourceName).count(), 0)
        self.assertEqual(self.session.query(Connection).count(), 0)
        self.assertEqual(self.session.query(ResourceVersion).count(), 0)
        self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
        self.assertEqual(self.session.query(SchemaEvent).count(), 0)

    def test_positive_delete_connection_by_db_helper(self):
        """Same cascade as above, but driven through DBDao.delete()."""
        connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()
        self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])
        self.session.commit()
        self.assertEqual(self.session.query(Connection).count(), 1)
        DBSession.init_engine(self.engine)
        # Patch the save-mode flag so DBDao allows the destructive call.
        with patch.object(settings, 'FSQLFLY_SAVE_MODE_DISABLE', True):
            res = DBDao.delete('connection', pk=connection.id)
            self.assertEqual(res.success, True)
        # Re-open the session so we observe the helper's committed state.
        self.session.close()
        self.session = self.get_session()
        self.assertEqual(self.session.query(Connection).count(), 0)
        self.assertEqual(self.session.query(ResourceName).count(), 0)
        self.assertEqual(self.session.query(ResourceVersion).count(), 0)
        self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
        self.assertEqual(self.session.query(SchemaEvent).count(), 0)

    def test_positive_delete_other(self):
        """Deleting one SchemaEvent removes resources but keeps the Connection."""
        connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()
        self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])
        self.session.commit()
        self.session.delete(schema)
        self.session.commit()
        self.assertEqual(self.session.query(Connection).count(), 1)
        self.assertEqual(self.session.query(ResourceName).count(), 0)
        # NOTE(review): duplicate of the Connection assertion two lines up.
        self.assertEqual(self.session.query(Connection).count(), 1)
        self.assertEqual(self.session.query(ResourceVersion).count(), 0)
        self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
        # schema2 survives because only `schema` was deleted.
        self.assertEqual(self.session.query(SchemaEvent).count(), 1)

    def test_get_connection_and_resource_name_config(self):
        """get_config reads each object's own config section and types values."""
        connection_config = """
[jdbc]
insert_primary_key = false
    """
        resource_name_config = """
[jdbc]
insert_primary_key = true
    """
        connection = Connection(name='a', url='#', type='hive', connector='text', config=connection_config)
        schema = SchemaEvent(name='test', connection=connection)
        r_name = ResourceName(name='b', full_name='a.b', connection=connection, schema_version=schema,
                              config=resource_name_config)
        self.assertTrue(not r_name.get_config('add_read_partition_key', 'jdbc', bool))
        # NOTE(review): duplicate of the assertion directly above.
        self.assertTrue(not r_name.get_config('add_read_partition_key', 'jdbc', bool))
        # presumably 50 is a built-in default, since it is never set here -- verify.
        self.assertEqual(connection.get_config('read_partition_num', 'jdbc', int), 50)
        self.assertTrue(r_name.get_config('example11') is None)
        self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))
        self.assertTrue(not connection.get_config('insert_primary_key', 'jdbc', bool))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
normal
|
{
"blob_id": "abbefb1e426408b32fa9e125c78b572de22dbb8c",
"index": 7493,
"step-1": "<mask token>\n\n\nclass MyTestCase(FSQLFlyTestCase):\n\n def test_positive_delete(self):\n namespace = Namespace(name='iii')\n self.session.add(namespace)\n self.session.commit()\n t = Transform(name='test', sql='select 1;', namespace=namespace)\n self.session.add(t)\n self.session.commit()\n self.session.delete(namespace)\n self.session.commit()\n self.assertEqual(self.session.query(Transform).count(), 0)\n <mask token>\n <mask token>\n <mask token>\n\n def test_positive_delete_other(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(schema)\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 1)\n\n def test_get_connection_and_resource_name_config(self):\n connection_config = '\\n[jdbc]\\ninsert_primary_key = false\\n\\n '\n resource_name_config = '\\n[jdbc]\\ninsert_primary_key = true\\n '\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text', config=connection_config)\n schema = SchemaEvent(name='test', connection=connection)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema, config=resource_name_config)\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertEqual(connection.get_config('read_partition_num', 'jdbc',\n int), 50)\n self.assertTrue(r_name.get_config('example11') is None)\n self.assertTrue(r_name.get_config('insert_primary_key', 
'jdbc', bool))\n self.assertTrue(not connection.get_config('insert_primary_key',\n 'jdbc', bool))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyTestCase(FSQLFlyTestCase):\n\n def test_positive_delete(self):\n namespace = Namespace(name='iii')\n self.session.add(namespace)\n self.session.commit()\n t = Transform(name='test', sql='select 1;', namespace=namespace)\n self.session.add(t)\n self.session.commit()\n self.session.delete(namespace)\n self.session.commit()\n self.assertEqual(self.session.query(Transform).count(), 0)\n\n def get_create_object(self):\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text')\n schema = SchemaEvent(name='test', connection=connection, version=1)\n schema2 = SchemaEvent(name='test2', connection=connection, version=2)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema)\n t_name = ResourceTemplate(name='c', resource_name=r_name, type=\n 'both', full_name='a.b.c', connection=connection,\n schema_version=schema)\n v_name = ResourceVersion(name='d', template=t_name, full_name=\n 'a.b.c.d', connection=connection, resource_name=r_name,\n schema_version=schema)\n return connection, schema, schema2, r_name, t_name, v_name\n <mask token>\n <mask token>\n\n def test_positive_delete_other(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(schema)\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 1)\n\n def test_get_connection_and_resource_name_config(self):\n connection_config = '\\n[jdbc]\\ninsert_primary_key = false\\n\\n '\n resource_name_config = 
'\\n[jdbc]\\ninsert_primary_key = true\\n '\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text', config=connection_config)\n schema = SchemaEvent(name='test', connection=connection)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema, config=resource_name_config)\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertEqual(connection.get_config('read_partition_num', 'jdbc',\n int), 50)\n self.assertTrue(r_name.get_config('example11') is None)\n self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))\n self.assertTrue(not connection.get_config('insert_primary_key',\n 'jdbc', bool))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MyTestCase(FSQLFlyTestCase):\n\n def test_positive_delete(self):\n namespace = Namespace(name='iii')\n self.session.add(namespace)\n self.session.commit()\n t = Transform(name='test', sql='select 1;', namespace=namespace)\n self.session.add(t)\n self.session.commit()\n self.session.delete(namespace)\n self.session.commit()\n self.assertEqual(self.session.query(Transform).count(), 0)\n\n def get_create_object(self):\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text')\n schema = SchemaEvent(name='test', connection=connection, version=1)\n schema2 = SchemaEvent(name='test2', connection=connection, version=2)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema)\n t_name = ResourceTemplate(name='c', resource_name=r_name, type=\n 'both', full_name='a.b.c', connection=connection,\n schema_version=schema)\n v_name = ResourceVersion(name='d', template=t_name, full_name=\n 'a.b.c.d', connection=connection, resource_name=r_name,\n schema_version=schema)\n return connection, schema, schema2, r_name, t_name, v_name\n\n def test_positive_delete_connection(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(connection)\n self.session.commit()\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 0)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_connection_by_db_helper(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n 
self.assertEqual(self.session.query(Connection).count(), 1)\n DBSession.init_engine(self.engine)\n with patch.object(settings, 'FSQLFLY_SAVE_MODE_DISABLE', True):\n res = DBDao.delete('connection', pk=connection.id)\n self.assertEqual(res.success, True)\n self.session.close()\n self.session = self.get_session()\n self.assertEqual(self.session.query(Connection).count(), 0)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_other(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(schema)\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 1)\n\n def test_get_connection_and_resource_name_config(self):\n connection_config = '\\n[jdbc]\\ninsert_primary_key = false\\n\\n '\n resource_name_config = '\\n[jdbc]\\ninsert_primary_key = true\\n '\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text', config=connection_config)\n schema = SchemaEvent(name='test', connection=connection)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema, config=resource_name_config)\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n 
self.assertEqual(connection.get_config('read_partition_num', 'jdbc',\n int), 50)\n self.assertTrue(r_name.get_config('example11') is None)\n self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))\n self.assertTrue(not connection.get_config('insert_primary_key',\n 'jdbc', bool))\n\n\n<mask token>\n",
"step-4": "import unittest\nfrom unittest.mock import patch\nfrom fsqlfly.db_helper import *\nfrom fsqlfly.tests.base_test import FSQLFlyTestCase\n\n\nclass MyTestCase(FSQLFlyTestCase):\n\n def test_positive_delete(self):\n namespace = Namespace(name='iii')\n self.session.add(namespace)\n self.session.commit()\n t = Transform(name='test', sql='select 1;', namespace=namespace)\n self.session.add(t)\n self.session.commit()\n self.session.delete(namespace)\n self.session.commit()\n self.assertEqual(self.session.query(Transform).count(), 0)\n\n def get_create_object(self):\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text')\n schema = SchemaEvent(name='test', connection=connection, version=1)\n schema2 = SchemaEvent(name='test2', connection=connection, version=2)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema)\n t_name = ResourceTemplate(name='c', resource_name=r_name, type=\n 'both', full_name='a.b.c', connection=connection,\n schema_version=schema)\n v_name = ResourceVersion(name='d', template=t_name, full_name=\n 'a.b.c.d', connection=connection, resource_name=r_name,\n schema_version=schema)\n return connection, schema, schema2, r_name, t_name, v_name\n\n def test_positive_delete_connection(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(connection)\n self.session.commit()\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 0)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_connection_by_db_helper(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n 
get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n DBSession.init_engine(self.engine)\n with patch.object(settings, 'FSQLFLY_SAVE_MODE_DISABLE', True):\n res = DBDao.delete('connection', pk=connection.id)\n self.assertEqual(res.success, True)\n self.session.close()\n self.session = self.get_session()\n self.assertEqual(self.session.query(Connection).count(), 0)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_other(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(schema)\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 1)\n\n def test_get_connection_and_resource_name_config(self):\n connection_config = '\\n[jdbc]\\ninsert_primary_key = false\\n\\n '\n resource_name_config = '\\n[jdbc]\\ninsert_primary_key = true\\n '\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text', config=connection_config)\n schema = SchemaEvent(name='test', connection=connection)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema, config=resource_name_config)\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 
'jdbc', bool))\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertEqual(connection.get_config('read_partition_num', 'jdbc',\n int), 50)\n self.assertTrue(r_name.get_config('example11') is None)\n self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))\n self.assertTrue(not connection.get_config('insert_primary_key',\n 'jdbc', bool))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nfrom unittest.mock import patch\nfrom fsqlfly.db_helper import *\nfrom fsqlfly.tests.base_test import FSQLFlyTestCase\n\n\nclass MyTestCase(FSQLFlyTestCase):\n def test_positive_delete(self):\n namespace = Namespace(name='iii')\n self.session.add(namespace)\n self.session.commit()\n\n t = Transform(name='test', sql='select 1;', namespace=namespace)\n self.session.add(t)\n self.session.commit()\n self.session.delete(namespace)\n self.session.commit()\n self.assertEqual(self.session.query(Transform).count(), 0)\n\n def get_create_object(self):\n connection = Connection(name='a', url='#', type='hive', connector='text')\n schema = SchemaEvent(name='test', connection=connection, version=1)\n schema2 = SchemaEvent(name='test2', connection=connection, version=2)\n r_name = ResourceName(name='b', full_name='a.b', connection=connection, schema_version=schema)\n t_name = ResourceTemplate(name='c', resource_name=r_name, type='both', full_name='a.b.c', connection=connection,\n schema_version=schema)\n v_name = ResourceVersion(name='d', template=t_name, full_name='a.b.c.d', connection=connection,\n resource_name=r_name, schema_version=schema)\n return connection, schema, schema2, r_name, t_name, v_name\n\n def test_positive_delete_connection(self):\n connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()\n\n self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])\n self.session.commit()\n self.session.delete(connection)\n self.session.commit()\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 0)\n\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_connection_by_db_helper(self):\n connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()\n\n 
self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n DBSession.init_engine(self.engine)\n with patch.object(settings, 'FSQLFLY_SAVE_MODE_DISABLE', True):\n res = DBDao.delete('connection', pk=connection.id)\n self.assertEqual(res.success, True)\n self.session.close()\n self.session = self.get_session()\n self.assertEqual(self.session.query(Connection).count(), 0)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_other(self):\n connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()\n\n self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])\n self.session.commit()\n self.session.delete(schema)\n\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 1)\n\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 1)\n\n def test_get_connection_and_resource_name_config(self):\n connection_config = \"\"\"\n[jdbc]\ninsert_primary_key = false\n\n \"\"\"\n resource_name_config = \"\"\"\n[jdbc]\ninsert_primary_key = true\n \"\"\"\n connection = Connection(name='a', url='#', type='hive', connector='text', config=connection_config)\n schema = SchemaEvent(name='test', connection=connection)\n r_name = ResourceName(name='b', full_name='a.b', connection=connection, schema_version=schema,\n config=resource_name_config)\n self.assertTrue(not r_name.get_config('add_read_partition_key', 'jdbc', bool))\n 
self.assertTrue(not r_name.get_config('add_read_partition_key', 'jdbc', bool))\n self.assertEqual(connection.get_config('read_partition_num', 'jdbc', int), 50)\n self.assertTrue(r_name.get_config('example11') is None)\n\n self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))\n self.assertTrue(not connection.get_config('insert_primary_key', 'jdbc', bool))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
4,
5,
7,
9,
10
]
}
|
[
4,
5,
7,
9,
10
] |
# Generated by Django 3.1.7 on 2021-03-24 14:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Products_Table`` model."""
    # First migration of this app (nothing to depend on, see `dependencies`).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Products_Table',
            fields=[
                # Explicit integer primary key (not a Django AutoField).
                ('product_id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
                ('product_name', models.CharField(max_length=50)),
                ('product_details', models.TextField()),
                ('product_price', models.IntegerField()),
                ('product_release_date', models.DateTimeField()),
            ],
        ),
    ]
|
normal
|
{
"blob_id": "90b9dcd2dfc28446d1979d58ed49a12a85ce5b98",
"index": 7429,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Products_Table', fields=[(\n 'product_id', models.IntegerField(auto_created=True, primary_key=\n True, serialize=False)), ('product_name', models.CharField(\n max_length=50)), ('product_details', models.TextField()), (\n 'product_price', models.IntegerField()), ('product_release_date',\n models.DateTimeField())])]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Products_Table', fields=[(\n 'product_id', models.IntegerField(auto_created=True, primary_key=\n True, serialize=False)), ('product_name', models.CharField(\n max_length=50)), ('product_details', models.TextField()), (\n 'product_price', models.IntegerField()), ('product_release_date',\n models.DateTimeField())])]\n",
"step-5": "# Generated by Django 3.1.7 on 2021-03-24 14:51\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Products_Table',\n fields=[\n ('product_id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),\n ('product_name', models.CharField(max_length=50)),\n ('product_details', models.TextField()),\n ('product_price', models.IntegerField()),\n ('product_release_date', models.DateTimeField()),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def inner_dsym_download(project_id: int, config_id: str) ->None:
    """Downloads the dSYMs from App Store Connect and stores them in the Project's debug files."""
    # Tag the ambient Sentry scope so anything reported below carries the
    # project and symbol-source config it relates to.
    with sdk.configure_scope() as scope:
        scope.set_tag('project', project_id)
        scope.set_tag('config_id', config_id)
    project = Project.objects.get(pk=project_id)
    config = appconnect.AppStoreConnectConfig.from_project_config(project,
        config_id)
    client = appconnect.AppConnectClient.from_config(config)
    listed_builds = client.list_builds()
    # Only builds that are not yet marked as fetched need their dSYMs pulled.
    builds = process_builds(project=project, config=config, to_process=
        listed_builds)
    if not builds:
        return
    for i, (build, build_state) in enumerate(builds):
        with sdk.configure_scope() as scope:
            scope.set_context('dsym_downloads', {'total': len(builds),
                'completed': i})
        with tempfile.NamedTemporaryFile() as dsyms_zip:
            try:
                client.download_dsyms(build, pathlib.Path(dsyms_zip.name))
            # For no dSYMs, let the build be marked as fetched (fall through,
            # no continue/return) so it is not re-checked on every run.
            except appconnect.NoDsymsError:
                logger.debug('No dSYMs for build %s', build)
            # The dSYM url will eventually be populated, so skip marking this
            # build as fetched and revisit it at a later time.
            except appconnect.PendingDsymsError:
                logger.debug('dSYM url currently unavailable for build %s',
                    build)
                continue
            # Early-return on unauthorized/forbidden to avoid trying all the
            # other builds as well, since an expired token will error for all
            # of them.  These are expected, actionable errors, so they are
            # reported as info messages rather than exceptions.
            except appstoreconnect_api.UnauthorizedError:
                sentry_sdk.capture_message(
                    'Not authorized to download dSYM using current App Store Connect credentials'
                    , level='info')
                return
            except appstoreconnect_api.ForbiddenError:
                sentry_sdk.capture_message(
                    'Forbidden from downloading dSYM using current App Store Connect credentials'
                    , level='info')
                return
            # Don't let a malformed URL abort the remaining downloads in case
            # it's an isolated instance.
            except ValueError as e:
                sdk.capture_exception(e)
                continue
            except appstoreconnect_api.RequestError as e:
                sdk.capture_exception(e)
                continue
            except requests.RequestException as e:
                sdk.capture_exception(e)
                continue
            else:
                # Download succeeded: register the contained debug files.
                create_difs_from_dsyms_zip(dsyms_zip.name, project)
                logger.debug('Uploaded dSYMs for build %s', build)
                metrics.incr('tasks.app_store_connect.builds_ingested',
                    sample_rate=1)
        # Reached on success and on NoDsymsError: either way this build never
        # needs to be fetched again.
        build_state.fetched = True
        build_state.save()
<|reserved_special_token_0|>
def process_builds(project: Project, config: appconnect.
    AppStoreConnectConfig, to_process: List[appconnect.BuildInfo]) ->List[Tuple
    [appconnect.BuildInfo, AppConnectBuild]]:
    """Returns a list of builds whose dSYMs need to be updated or fetched.

    This will create a new "pending" :class:`AppConnectBuild` for any
    :class:`appconnect.BuildInfo` that cannot be found in the DB.  These
    pending :class:`AppConnectBuild`s are immediately saved upon creation.
    """
    pending: List[Tuple[appconnect.BuildInfo, AppConnectBuild]] = []
    span = sentry_sdk.start_span(op='appconnect-update-builds', description=
        'Update AppStoreConnect builds in database')
    with span:
        for info in to_process:
            persisted = get_or_create_persisted_build(project, config, info)
            # Builds already marked as fetched need no further work.
            if persisted.fetched:
                continue
            pending.append((info, persisted))
    # Record when this symbol source was last polled, regardless of outcome.
    LatestAppConnectBuildsCheck.objects.create_or_update(project=project,
        source_id=config.id, values={'last_checked': timezone.now()})
    return pending
@instrumented_task(name='sentry.tasks.app_store_connect.refresh_all_builds',
    queue='appstoreconnect', ignore_result=True)
def refresh_all_builds() ->None:
    # Celery task entry point; the untyped decorator erases type information,
    # so the actual work lives in inner_refresh_all_builds().
    inner_refresh_all_builds()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def inner_dsym_download(project_id: int, config_id: str) ->None:
    """Downloads the dSYMs from App Store Connect and stores them in the Project's debug files."""
    # Tag the ambient Sentry scope so anything reported below carries the
    # project and symbol-source config it relates to.
    with sdk.configure_scope() as scope:
        scope.set_tag('project', project_id)
        scope.set_tag('config_id', config_id)
    project = Project.objects.get(pk=project_id)
    config = appconnect.AppStoreConnectConfig.from_project_config(project,
        config_id)
    client = appconnect.AppConnectClient.from_config(config)
    listed_builds = client.list_builds()
    # Only builds that are not yet marked as fetched need their dSYMs pulled.
    builds = process_builds(project=project, config=config, to_process=
        listed_builds)
    if not builds:
        return
    for i, (build, build_state) in enumerate(builds):
        with sdk.configure_scope() as scope:
            scope.set_context('dsym_downloads', {'total': len(builds),
                'completed': i})
        with tempfile.NamedTemporaryFile() as dsyms_zip:
            try:
                client.download_dsyms(build, pathlib.Path(dsyms_zip.name))
            # For no dSYMs, let the build be marked as fetched (fall through,
            # no continue/return) so it is not re-checked on every run.
            except appconnect.NoDsymsError:
                logger.debug('No dSYMs for build %s', build)
            # The dSYM url will eventually be populated, so skip marking this
            # build as fetched and revisit it at a later time.
            except appconnect.PendingDsymsError:
                logger.debug('dSYM url currently unavailable for build %s',
                    build)
                continue
            # Early-return on unauthorized/forbidden to avoid trying all the
            # other builds as well, since an expired token will error for all
            # of them.  These are expected, actionable errors, so they are
            # reported as info messages rather than exceptions.
            except appstoreconnect_api.UnauthorizedError:
                sentry_sdk.capture_message(
                    'Not authorized to download dSYM using current App Store Connect credentials'
                    , level='info')
                return
            except appstoreconnect_api.ForbiddenError:
                sentry_sdk.capture_message(
                    'Forbidden from downloading dSYM using current App Store Connect credentials'
                    , level='info')
                return
            # Don't let a malformed URL abort the remaining downloads in case
            # it's an isolated instance.
            except ValueError as e:
                sdk.capture_exception(e)
                continue
            except appstoreconnect_api.RequestError as e:
                sdk.capture_exception(e)
                continue
            except requests.RequestException as e:
                sdk.capture_exception(e)
                continue
            else:
                # Download succeeded: register the contained debug files.
                create_difs_from_dsyms_zip(dsyms_zip.name, project)
                logger.debug('Uploaded dSYMs for build %s', build)
                metrics.incr('tasks.app_store_connect.builds_ingested',
                    sample_rate=1)
        # Reached on success and on NoDsymsError: either way this build never
        # needs to be fetched again.
        build_state.fetched = True
        build_state.save()
def create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) ->None:
    """Extracts a zip of dSYMs and stores each one as a debug file on *project*."""
    with sentry_sdk.start_span(op='dsym-difs', description=
        'Extract difs dSYM zip'):
        with open(dsyms_zip, 'rb') as fp:
            difs = debugfile.create_files_from_dif_zip(fp, project,
                accept_unknown=True)
        # The zip is fully processed; log what was registered.
        for dif in difs:
            logger.debug('Created %r for project %s', dif, project.id)
def get_or_create_persisted_build(project: Project, config: appconnect.
    AppStoreConnectConfig, build: appconnect.BuildInfo) ->AppConnectBuild:
    """Fetches the sentry-internal :class:`AppConnectBuild`.

    The build corresponds to the :class:`appconnect.BuildInfo` as returned by
    the AppStore Connect API.  If no build exists yet, a new "pending" build
    is created and saved immediately.
    """
    # Fields which uniquely identify this build within the project.
    identity = dict(project=project, app_id=build.app_id, platform=build.
        platform, bundle_short_version=build.version, bundle_version=build.
        build_number)
    try:
        return AppConnectBuild.objects.get(**identity)
    except AppConnectBuild.DoesNotExist:
        pending = AppConnectBuild(bundle_id=config.bundleId,
            uploaded_to_appstore=build.uploaded_date, first_seen=timezone.
            now(), fetched=False, **identity)
        pending.save()
        return pending
def process_builds(project: Project, config: appconnect.
    AppStoreConnectConfig, to_process: List[appconnect.BuildInfo]) ->List[Tuple
    [appconnect.BuildInfo, AppConnectBuild]]:
    """Returns a list of builds whose dSYMs need to be updated or fetched.

    This will create a new "pending" :class:`AppConnectBuild` for any
    :class:`appconnect.BuildInfo` that cannot be found in the DB.  These
    pending :class:`AppConnectBuild`s are immediately saved upon creation.
    """
    pending: List[Tuple[appconnect.BuildInfo, AppConnectBuild]] = []
    span = sentry_sdk.start_span(op='appconnect-update-builds', description=
        'Update AppStoreConnect builds in database')
    with span:
        for info in to_process:
            persisted = get_or_create_persisted_build(project, config, info)
            # Builds already marked as fetched need no further work.
            if persisted.fetched:
                continue
            pending.append((info, persisted))
    # Record when this symbol source was last polled, regardless of outcome.
    LatestAppConnectBuildsCheck.objects.create_or_update(project=project,
        source_id=config.id, values={'last_checked': timezone.now()})
    return pending
@instrumented_task(name='sentry.tasks.app_store_connect.refresh_all_builds',
    queue='appstoreconnect', ignore_result=True)
def refresh_all_builds() ->None:
    # Celery task entry point; the untyped decorator erases type information,
    # so the actual work lives in inner_refresh_all_builds().
    inner_refresh_all_builds()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@instrumented_task(name='sentry.tasks.app_store_connect.dsym_download',
    queue='appstoreconnect', ignore_result=True)
def dsym_download(project_id: int, config_id: str) ->None:
    """Celery entry point that delegates to :func:`inner_dsym_download`."""
    inner_dsym_download(config_id=config_id, project_id=project_id)
def inner_dsym_download(project_id: int, config_id: str) ->None:
    """Downloads the dSYMs from App Store Connect and stores them in the Project's debug files."""
    # Tag the ambient Sentry scope so anything reported below carries the
    # project and symbol-source config it relates to.
    with sdk.configure_scope() as scope:
        scope.set_tag('project', project_id)
        scope.set_tag('config_id', config_id)
    project = Project.objects.get(pk=project_id)
    config = appconnect.AppStoreConnectConfig.from_project_config(project,
        config_id)
    client = appconnect.AppConnectClient.from_config(config)
    listed_builds = client.list_builds()
    # Only builds that are not yet marked as fetched need their dSYMs pulled.
    builds = process_builds(project=project, config=config, to_process=
        listed_builds)
    if not builds:
        return
    for i, (build, build_state) in enumerate(builds):
        with sdk.configure_scope() as scope:
            scope.set_context('dsym_downloads', {'total': len(builds),
                'completed': i})
        with tempfile.NamedTemporaryFile() as dsyms_zip:
            try:
                client.download_dsyms(build, pathlib.Path(dsyms_zip.name))
            # For no dSYMs, let the build be marked as fetched (fall through,
            # no continue/return) so it is not re-checked on every run.
            except appconnect.NoDsymsError:
                logger.debug('No dSYMs for build %s', build)
            # The dSYM url will eventually be populated, so skip marking this
            # build as fetched and revisit it at a later time.
            except appconnect.PendingDsymsError:
                logger.debug('dSYM url currently unavailable for build %s',
                    build)
                continue
            # Early-return on unauthorized/forbidden to avoid trying all the
            # other builds as well, since an expired token will error for all
            # of them.  These are expected, actionable errors, so they are
            # reported as info messages rather than exceptions.
            except appstoreconnect_api.UnauthorizedError:
                sentry_sdk.capture_message(
                    'Not authorized to download dSYM using current App Store Connect credentials'
                    , level='info')
                return
            except appstoreconnect_api.ForbiddenError:
                sentry_sdk.capture_message(
                    'Forbidden from downloading dSYM using current App Store Connect credentials'
                    , level='info')
                return
            # Don't let a malformed URL abort the remaining downloads in case
            # it's an isolated instance.
            except ValueError as e:
                sdk.capture_exception(e)
                continue
            except appstoreconnect_api.RequestError as e:
                sdk.capture_exception(e)
                continue
            except requests.RequestException as e:
                sdk.capture_exception(e)
                continue
            else:
                # Download succeeded: register the contained debug files.
                create_difs_from_dsyms_zip(dsyms_zip.name, project)
                logger.debug('Uploaded dSYMs for build %s', build)
                metrics.incr('tasks.app_store_connect.builds_ingested',
                    sample_rate=1)
        # Reached on success and on NoDsymsError: either way this build never
        # needs to be fetched again.
        build_state.fetched = True
        build_state.save()
def create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) ->None:
    """Extracts a zip of dSYMs and stores each one as a debug file on *project*."""
    with sentry_sdk.start_span(op='dsym-difs', description=
        'Extract difs dSYM zip'):
        with open(dsyms_zip, 'rb') as fp:
            difs = debugfile.create_files_from_dif_zip(fp, project,
                accept_unknown=True)
        # The zip is fully processed; log what was registered.
        for dif in difs:
            logger.debug('Created %r for project %s', dif, project.id)
def get_or_create_persisted_build(project: Project, config: appconnect.
    AppStoreConnectConfig, build: appconnect.BuildInfo) ->AppConnectBuild:
    """Fetches the sentry-internal :class:`AppConnectBuild`.

    The build corresponds to the :class:`appconnect.BuildInfo` as returned by
    the AppStore Connect API.  If no build exists yet, a new "pending" build
    is created and saved immediately.
    """
    # Fields which uniquely identify this build within the project.
    identity = dict(project=project, app_id=build.app_id, platform=build.
        platform, bundle_short_version=build.version, bundle_version=build.
        build_number)
    try:
        return AppConnectBuild.objects.get(**identity)
    except AppConnectBuild.DoesNotExist:
        pending = AppConnectBuild(bundle_id=config.bundleId,
            uploaded_to_appstore=build.uploaded_date, first_seen=timezone.
            now(), fetched=False, **identity)
        pending.save()
        return pending
def process_builds(project: Project, config: appconnect.
    AppStoreConnectConfig, to_process: List[appconnect.BuildInfo]) ->List[Tuple
    [appconnect.BuildInfo, AppConnectBuild]]:
    """Returns a list of builds whose dSYMs need to be updated or fetched.

    This will create a new "pending" :class:`AppConnectBuild` for any
    :class:`appconnect.BuildInfo` that cannot be found in the DB.  These
    pending :class:`AppConnectBuild`s are immediately saved upon creation.
    """
    pending: List[Tuple[appconnect.BuildInfo, AppConnectBuild]] = []
    span = sentry_sdk.start_span(op='appconnect-update-builds', description=
        'Update AppStoreConnect builds in database')
    with span:
        for info in to_process:
            persisted = get_or_create_persisted_build(project, config, info)
            # Builds already marked as fetched need no further work.
            if persisted.fetched:
                continue
            pending.append((info, persisted))
    # Record when this symbol source was last polled, regardless of outcome.
    LatestAppConnectBuildsCheck.objects.create_or_update(project=project,
        source_id=config.id, values={'last_checked': timezone.now()})
    return pending
@instrumented_task(name='sentry.tasks.app_store_connect.refresh_all_builds',
    queue='appstoreconnect', ignore_result=True)
def refresh_all_builds() ->None:
    # Celery task entry point; the untyped decorator erases type information,
    # so the actual work lives in inner_refresh_all_builds().
    inner_refresh_all_builds()
def inner_refresh_all_builds() ->None:
    """Refreshes all AppStoreConnect builds for all projects.

    This iterates over all the projects configured in Sentry and for any
    which has an AppStoreConnect symbol source configured will poll the
    AppStoreConnect API to check if there are new builds.
    """
    symbol_source_options = ProjectOption.objects.filter(key=appconnect.
        SYMBOL_SOURCES_PROP_NAME)
    scheduled = 0
    for option in symbol_source_options:
        with sdk.push_scope() as scope:
            scope.set_tag('project', option.project_id)
            try:
                # An empty option value means no symbol sources configured.
                if not option.value:
                    continue
                sources: List[Mapping[str, str]] = json.loads(option.value)
                for source in sources:
                    try:
                        config_id = source['id']
                        source_type = source['type']
                    except KeyError:
                        logger.exception('Malformed symbol source')
                        continue
                    # Only App Store Connect sources get a download task.
                    if source_type == appconnect.SYMBOL_SOURCE_TYPE_NAME:
                        dsym_download.apply_async(kwargs={'project_id':
                            option.project_id, 'config_id': config_id})
                        scheduled += 1
            except Exception:
                # One broken project must not stop the refresh of the others.
                logger.exception('Failed to refresh AppStoreConnect builds')
    metrics.gauge('tasks.app_store_connect.refreshed', scheduled, sample_rate=1
        )
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import logging
import pathlib
import tempfile
from typing import List, Mapping, Tuple
import requests
import sentry_sdk
from django.utils import timezone
from sentry.lang.native import appconnect
from sentry.models import AppConnectBuild, LatestAppConnectBuildsCheck, Project, ProjectOption, debugfile
from sentry.tasks.base import instrumented_task
from sentry.utils import json, metrics, sdk
from sentry.utils.appleconnect import appstore_connect as appstoreconnect_api
logger = logging.getLogger(__name__)
@instrumented_task(name='sentry.tasks.app_store_connect.dsym_download',
    queue='appstoreconnect', ignore_result=True)
def dsym_download(project_id: int, config_id: str) ->None:
    """Celery entry point that delegates to :func:`inner_dsym_download`."""
    inner_dsym_download(config_id=config_id, project_id=project_id)
def inner_dsym_download(project_id: int, config_id: str) ->None:
    """Downloads the dSYMs from App Store Connect and stores them in the Project's debug files."""
    # Tag the ambient Sentry scope so anything reported below carries the
    # project and symbol-source config it relates to.
    with sdk.configure_scope() as scope:
        scope.set_tag('project', project_id)
        scope.set_tag('config_id', config_id)
    project = Project.objects.get(pk=project_id)
    config = appconnect.AppStoreConnectConfig.from_project_config(project,
        config_id)
    client = appconnect.AppConnectClient.from_config(config)
    listed_builds = client.list_builds()
    # Only builds that are not yet marked as fetched need their dSYMs pulled.
    builds = process_builds(project=project, config=config, to_process=
        listed_builds)
    if not builds:
        return
    for i, (build, build_state) in enumerate(builds):
        with sdk.configure_scope() as scope:
            scope.set_context('dsym_downloads', {'total': len(builds),
                'completed': i})
        with tempfile.NamedTemporaryFile() as dsyms_zip:
            try:
                client.download_dsyms(build, pathlib.Path(dsyms_zip.name))
            # For no dSYMs, let the build be marked as fetched (fall through,
            # no continue/return) so it is not re-checked on every run.
            except appconnect.NoDsymsError:
                logger.debug('No dSYMs for build %s', build)
            # The dSYM url will eventually be populated, so skip marking this
            # build as fetched and revisit it at a later time.
            except appconnect.PendingDsymsError:
                logger.debug('dSYM url currently unavailable for build %s',
                    build)
                continue
            # Early-return on unauthorized/forbidden to avoid trying all the
            # other builds as well, since an expired token will error for all
            # of them.  These are expected, actionable errors, so they are
            # reported as info messages rather than exceptions.
            except appstoreconnect_api.UnauthorizedError:
                sentry_sdk.capture_message(
                    'Not authorized to download dSYM using current App Store Connect credentials'
                    , level='info')
                return
            except appstoreconnect_api.ForbiddenError:
                sentry_sdk.capture_message(
                    'Forbidden from downloading dSYM using current App Store Connect credentials'
                    , level='info')
                return
            # Don't let a malformed URL abort the remaining downloads in case
            # it's an isolated instance.
            except ValueError as e:
                sdk.capture_exception(e)
                continue
            except appstoreconnect_api.RequestError as e:
                sdk.capture_exception(e)
                continue
            except requests.RequestException as e:
                sdk.capture_exception(e)
                continue
            else:
                # Download succeeded: register the contained debug files.
                create_difs_from_dsyms_zip(dsyms_zip.name, project)
                logger.debug('Uploaded dSYMs for build %s', build)
                metrics.incr('tasks.app_store_connect.builds_ingested',
                    sample_rate=1)
        # Reached on success and on NoDsymsError: either way this build never
        # needs to be fetched again.
        build_state.fetched = True
        build_state.save()
def create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) ->None:
    """Extracts a zip of dSYMs and stores each one as a debug file on *project*."""
    with sentry_sdk.start_span(op='dsym-difs', description=
        'Extract difs dSYM zip'):
        with open(dsyms_zip, 'rb') as fp:
            difs = debugfile.create_files_from_dif_zip(fp, project,
                accept_unknown=True)
        # The zip is fully processed; log what was registered.
        for dif in difs:
            logger.debug('Created %r for project %s', dif, project.id)
def get_or_create_persisted_build(project: Project, config: appconnect.
    AppStoreConnectConfig, build: appconnect.BuildInfo) ->AppConnectBuild:
    """Fetches the sentry-internal :class:`AppConnectBuild`.

    The build corresponds to the :class:`appconnect.BuildInfo` as returned by
    the AppStore Connect API.  If no build exists yet, a new "pending" build
    is created and saved immediately.
    """
    # Fields which uniquely identify this build within the project.
    identity = dict(project=project, app_id=build.app_id, platform=build.
        platform, bundle_short_version=build.version, bundle_version=build.
        build_number)
    try:
        return AppConnectBuild.objects.get(**identity)
    except AppConnectBuild.DoesNotExist:
        pending = AppConnectBuild(bundle_id=config.bundleId,
            uploaded_to_appstore=build.uploaded_date, first_seen=timezone.
            now(), fetched=False, **identity)
        pending.save()
        return pending
def process_builds(project: Project, config: appconnect.
    AppStoreConnectConfig, to_process: List[appconnect.BuildInfo]) ->List[Tuple
    [appconnect.BuildInfo, AppConnectBuild]]:
    """Returns a list of builds whose dSYMs need to be updated or fetched.

    This will create a new "pending" :class:`AppConnectBuild` for any
    :class:`appconnect.BuildInfo` that cannot be found in the DB.  These
    pending :class:`AppConnectBuild`s are immediately saved upon creation.
    """
    pending: List[Tuple[appconnect.BuildInfo, AppConnectBuild]] = []
    span = sentry_sdk.start_span(op='appconnect-update-builds', description=
        'Update AppStoreConnect builds in database')
    with span:
        for info in to_process:
            persisted = get_or_create_persisted_build(project, config, info)
            # Builds already marked as fetched need no further work.
            if persisted.fetched:
                continue
            pending.append((info, persisted))
    # Record when this symbol source was last polled, regardless of outcome.
    LatestAppConnectBuildsCheck.objects.create_or_update(project=project,
        source_id=config.id, values={'last_checked': timezone.now()})
    return pending
@instrumented_task(name='sentry.tasks.app_store_connect.refresh_all_builds',
    queue='appstoreconnect', ignore_result=True)
def refresh_all_builds() ->None:
    # Celery task entry point; the untyped decorator erases type information,
    # so the actual work lives in inner_refresh_all_builds().
    inner_refresh_all_builds()
def inner_refresh_all_builds() ->None:
    """Refreshes all AppStoreConnect builds for all projects.

    This iterates over all the projects configured in Sentry and for any
    which has an AppStoreConnect symbol source configured will poll the
    AppStoreConnect API to check if there are new builds.
    """
    symbol_source_options = ProjectOption.objects.filter(key=appconnect.
        SYMBOL_SOURCES_PROP_NAME)
    scheduled = 0
    for option in symbol_source_options:
        with sdk.push_scope() as scope:
            scope.set_tag('project', option.project_id)
            try:
                # An empty option value means no symbol sources configured.
                if not option.value:
                    continue
                sources: List[Mapping[str, str]] = json.loads(option.value)
                for source in sources:
                    try:
                        config_id = source['id']
                        source_type = source['type']
                    except KeyError:
                        logger.exception('Malformed symbol source')
                        continue
                    # Only App Store Connect sources get a download task.
                    if source_type == appconnect.SYMBOL_SOURCE_TYPE_NAME:
                        dsym_download.apply_async(kwargs={'project_id':
                            option.project_id, 'config_id': config_id})
                        scheduled += 1
            except Exception:
                # One broken project must not stop the refresh of the others.
                logger.exception('Failed to refresh AppStoreConnect builds')
    metrics.gauge('tasks.app_store_connect.refreshed', scheduled, sample_rate=1
        )
<|reserved_special_token_1|>
"""Tasks for managing Debug Information Files from Apple App Store Connect.
Users can instruct Sentry to download dSYM from App Store Connect and put them into Sentry's
debug files. These tasks enable this functionality.
"""
import logging
import pathlib
import tempfile
from typing import List, Mapping, Tuple
import requests
import sentry_sdk
from django.utils import timezone
from sentry.lang.native import appconnect
from sentry.models import (
AppConnectBuild,
LatestAppConnectBuildsCheck,
Project,
ProjectOption,
debugfile,
)
from sentry.tasks.base import instrumented_task
from sentry.utils import json, metrics, sdk
from sentry.utils.appleconnect import appstore_connect as appstoreconnect_api
logger = logging.getLogger(__name__)
# Sadly this decorator makes this entire function untyped for now as it does not itself have
# typing annotations. So we do all the work outside of the decorated task function to work
# around this.
# Since all these args must be pickled we keep them to built-in types as well.
@instrumented_task(name="sentry.tasks.app_store_connect.dsym_download", queue="appstoreconnect", ignore_result=True)  # type: ignore
def dsym_download(project_id: int, config_id: str) -> None:
    """Celery entry point that delegates to :func:`inner_dsym_download`."""
    inner_dsym_download(config_id=config_id, project_id=project_id)
def inner_dsym_download(project_id: int, config_id: str) -> None:
    """Downloads the dSYMs from App Store Connect and stores them in the Project's debug files.

    :param project_id: primary key of the :class:`Project` that owns the symbol source.
    :param config_id: id of the App Store Connect symbol source config within that project.
    """
    with sdk.configure_scope() as scope:
        scope.set_tag("project", project_id)
        scope.set_tag("config_id", config_id)

    project = Project.objects.get(pk=project_id)
    config = appconnect.AppStoreConnectConfig.from_project_config(project, config_id)
    client = appconnect.AppConnectClient.from_config(config)

    listed_builds = client.list_builds()
    # process_builds persists every listed build and returns only the ones
    # whose dSYMs have not been fetched yet.
    builds = process_builds(project=project, config=config, to_process=listed_builds)

    if not builds:
        return

    for i, (build, build_state) in enumerate(builds):
        # Attach download progress so any error events captured below show
        # how far through the build list we got.
        with sdk.configure_scope() as scope:
            scope.set_context("dsym_downloads", {"total": len(builds), "completed": i})
        with tempfile.NamedTemporaryFile() as dsyms_zip:
            try:
                client.download_dsyms(build, pathlib.Path(dsyms_zip.name))
            # For no dSYMs, let the build be marked as fetched so they're not
            # repeatedly re-checked every time this task is run.  (No ``continue``
            # on purpose: control falls through to the fetched/save lines below.)
            except appconnect.NoDsymsError:
                logger.debug("No dSYMs for build %s", build)
            # Moves on to the next build so we don't check off fetched. This url will
            # eventuallyTM be populated, so revisit it at a later time.
            except appconnect.PendingDsymsError:
                logger.debug("dSYM url currently unavailable for build %s", build)
                continue
            # early-return in unauthorized and forbidden to avoid trying all the other builds
            # as well, since an expired token will error for all of them.
            # the error is also swallowed unreported because this is an expected and actionable
            # error.
            except appstoreconnect_api.UnauthorizedError:
                sentry_sdk.capture_message(
                    "Not authorized to download dSYM using current App Store Connect credentials",
                    level="info",
                )
                return
            except appstoreconnect_api.ForbiddenError:
                sentry_sdk.capture_message(
                    "Forbidden from downloading dSYM using current App Store Connect credentials",
                    level="info",
                )
                return
            # Don't let malformed URLs abort all pending downloads in case it's an isolated instance
            except ValueError as e:
                sdk.capture_exception(e)
                continue
            # Assume request errors are a server side issue and do not abort all the
            # pending downloads.
            except appstoreconnect_api.RequestError as e:
                sdk.capture_exception(e)
                continue
            except requests.RequestException as e:
                sdk.capture_exception(e)
                continue
            else:
                create_difs_from_dsyms_zip(dsyms_zip.name, project)
                logger.debug("Uploaded dSYMs for build %s", build)
                metrics.incr("tasks.app_store_connect.builds_ingested", sample_rate=1)

        # Reached on success and on NoDsymsError; every other branch above
        # either ``continue``s or ``return``s.
        build_state.fetched = True
        build_state.save()
def create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) -> None:
    """Extract the dSYM zip at *dsyms_zip* and register its debug files with *project*."""
    span = sentry_sdk.start_span(op="dsym-difs", description="Extract difs dSYM zip")
    with span, open(dsyms_zip, "rb") as zip_fp:
        for dif in debugfile.create_files_from_dif_zip(zip_fp, project, accept_unknown=True):
            logger.debug("Created %r for project %s", dif, project.id)
def get_or_create_persisted_build(
    project: Project, config: appconnect.AppStoreConnectConfig, build: appconnect.BuildInfo
) -> AppConnectBuild:
    """Fetches the sentry-internal :class:`AppConnectBuild`.

    The build corresponds to the :class:`appconnect.BuildInfo` as returned by the
    AppStore Connect API. If no build exists yet, a new "pending" build is created
    and saved before being returned.
    """
    # The natural key used both for lookup and for a newly created row.
    lookup = {
        "project": project,
        "app_id": build.app_id,
        "platform": build.platform,
        "bundle_short_version": build.version,
        "bundle_version": build.build_number,
    }
    try:
        return AppConnectBuild.objects.get(**lookup)
    except AppConnectBuild.DoesNotExist:
        pending = AppConnectBuild(
            bundle_id=config.bundleId,
            uploaded_to_appstore=build.uploaded_date,
            first_seen=timezone.now(),
            fetched=False,
            **lookup,
        )
        pending.save()
        return pending
def process_builds(
    project: Project,
    config: appconnect.AppStoreConnectConfig,
    to_process: List[appconnect.BuildInfo],
) -> List[Tuple[appconnect.BuildInfo, AppConnectBuild]]:
    """Returns the builds whose dSYMs still need to be fetched.

    Every :class:`appconnect.BuildInfo` in *to_process* is persisted: a new "pending"
    :class:`AppConnectBuild` is created and saved immediately for any build not yet
    in the DB.  Only the (build, state) pairs not already marked as fetched are
    returned.
    """
    needs_fetching = []

    with sentry_sdk.start_span(
        op="appconnect-update-builds", description="Update AppStoreConnect builds in database"
    ):
        for build_info in to_process:
            state = get_or_create_persisted_build(project, config, build_info)
            if state.fetched:
                continue
            needs_fetching.append((build_info, state))

        # Record that we checked this symbol source, regardless of the outcome.
        LatestAppConnectBuildsCheck.objects.create_or_update(
            project=project, source_id=config.id, values={"last_checked": timezone.now()}
        )

    return needs_fetching
# Untyped decorator would stop type-checking of entire function, split into an inner
# function instead which can be type checked.
@instrumented_task(  # type: ignore
    name="sentry.tasks.app_store_connect.refresh_all_builds",
    queue="appstoreconnect",
    ignore_result=True,
)
def refresh_all_builds() -> None:
    """Celery task entry point; the type-checked implementation is ``inner_refresh_all_builds``."""
    inner_refresh_all_builds()
def inner_refresh_all_builds() -> None:
    """Refreshes all AppStoreConnect builds for all projects.

    This iterates over all the projects configured in Sentry and for any which has an
    AppStoreConnect symbol source configured will poll the AppStoreConnect API to check if
    there are new builds.
    """
    # There is no direct query for AppStore Connect symbol sources, so fetch every
    # project option with custom symbol sources configured; there are few enough of
    # those to walk every hour.  Querying projects with existing `AppConnectBuild`
    # rows instead would miss projects with a valid setup but no published builds.
    symbol_source_options = ProjectOption.objects.filter(
        key=appconnect.SYMBOL_SOURCES_PROP_NAME
    )
    scheduled = 0
    for option in symbol_source_options:
        with sdk.push_scope() as scope:
            scope.set_tag("project", option.project_id)
            try:
                if not option.value:
                    # The UI stores an empty string when deleting all sources;
                    # that is not valid JSON, so skip it.
                    continue
                # json.loads gives us Any, so help the type-checker along.  This is
                # a slight lie but the attributes we read are all string values.
                sources: List[Mapping[str, str]] = json.loads(option.value)
                for source in sources:
                    try:
                        source_id, source_type = source["id"], source["type"]
                    except KeyError:
                        logger.exception("Malformed symbol source")
                        continue
                    if source_type != appconnect.SYMBOL_SOURCE_TYPE_NAME:
                        continue
                    dsym_download.apply_async(
                        kwargs={
                            "project_id": option.project_id,
                            "config_id": source_id,
                        }
                    )
                    scheduled += 1
            except Exception:
                logger.exception("Failed to refresh AppStoreConnect builds")
    metrics.gauge("tasks.app_store_connect.refreshed", scheduled, sample_rate=1)
|
flexible
|
{
"blob_id": "51bc2668a9f9f4425166f9e6da72b7a1c37baa01",
"index": 9628,
"step-1": "<mask token>\n\n\ndef inner_dsym_download(project_id: int, config_id: str) ->None:\n \"\"\"Downloads the dSYMs from App Store Connect and stores them in the Project's debug files.\"\"\"\n with sdk.configure_scope() as scope:\n scope.set_tag('project', project_id)\n scope.set_tag('config_id', config_id)\n project = Project.objects.get(pk=project_id)\n config = appconnect.AppStoreConnectConfig.from_project_config(project,\n config_id)\n client = appconnect.AppConnectClient.from_config(config)\n listed_builds = client.list_builds()\n builds = process_builds(project=project, config=config, to_process=\n listed_builds)\n if not builds:\n return\n for i, (build, build_state) in enumerate(builds):\n with sdk.configure_scope() as scope:\n scope.set_context('dsym_downloads', {'total': len(builds),\n 'completed': i})\n with tempfile.NamedTemporaryFile() as dsyms_zip:\n try:\n client.download_dsyms(build, pathlib.Path(dsyms_zip.name))\n except appconnect.NoDsymsError:\n logger.debug('No dSYMs for build %s', build)\n except appconnect.PendingDsymsError:\n logger.debug('dSYM url currently unavailable for build %s',\n build)\n continue\n except appstoreconnect_api.UnauthorizedError:\n sentry_sdk.capture_message(\n 'Not authorized to download dSYM using current App Store Connect credentials'\n , level='info')\n return\n except appstoreconnect_api.ForbiddenError:\n sentry_sdk.capture_message(\n 'Forbidden from downloading dSYM using current App Store Connect credentials'\n , level='info')\n return\n except ValueError as e:\n sdk.capture_exception(e)\n continue\n except appstoreconnect_api.RequestError as e:\n sdk.capture_exception(e)\n continue\n except requests.RequestException as e:\n sdk.capture_exception(e)\n continue\n else:\n create_difs_from_dsyms_zip(dsyms_zip.name, project)\n logger.debug('Uploaded dSYMs for build %s', build)\n metrics.incr('tasks.app_store_connect.builds_ingested',\n sample_rate=1)\n build_state.fetched = True\n build_state.save()\n\n\n<mask 
token>\n\n\ndef process_builds(project: Project, config: appconnect.\n AppStoreConnectConfig, to_process: List[appconnect.BuildInfo]) ->List[Tuple\n [appconnect.BuildInfo, AppConnectBuild]]:\n \"\"\"Returns a list of builds whose dSYMs need to be updated or fetched.\n\n This will create a new \"pending\" :class:`AppConnectBuild` for any :class:`appconnect.BuildInfo`\n that cannot be found in the DB. These pending :class:`AppConnectBuild`s are immediately saved\n upon creation.\n \"\"\"\n pending_builds = []\n with sentry_sdk.start_span(op='appconnect-update-builds', description=\n 'Update AppStoreConnect builds in database'):\n for build in to_process:\n build_state = get_or_create_persisted_build(project, config, build)\n if not build_state.fetched:\n pending_builds.append((build, build_state))\n LatestAppConnectBuildsCheck.objects.create_or_update(project=project,\n source_id=config.id, values={'last_checked': timezone.now()})\n return pending_builds\n\n\n@instrumented_task(name='sentry.tasks.app_store_connect.refresh_all_builds',\n queue='appstoreconnect', ignore_result=True)\ndef refresh_all_builds() ->None:\n inner_refresh_all_builds()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef inner_dsym_download(project_id: int, config_id: str) ->None:\n \"\"\"Downloads the dSYMs from App Store Connect and stores them in the Project's debug files.\"\"\"\n with sdk.configure_scope() as scope:\n scope.set_tag('project', project_id)\n scope.set_tag('config_id', config_id)\n project = Project.objects.get(pk=project_id)\n config = appconnect.AppStoreConnectConfig.from_project_config(project,\n config_id)\n client = appconnect.AppConnectClient.from_config(config)\n listed_builds = client.list_builds()\n builds = process_builds(project=project, config=config, to_process=\n listed_builds)\n if not builds:\n return\n for i, (build, build_state) in enumerate(builds):\n with sdk.configure_scope() as scope:\n scope.set_context('dsym_downloads', {'total': len(builds),\n 'completed': i})\n with tempfile.NamedTemporaryFile() as dsyms_zip:\n try:\n client.download_dsyms(build, pathlib.Path(dsyms_zip.name))\n except appconnect.NoDsymsError:\n logger.debug('No dSYMs for build %s', build)\n except appconnect.PendingDsymsError:\n logger.debug('dSYM url currently unavailable for build %s',\n build)\n continue\n except appstoreconnect_api.UnauthorizedError:\n sentry_sdk.capture_message(\n 'Not authorized to download dSYM using current App Store Connect credentials'\n , level='info')\n return\n except appstoreconnect_api.ForbiddenError:\n sentry_sdk.capture_message(\n 'Forbidden from downloading dSYM using current App Store Connect credentials'\n , level='info')\n return\n except ValueError as e:\n sdk.capture_exception(e)\n continue\n except appstoreconnect_api.RequestError as e:\n sdk.capture_exception(e)\n continue\n except requests.RequestException as e:\n sdk.capture_exception(e)\n continue\n else:\n create_difs_from_dsyms_zip(dsyms_zip.name, project)\n logger.debug('Uploaded dSYMs for build %s', build)\n metrics.incr('tasks.app_store_connect.builds_ingested',\n sample_rate=1)\n build_state.fetched = True\n build_state.save()\n\n\ndef 
create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) ->None:\n with sentry_sdk.start_span(op='dsym-difs', description=\n 'Extract difs dSYM zip'):\n with open(dsyms_zip, 'rb') as fp:\n created = debugfile.create_files_from_dif_zip(fp, project,\n accept_unknown=True)\n for proj_debug_file in created:\n logger.debug('Created %r for project %s', proj_debug_file,\n project.id)\n\n\ndef get_or_create_persisted_build(project: Project, config: appconnect.\n AppStoreConnectConfig, build: appconnect.BuildInfo) ->AppConnectBuild:\n \"\"\"Fetches the sentry-internal :class:`AppConnectBuild`.\n\n The build corresponds to the :class:`appconnect.BuildInfo` as returned by the\n AppStore Connect API. If no build exists yet, a new \"pending\" build is created.\n \"\"\"\n try:\n build_state = AppConnectBuild.objects.get(project=project, app_id=\n build.app_id, platform=build.platform, bundle_short_version=\n build.version, bundle_version=build.build_number)\n except AppConnectBuild.DoesNotExist:\n build_state = AppConnectBuild(project=project, app_id=build.app_id,\n bundle_id=config.bundleId, platform=build.platform,\n bundle_short_version=build.version, bundle_version=build.\n build_number, uploaded_to_appstore=build.uploaded_date,\n first_seen=timezone.now(), fetched=False)\n build_state.save()\n return build_state\n\n\ndef process_builds(project: Project, config: appconnect.\n AppStoreConnectConfig, to_process: List[appconnect.BuildInfo]) ->List[Tuple\n [appconnect.BuildInfo, AppConnectBuild]]:\n \"\"\"Returns a list of builds whose dSYMs need to be updated or fetched.\n\n This will create a new \"pending\" :class:`AppConnectBuild` for any :class:`appconnect.BuildInfo`\n that cannot be found in the DB. 
These pending :class:`AppConnectBuild`s are immediately saved\n upon creation.\n \"\"\"\n pending_builds = []\n with sentry_sdk.start_span(op='appconnect-update-builds', description=\n 'Update AppStoreConnect builds in database'):\n for build in to_process:\n build_state = get_or_create_persisted_build(project, config, build)\n if not build_state.fetched:\n pending_builds.append((build, build_state))\n LatestAppConnectBuildsCheck.objects.create_or_update(project=project,\n source_id=config.id, values={'last_checked': timezone.now()})\n return pending_builds\n\n\n@instrumented_task(name='sentry.tasks.app_store_connect.refresh_all_builds',\n queue='appstoreconnect', ignore_result=True)\ndef refresh_all_builds() ->None:\n inner_refresh_all_builds()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@instrumented_task(name='sentry.tasks.app_store_connect.dsym_download',\n queue='appstoreconnect', ignore_result=True)\ndef dsym_download(project_id: int, config_id: str) ->None:\n inner_dsym_download(project_id=project_id, config_id=config_id)\n\n\ndef inner_dsym_download(project_id: int, config_id: str) ->None:\n \"\"\"Downloads the dSYMs from App Store Connect and stores them in the Project's debug files.\"\"\"\n with sdk.configure_scope() as scope:\n scope.set_tag('project', project_id)\n scope.set_tag('config_id', config_id)\n project = Project.objects.get(pk=project_id)\n config = appconnect.AppStoreConnectConfig.from_project_config(project,\n config_id)\n client = appconnect.AppConnectClient.from_config(config)\n listed_builds = client.list_builds()\n builds = process_builds(project=project, config=config, to_process=\n listed_builds)\n if not builds:\n return\n for i, (build, build_state) in enumerate(builds):\n with sdk.configure_scope() as scope:\n scope.set_context('dsym_downloads', {'total': len(builds),\n 'completed': i})\n with tempfile.NamedTemporaryFile() as dsyms_zip:\n try:\n client.download_dsyms(build, pathlib.Path(dsyms_zip.name))\n except appconnect.NoDsymsError:\n logger.debug('No dSYMs for build %s', build)\n except appconnect.PendingDsymsError:\n logger.debug('dSYM url currently unavailable for build %s',\n build)\n continue\n except appstoreconnect_api.UnauthorizedError:\n sentry_sdk.capture_message(\n 'Not authorized to download dSYM using current App Store Connect credentials'\n , level='info')\n return\n except appstoreconnect_api.ForbiddenError:\n sentry_sdk.capture_message(\n 'Forbidden from downloading dSYM using current App Store Connect credentials'\n , level='info')\n return\n except ValueError as e:\n sdk.capture_exception(e)\n continue\n except appstoreconnect_api.RequestError as e:\n sdk.capture_exception(e)\n continue\n except requests.RequestException as e:\n sdk.capture_exception(e)\n continue\n 
else:\n create_difs_from_dsyms_zip(dsyms_zip.name, project)\n logger.debug('Uploaded dSYMs for build %s', build)\n metrics.incr('tasks.app_store_connect.builds_ingested',\n sample_rate=1)\n build_state.fetched = True\n build_state.save()\n\n\ndef create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) ->None:\n with sentry_sdk.start_span(op='dsym-difs', description=\n 'Extract difs dSYM zip'):\n with open(dsyms_zip, 'rb') as fp:\n created = debugfile.create_files_from_dif_zip(fp, project,\n accept_unknown=True)\n for proj_debug_file in created:\n logger.debug('Created %r for project %s', proj_debug_file,\n project.id)\n\n\ndef get_or_create_persisted_build(project: Project, config: appconnect.\n AppStoreConnectConfig, build: appconnect.BuildInfo) ->AppConnectBuild:\n \"\"\"Fetches the sentry-internal :class:`AppConnectBuild`.\n\n The build corresponds to the :class:`appconnect.BuildInfo` as returned by the\n AppStore Connect API. If no build exists yet, a new \"pending\" build is created.\n \"\"\"\n try:\n build_state = AppConnectBuild.objects.get(project=project, app_id=\n build.app_id, platform=build.platform, bundle_short_version=\n build.version, bundle_version=build.build_number)\n except AppConnectBuild.DoesNotExist:\n build_state = AppConnectBuild(project=project, app_id=build.app_id,\n bundle_id=config.bundleId, platform=build.platform,\n bundle_short_version=build.version, bundle_version=build.\n build_number, uploaded_to_appstore=build.uploaded_date,\n first_seen=timezone.now(), fetched=False)\n build_state.save()\n return build_state\n\n\ndef process_builds(project: Project, config: appconnect.\n AppStoreConnectConfig, to_process: List[appconnect.BuildInfo]) ->List[Tuple\n [appconnect.BuildInfo, AppConnectBuild]]:\n \"\"\"Returns a list of builds whose dSYMs need to be updated or fetched.\n\n This will create a new \"pending\" :class:`AppConnectBuild` for any :class:`appconnect.BuildInfo`\n that cannot be found in the DB. 
These pending :class:`AppConnectBuild`s are immediately saved\n upon creation.\n \"\"\"\n pending_builds = []\n with sentry_sdk.start_span(op='appconnect-update-builds', description=\n 'Update AppStoreConnect builds in database'):\n for build in to_process:\n build_state = get_or_create_persisted_build(project, config, build)\n if not build_state.fetched:\n pending_builds.append((build, build_state))\n LatestAppConnectBuildsCheck.objects.create_or_update(project=project,\n source_id=config.id, values={'last_checked': timezone.now()})\n return pending_builds\n\n\n@instrumented_task(name='sentry.tasks.app_store_connect.refresh_all_builds',\n queue='appstoreconnect', ignore_result=True)\ndef refresh_all_builds() ->None:\n inner_refresh_all_builds()\n\n\ndef inner_refresh_all_builds() ->None:\n \"\"\"Refreshes all AppStoreConnect builds for all projects.\n\n This iterates over all the projects configured in Sentry and for any which has an\n AppStoreConnect symbol source configured will poll the AppStoreConnect API to check if\n there are new builds.\n \"\"\"\n options = ProjectOption.objects.filter(key=appconnect.\n SYMBOL_SOURCES_PROP_NAME)\n count = 0\n for option in options:\n with sdk.push_scope() as scope:\n scope.set_tag('project', option.project_id)\n try:\n if not option.value:\n continue\n all_sources: List[Mapping[str, str]] = json.loads(option.value)\n for source in all_sources:\n try:\n source_id = source['id']\n source_type = source['type']\n except KeyError:\n logger.exception('Malformed symbol source')\n continue\n if source_type == appconnect.SYMBOL_SOURCE_TYPE_NAME:\n dsym_download.apply_async(kwargs={'project_id':\n option.project_id, 'config_id': source_id})\n count += 1\n except Exception:\n logger.exception('Failed to refresh AppStoreConnect builds')\n metrics.gauge('tasks.app_store_connect.refreshed', count, sample_rate=1)\n",
"step-4": "<mask token>\nimport logging\nimport pathlib\nimport tempfile\nfrom typing import List, Mapping, Tuple\nimport requests\nimport sentry_sdk\nfrom django.utils import timezone\nfrom sentry.lang.native import appconnect\nfrom sentry.models import AppConnectBuild, LatestAppConnectBuildsCheck, Project, ProjectOption, debugfile\nfrom sentry.tasks.base import instrumented_task\nfrom sentry.utils import json, metrics, sdk\nfrom sentry.utils.appleconnect import appstore_connect as appstoreconnect_api\nlogger = logging.getLogger(__name__)\n\n\n@instrumented_task(name='sentry.tasks.app_store_connect.dsym_download',\n queue='appstoreconnect', ignore_result=True)\ndef dsym_download(project_id: int, config_id: str) ->None:\n inner_dsym_download(project_id=project_id, config_id=config_id)\n\n\ndef inner_dsym_download(project_id: int, config_id: str) ->None:\n \"\"\"Downloads the dSYMs from App Store Connect and stores them in the Project's debug files.\"\"\"\n with sdk.configure_scope() as scope:\n scope.set_tag('project', project_id)\n scope.set_tag('config_id', config_id)\n project = Project.objects.get(pk=project_id)\n config = appconnect.AppStoreConnectConfig.from_project_config(project,\n config_id)\n client = appconnect.AppConnectClient.from_config(config)\n listed_builds = client.list_builds()\n builds = process_builds(project=project, config=config, to_process=\n listed_builds)\n if not builds:\n return\n for i, (build, build_state) in enumerate(builds):\n with sdk.configure_scope() as scope:\n scope.set_context('dsym_downloads', {'total': len(builds),\n 'completed': i})\n with tempfile.NamedTemporaryFile() as dsyms_zip:\n try:\n client.download_dsyms(build, pathlib.Path(dsyms_zip.name))\n except appconnect.NoDsymsError:\n logger.debug('No dSYMs for build %s', build)\n except appconnect.PendingDsymsError:\n logger.debug('dSYM url currently unavailable for build %s',\n build)\n continue\n except appstoreconnect_api.UnauthorizedError:\n 
sentry_sdk.capture_message(\n 'Not authorized to download dSYM using current App Store Connect credentials'\n , level='info')\n return\n except appstoreconnect_api.ForbiddenError:\n sentry_sdk.capture_message(\n 'Forbidden from downloading dSYM using current App Store Connect credentials'\n , level='info')\n return\n except ValueError as e:\n sdk.capture_exception(e)\n continue\n except appstoreconnect_api.RequestError as e:\n sdk.capture_exception(e)\n continue\n except requests.RequestException as e:\n sdk.capture_exception(e)\n continue\n else:\n create_difs_from_dsyms_zip(dsyms_zip.name, project)\n logger.debug('Uploaded dSYMs for build %s', build)\n metrics.incr('tasks.app_store_connect.builds_ingested',\n sample_rate=1)\n build_state.fetched = True\n build_state.save()\n\n\ndef create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) ->None:\n with sentry_sdk.start_span(op='dsym-difs', description=\n 'Extract difs dSYM zip'):\n with open(dsyms_zip, 'rb') as fp:\n created = debugfile.create_files_from_dif_zip(fp, project,\n accept_unknown=True)\n for proj_debug_file in created:\n logger.debug('Created %r for project %s', proj_debug_file,\n project.id)\n\n\ndef get_or_create_persisted_build(project: Project, config: appconnect.\n AppStoreConnectConfig, build: appconnect.BuildInfo) ->AppConnectBuild:\n \"\"\"Fetches the sentry-internal :class:`AppConnectBuild`.\n\n The build corresponds to the :class:`appconnect.BuildInfo` as returned by the\n AppStore Connect API. 
If no build exists yet, a new \"pending\" build is created.\n \"\"\"\n try:\n build_state = AppConnectBuild.objects.get(project=project, app_id=\n build.app_id, platform=build.platform, bundle_short_version=\n build.version, bundle_version=build.build_number)\n except AppConnectBuild.DoesNotExist:\n build_state = AppConnectBuild(project=project, app_id=build.app_id,\n bundle_id=config.bundleId, platform=build.platform,\n bundle_short_version=build.version, bundle_version=build.\n build_number, uploaded_to_appstore=build.uploaded_date,\n first_seen=timezone.now(), fetched=False)\n build_state.save()\n return build_state\n\n\ndef process_builds(project: Project, config: appconnect.\n AppStoreConnectConfig, to_process: List[appconnect.BuildInfo]) ->List[Tuple\n [appconnect.BuildInfo, AppConnectBuild]]:\n \"\"\"Returns a list of builds whose dSYMs need to be updated or fetched.\n\n This will create a new \"pending\" :class:`AppConnectBuild` for any :class:`appconnect.BuildInfo`\n that cannot be found in the DB. 
These pending :class:`AppConnectBuild`s are immediately saved\n upon creation.\n \"\"\"\n pending_builds = []\n with sentry_sdk.start_span(op='appconnect-update-builds', description=\n 'Update AppStoreConnect builds in database'):\n for build in to_process:\n build_state = get_or_create_persisted_build(project, config, build)\n if not build_state.fetched:\n pending_builds.append((build, build_state))\n LatestAppConnectBuildsCheck.objects.create_or_update(project=project,\n source_id=config.id, values={'last_checked': timezone.now()})\n return pending_builds\n\n\n@instrumented_task(name='sentry.tasks.app_store_connect.refresh_all_builds',\n queue='appstoreconnect', ignore_result=True)\ndef refresh_all_builds() ->None:\n inner_refresh_all_builds()\n\n\ndef inner_refresh_all_builds() ->None:\n \"\"\"Refreshes all AppStoreConnect builds for all projects.\n\n This iterates over all the projects configured in Sentry and for any which has an\n AppStoreConnect symbol source configured will poll the AppStoreConnect API to check if\n there are new builds.\n \"\"\"\n options = ProjectOption.objects.filter(key=appconnect.\n SYMBOL_SOURCES_PROP_NAME)\n count = 0\n for option in options:\n with sdk.push_scope() as scope:\n scope.set_tag('project', option.project_id)\n try:\n if not option.value:\n continue\n all_sources: List[Mapping[str, str]] = json.loads(option.value)\n for source in all_sources:\n try:\n source_id = source['id']\n source_type = source['type']\n except KeyError:\n logger.exception('Malformed symbol source')\n continue\n if source_type == appconnect.SYMBOL_SOURCE_TYPE_NAME:\n dsym_download.apply_async(kwargs={'project_id':\n option.project_id, 'config_id': source_id})\n count += 1\n except Exception:\n logger.exception('Failed to refresh AppStoreConnect builds')\n metrics.gauge('tasks.app_store_connect.refreshed', count, sample_rate=1)\n",
"step-5": "\"\"\"Tasks for managing Debug Information Files from Apple App Store Connect.\n\nUsers can instruct Sentry to download dSYM from App Store Connect and put them into Sentry's\ndebug files. These tasks enable this functionality.\n\"\"\"\n\nimport logging\nimport pathlib\nimport tempfile\nfrom typing import List, Mapping, Tuple\n\nimport requests\nimport sentry_sdk\nfrom django.utils import timezone\n\nfrom sentry.lang.native import appconnect\nfrom sentry.models import (\n AppConnectBuild,\n LatestAppConnectBuildsCheck,\n Project,\n ProjectOption,\n debugfile,\n)\nfrom sentry.tasks.base import instrumented_task\nfrom sentry.utils import json, metrics, sdk\nfrom sentry.utils.appleconnect import appstore_connect as appstoreconnect_api\n\nlogger = logging.getLogger(__name__)\n\n\n# Sadly this decorator makes this entire function untyped for now as it does not itself have\n# typing annotations. So we do all the work outside of the decorated task function to work\n# around this.\n# Since all these args must be pickled we keep them to built-in types as well.\n@instrumented_task(name=\"sentry.tasks.app_store_connect.dsym_download\", queue=\"appstoreconnect\", ignore_result=True) # type: ignore\ndef dsym_download(project_id: int, config_id: str) -> None:\n inner_dsym_download(project_id=project_id, config_id=config_id)\n\n\ndef inner_dsym_download(project_id: int, config_id: str) -> None:\n \"\"\"Downloads the dSYMs from App Store Connect and stores them in the Project's debug files.\"\"\"\n with sdk.configure_scope() as scope:\n scope.set_tag(\"project\", project_id)\n scope.set_tag(\"config_id\", config_id)\n\n project = Project.objects.get(pk=project_id)\n config = appconnect.AppStoreConnectConfig.from_project_config(project, config_id)\n client = appconnect.AppConnectClient.from_config(config)\n\n listed_builds = client.list_builds()\n builds = process_builds(project=project, config=config, to_process=listed_builds)\n\n if not builds:\n return\n\n for i, 
(build, build_state) in enumerate(builds):\n with sdk.configure_scope() as scope:\n scope.set_context(\"dsym_downloads\", {\"total\": len(builds), \"completed\": i})\n with tempfile.NamedTemporaryFile() as dsyms_zip:\n try:\n client.download_dsyms(build, pathlib.Path(dsyms_zip.name))\n # For no dSYMs, let the build be marked as fetched so they're not\n # repeatedly re-checked every time this task is run.\n except appconnect.NoDsymsError:\n logger.debug(\"No dSYMs for build %s\", build)\n # Moves on to the next build so we don't check off fetched. This url will\n # eventuallyTM be populated, so revisit it at a later time.\n except appconnect.PendingDsymsError:\n logger.debug(\"dSYM url currently unavailable for build %s\", build)\n continue\n # early-return in unauthorized and forbidden to avoid trying all the other builds\n # as well, since an expired token will error for all of them.\n # the error is also swallowed unreported because this is an expected and actionable\n # error.\n except appstoreconnect_api.UnauthorizedError:\n sentry_sdk.capture_message(\n \"Not authorized to download dSYM using current App Store Connect credentials\",\n level=\"info\",\n )\n return\n except appstoreconnect_api.ForbiddenError:\n sentry_sdk.capture_message(\n \"Forbidden from downloading dSYM using current App Store Connect credentials\",\n level=\"info\",\n )\n return\n # Don't let malformed URLs abort all pending downloads in case it's an isolated instance\n except ValueError as e:\n sdk.capture_exception(e)\n continue\n # Assume request errors are a server side issue and do not abort all the\n # pending downloads.\n except appstoreconnect_api.RequestError as e:\n sdk.capture_exception(e)\n continue\n except requests.RequestException as e:\n sdk.capture_exception(e)\n continue\n else:\n create_difs_from_dsyms_zip(dsyms_zip.name, project)\n logger.debug(\"Uploaded dSYMs for build %s\", build)\n metrics.incr(\"tasks.app_store_connect.builds_ingested\", sample_rate=1)\n\n 
build_state.fetched = True\n build_state.save()\n\n\ndef create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) -> None:\n with sentry_sdk.start_span(op=\"dsym-difs\", description=\"Extract difs dSYM zip\"):\n with open(dsyms_zip, \"rb\") as fp:\n created = debugfile.create_files_from_dif_zip(fp, project, accept_unknown=True)\n for proj_debug_file in created:\n logger.debug(\"Created %r for project %s\", proj_debug_file, project.id)\n\n\ndef get_or_create_persisted_build(\n project: Project, config: appconnect.AppStoreConnectConfig, build: appconnect.BuildInfo\n) -> AppConnectBuild:\n \"\"\"Fetches the sentry-internal :class:`AppConnectBuild`.\n\n The build corresponds to the :class:`appconnect.BuildInfo` as returned by the\n AppStore Connect API. If no build exists yet, a new \"pending\" build is created.\n \"\"\"\n try:\n build_state = AppConnectBuild.objects.get(\n project=project,\n app_id=build.app_id,\n platform=build.platform,\n bundle_short_version=build.version,\n bundle_version=build.build_number,\n )\n except AppConnectBuild.DoesNotExist:\n build_state = AppConnectBuild(\n project=project,\n app_id=build.app_id,\n bundle_id=config.bundleId,\n platform=build.platform,\n bundle_short_version=build.version,\n bundle_version=build.build_number,\n uploaded_to_appstore=build.uploaded_date,\n first_seen=timezone.now(),\n fetched=False,\n )\n build_state.save()\n return build_state\n\n\ndef process_builds(\n project: Project,\n config: appconnect.AppStoreConnectConfig,\n to_process: List[appconnect.BuildInfo],\n) -> List[Tuple[appconnect.BuildInfo, AppConnectBuild]]:\n \"\"\"Returns a list of builds whose dSYMs need to be updated or fetched.\n\n This will create a new \"pending\" :class:`AppConnectBuild` for any :class:`appconnect.BuildInfo`\n that cannot be found in the DB. 
These pending :class:`AppConnectBuild`s are immediately saved\n upon creation.\n \"\"\"\n\n pending_builds = []\n\n with sentry_sdk.start_span(\n op=\"appconnect-update-builds\", description=\"Update AppStoreConnect builds in database\"\n ):\n for build in to_process:\n build_state = get_or_create_persisted_build(project, config, build)\n if not build_state.fetched:\n pending_builds.append((build, build_state))\n\n LatestAppConnectBuildsCheck.objects.create_or_update(\n project=project, source_id=config.id, values={\"last_checked\": timezone.now()}\n )\n\n return pending_builds\n\n\n# Untyped decorator would stop type-checking of entire function, split into an inner\n# function instead which can be type checked.\n@instrumented_task( # type: ignore\n name=\"sentry.tasks.app_store_connect.refresh_all_builds\",\n queue=\"appstoreconnect\",\n ignore_result=True,\n)\ndef refresh_all_builds() -> None:\n inner_refresh_all_builds()\n\n\ndef inner_refresh_all_builds() -> None:\n \"\"\"Refreshes all AppStoreConnect builds for all projects.\n\n This iterates over all the projects configured in Sentry and for any which has an\n AppStoreConnect symbol source configured will poll the AppStoreConnect API to check if\n there are new builds.\n \"\"\"\n # We have no way to query for AppStore Connect symbol sources directly, but\n # getting all of the project options that have custom symbol sources\n # configured is a reasonable compromise, as the number of those should be\n # low enough to traverse every hour.\n # Another alternative would be to get a list of projects that have had a\n # previous successful import, as indicated by existing `AppConnectBuild`\n # objects. 
But that would miss projects that have a valid AppStore Connect\n # setup, but have not yet published any kind of build to AppStore.\n options = ProjectOption.objects.filter(key=appconnect.SYMBOL_SOURCES_PROP_NAME)\n count = 0\n for option in options:\n with sdk.push_scope() as scope:\n scope.set_tag(\"project\", option.project_id)\n try:\n if not option.value:\n # An empty string set as option value, the UI does this when deleting\n # all sources. This is not valid JSON.\n continue\n # We are parsing JSON thus all types are Any, so give the type-checker some\n # extra help. We are maybe slightly lying about the type, but the\n # attributes we do access are all string values.\n all_sources: List[Mapping[str, str]] = json.loads(option.value)\n for source in all_sources:\n try:\n source_id = source[\"id\"]\n source_type = source[\"type\"]\n except KeyError:\n logger.exception(\"Malformed symbol source\")\n continue\n if source_type == appconnect.SYMBOL_SOURCE_TYPE_NAME:\n dsym_download.apply_async(\n kwargs={\n \"project_id\": option.project_id,\n \"config_id\": source_id,\n }\n )\n count += 1\n except Exception:\n logger.exception(\"Failed to refresh AppStoreConnect builds\")\n metrics.gauge(\"tasks.app_store_connect.refreshed\", count, sample_rate=1)\n",
"step-ids": [
3,
5,
7,
9,
10
]
}
|
[
3,
5,
7,
9,
10
] |
import numpy as np
import sys
import os
import cv2
if __name__ == "__main__":
# print(sys.argv[1])
# img = cv2.imread(sys.argv[1], 0)
# cv2.imshow('img', img)
# cv2.waitKey(0)
img = np.array([[1, 2], [1, 3], [1, 4]])
print(img.tolist())
sys.stdout.flush()
|
normal
|
{
"blob_id": "54833c19d68bb7a1817639ef761367ce75a3a46f",
"index": 9200,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n img = np.array([[1, 2], [1, 3], [1, 4]])\n print(img.tolist())\n sys.stdout.flush()\n",
"step-3": "import numpy as np\nimport sys\nimport os\nimport cv2\nif __name__ == '__main__':\n img = np.array([[1, 2], [1, 3], [1, 4]])\n print(img.tolist())\n sys.stdout.flush()\n",
"step-4": "import numpy as np\nimport sys\nimport os\nimport cv2\n\n\nif __name__ == \"__main__\":\n \n # print(sys.argv[1])\n # img = cv2.imread(sys.argv[1], 0)\n # cv2.imshow('img', img)\n # cv2.waitKey(0)\n img = np.array([[1, 2], [1, 3], [1, 4]])\n print(img.tolist())\n sys.stdout.flush()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('core', '0001_initial')]
operations = [migrations.AlterField(model_name='mascota', name=
'descripcion', field=models.CharField(max_length=200))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('core', '0001_initial')]
operations = [migrations.AlterField(model_name='mascota', name=
'descripcion', field=models.CharField(max_length=200))]
<|reserved_special_token_1|>
# Generated by Django 2.1.2 on 2018-10-19 22:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='mascota',
name='descripcion',
field=models.CharField(max_length=200),
),
]
|
flexible
|
{
"blob_id": "fcfec60a2302ee0c1385add053d4371040a2aff4",
"index": 3667,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0001_initial')]\n operations = [migrations.AlterField(model_name='mascota', name=\n 'descripcion', field=models.CharField(max_length=200))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0001_initial')]\n operations = [migrations.AlterField(model_name='mascota', name=\n 'descripcion', field=models.CharField(max_length=200))]\n",
"step-5": "# Generated by Django 2.1.2 on 2018-10-19 22:13\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='mascota',\n name='descripcion',\n field=models.CharField(max_length=200),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_input_text(expected_result, actual_result):
assert expected_result == actual_result, f'expected {expected_result}, got {actual_result}'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_input_text(expected_result, actual_result):
assert expected_result == actual_result, f'expected {expected_result}, got {actual_result}'
test_input_text(a, b)
<|reserved_special_token_1|>
a, b = input().split()
def test_input_text(expected_result, actual_result):
assert expected_result == actual_result, f'expected {expected_result}, got {actual_result}'
test_input_text(a, b)
<|reserved_special_token_1|>
a, b = input().split()
def test_input_text(expected_result, actual_result):
assert expected_result == actual_result, \
f'expected {expected_result}, got {actual_result}'
test_input_text(a,b)
|
flexible
|
{
"blob_id": "63391b31d1746f9b3583df5353ae160a430943a9",
"index": 9027,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_input_text(expected_result, actual_result):\n assert expected_result == actual_result, f'expected {expected_result}, got {actual_result}'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_input_text(expected_result, actual_result):\n assert expected_result == actual_result, f'expected {expected_result}, got {actual_result}'\n\n\ntest_input_text(a, b)\n",
"step-4": "a, b = input().split()\n\n\ndef test_input_text(expected_result, actual_result):\n assert expected_result == actual_result, f'expected {expected_result}, got {actual_result}'\n\n\ntest_input_text(a, b)\n",
"step-5": "a, b = input().split()\n\ndef test_input_text(expected_result, actual_result):\n assert expected_result == actual_result, \\\n f'expected {expected_result}, got {actual_result}'\n\ntest_input_text(a,b)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def create_graph():
with tf.gfile.FastGFile(out_pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print_tensors_in_checkpoint_file(checkpoint_path, tensor_name='',
all_tensors=True, all_tensor_names=True)
<|reserved_special_token_0|>
for key in var_to_shape_map:
print('tensor_name: ', key)
n += 1
print('n:', n)
<|reserved_special_token_0|>
def create_graph():
with tf.gfile.FastGFile(out_pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
create_graph()
<|reserved_special_token_0|>
for tensor_name in tensor_name_list:
print('pd:', tensor_name, '\n')
m += 1
print('m:', m)
<|reserved_special_token_0|>
graph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())
tf.import_graph_def(graph_def, name='graph')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
checkpoint_path = '/your/path'
print_tensors_in_checkpoint_file(checkpoint_path, tensor_name='',
all_tensors=True, all_tensor_names=True)
<|reserved_special_token_0|>
checkpoint_path = '/your/path'
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
n = 0
for key in var_to_shape_map:
print('tensor_name: ', key)
n += 1
print('n:', n)
<|reserved_special_token_0|>
out_pb_path = '/your/path'
def create_graph():
with tf.gfile.FastGFile(out_pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
create_graph()
tensor_name_list = [tensor.name for tensor in tf.get_default_graph().
as_graph_def().node]
m = 0
for tensor_name in tensor_name_list:
print('pd:', tensor_name, '\n')
m += 1
print('m:', m)
<|reserved_special_token_0|>
model = '/your/path'
graph = tf.get_default_graph()
graph_def = graph.as_graph_def()
graph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())
tf.import_graph_def(graph_def, name='graph')
summaryWriter = tf.summary.FileWriter('log/', graph)
<|reserved_special_token_1|>
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
checkpoint_path = '/your/path'
print_tensors_in_checkpoint_file(checkpoint_path, tensor_name='',
all_tensors=True, all_tensor_names=True)
from tensorflow.python import pywrap_tensorflow
checkpoint_path = '/your/path'
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
n = 0
for key in var_to_shape_map:
print('tensor_name: ', key)
n += 1
print('n:', n)
import tensorflow as tf
import os
out_pb_path = '/your/path'
def create_graph():
with tf.gfile.FastGFile(out_pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
create_graph()
tensor_name_list = [tensor.name for tensor in tf.get_default_graph().
as_graph_def().node]
m = 0
for tensor_name in tensor_name_list:
print('pd:', tensor_name, '\n')
m += 1
print('m:', m)
import tensorflow as tf
from tensorflow.python.platform import gfile
model = '/your/path'
graph = tf.get_default_graph()
graph_def = graph.as_graph_def()
graph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())
tf.import_graph_def(graph_def, name='graph')
summaryWriter = tf.summary.FileWriter('log/', graph)
<|reserved_special_token_1|>
#打印ckpt或pb模型的tensor
# ckpt模型
#第一种方法:
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
checkpoint_path="/your/path"
print_tensors_in_checkpoint_file(checkpoint_path,tensor_name='', all_tensors=True, all_tensor_names=True)
#第二种方法:
from tensorflow.python import pywrap_tensorflow
checkpoint_path = "/your/path"
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
n=0
for key in var_to_shape_map:
print("tensor_name: ", key)
#print("****",reader.get_tensor(key))
n+=1
print("n:",n)
#pb模型
#打印tensor
import tensorflow as tf
import os
out_pb_path="/your/path"
def create_graph():
with tf.gfile.FastGFile(out_pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
create_graph()
tensor_name_list = [tensor.name for tensor in tf.get_default_graph().as_graph_def().node]
m=0
for tensor_name in tensor_name_list:
print("pd:",tensor_name,'\n')
m+=1
print("m:",m)
#获得pb模型的图
import tensorflow as tf
from tensorflow.python.platform import gfile
model = "/your/path"
graph = tf.get_default_graph()
graph_def = graph.as_graph_def()
graph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())
tf.import_graph_def(graph_def, name='graph')
summaryWriter = tf.summary.FileWriter('log/', graph)
#命令tensorboard --logdir=/opt/data/hyh/tboard/tusimple_lanenet/vgg
|
flexible
|
{
"blob_id": "50fab726b90f65a82c1206a8c7df955a8b76da99",
"index": 1572,
"step-1": "<mask token>\n\n\ndef create_graph():\n with tf.gfile.FastGFile(out_pb_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint_tensors_in_checkpoint_file(checkpoint_path, tensor_name='',\n all_tensors=True, all_tensor_names=True)\n<mask token>\nfor key in var_to_shape_map:\n print('tensor_name: ', key)\n n += 1\nprint('n:', n)\n<mask token>\n\n\ndef create_graph():\n with tf.gfile.FastGFile(out_pb_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')\n\n\ncreate_graph()\n<mask token>\nfor tensor_name in tensor_name_list:\n print('pd:', tensor_name, '\\n')\n m += 1\nprint('m:', m)\n<mask token>\ngraph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())\ntf.import_graph_def(graph_def, name='graph')\n<mask token>\n",
"step-3": "<mask token>\ncheckpoint_path = '/your/path'\nprint_tensors_in_checkpoint_file(checkpoint_path, tensor_name='',\n all_tensors=True, all_tensor_names=True)\n<mask token>\ncheckpoint_path = '/your/path'\nreader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)\nvar_to_shape_map = reader.get_variable_to_shape_map()\nn = 0\nfor key in var_to_shape_map:\n print('tensor_name: ', key)\n n += 1\nprint('n:', n)\n<mask token>\nout_pb_path = '/your/path'\n\n\ndef create_graph():\n with tf.gfile.FastGFile(out_pb_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')\n\n\ncreate_graph()\ntensor_name_list = [tensor.name for tensor in tf.get_default_graph().\n as_graph_def().node]\nm = 0\nfor tensor_name in tensor_name_list:\n print('pd:', tensor_name, '\\n')\n m += 1\nprint('m:', m)\n<mask token>\nmodel = '/your/path'\ngraph = tf.get_default_graph()\ngraph_def = graph.as_graph_def()\ngraph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())\ntf.import_graph_def(graph_def, name='graph')\nsummaryWriter = tf.summary.FileWriter('log/', graph)\n",
"step-4": "from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file\ncheckpoint_path = '/your/path'\nprint_tensors_in_checkpoint_file(checkpoint_path, tensor_name='',\n all_tensors=True, all_tensor_names=True)\nfrom tensorflow.python import pywrap_tensorflow\ncheckpoint_path = '/your/path'\nreader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)\nvar_to_shape_map = reader.get_variable_to_shape_map()\nn = 0\nfor key in var_to_shape_map:\n print('tensor_name: ', key)\n n += 1\nprint('n:', n)\nimport tensorflow as tf\nimport os\nout_pb_path = '/your/path'\n\n\ndef create_graph():\n with tf.gfile.FastGFile(out_pb_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')\n\n\ncreate_graph()\ntensor_name_list = [tensor.name for tensor in tf.get_default_graph().\n as_graph_def().node]\nm = 0\nfor tensor_name in tensor_name_list:\n print('pd:', tensor_name, '\\n')\n m += 1\nprint('m:', m)\nimport tensorflow as tf\nfrom tensorflow.python.platform import gfile\nmodel = '/your/path'\ngraph = tf.get_default_graph()\ngraph_def = graph.as_graph_def()\ngraph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())\ntf.import_graph_def(graph_def, name='graph')\nsummaryWriter = tf.summary.FileWriter('log/', graph)\n",
"step-5": "#打印ckpt或pb模型的tensor\r\n\r\n# ckpt模型 \r\n#第一种方法: \r\nfrom tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file \r\ncheckpoint_path=\"/your/path\"\r\nprint_tensors_in_checkpoint_file(checkpoint_path,tensor_name='', all_tensors=True, all_tensor_names=True)\r\n\r\n#第二种方法:\r\nfrom tensorflow.python import pywrap_tensorflow\r\ncheckpoint_path = \"/your/path\"\r\nreader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)\r\nvar_to_shape_map = reader.get_variable_to_shape_map()\r\nn=0\r\nfor key in var_to_shape_map:\r\n print(\"tensor_name: \", key)\r\n #print(\"****\",reader.get_tensor(key))\r\n n+=1\r\nprint(\"n:\",n)\r\n\r\n#pb模型\r\n#打印tensor\r\nimport tensorflow as tf\r\nimport os\r\nout_pb_path=\"/your/path\"\r\ndef create_graph():\r\n with tf.gfile.FastGFile(out_pb_path, 'rb') as f:\r\n graph_def = tf.GraphDef()\r\n graph_def.ParseFromString(f.read())\r\n tf.import_graph_def(graph_def, name='')\r\n \r\ncreate_graph()\r\ntensor_name_list = [tensor.name for tensor in tf.get_default_graph().as_graph_def().node]\r\nm=0\r\nfor tensor_name in tensor_name_list:\r\n print(\"pd:\",tensor_name,'\\n')\r\n m+=1\r\nprint(\"m:\",m)\r\n\r\n#获得pb模型的图\r\nimport tensorflow as tf\r\nfrom tensorflow.python.platform import gfile\r\n\r\nmodel = \"/your/path\"\r\ngraph = tf.get_default_graph()\r\ngraph_def = graph.as_graph_def()\r\ngraph_def.ParseFromString(gfile.FastGFile(model, 'rb').read())\r\ntf.import_graph_def(graph_def, name='graph')\r\nsummaryWriter = tf.summary.FileWriter('log/', graph)\r\n\r\n#命令tensorboard --logdir=/opt/data/hyh/tboard/tusimple_lanenet/vgg\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class LetterPage(Page):
def __init__(self, page_num, n):
super(LetterPage, self).__init__(page_num)
self.title = 'Letters'
self.in_index = False
self.n = n
self.tagline = (
'Email klbscroggsbot@gmail.com and your letter will appear here')
self.letters = ''
def background(self):
self.letters = f_read('emails')
if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(
):
import gmail
details = f_readlines('gmail')
g = gmail.login(details[0], details[1])
unread = g.inbox().mail(unread=True)
for mail in unread:
mail.fetch()
lines = ''.join(mail.body.split('\r')).split('\n')
if lines[0
] == 'EVENT' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:
try:
with open_local('events', 'a') as f:
for line in lines:
if line != 'EVENT':
f.write(line + '\n')
mail.read()
except:
pass
elif lines[0
] == 'CARD' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:
with open('/home/pi/cards/' + lines[1], 'w') as f:
f.write('\n'.join(lines[2:]))
mail.read()
elif 'POINTS' in lines[0].upper(
) and 'belgin.seymenoglu.10@ucl.ac.uk' in mail.fr:
from points import add_points
length = 1
points_to_give = 0
while length <= len(lines[2]):
try:
if lines[2][:length] != '-':
points_to_give = int(lines[2][:length])
length += 1
except:
break
add_points(lines[1].split('=')[0], points_to_give)
mail.read()
else:
newletter = ''
for line in lines:
if line != '':
while len(line) > 79:
newletter += line[:79] + '\n'
line = line[79:]
newletter += line + '\n'
self.letters = (newletter + '\n' + 'from ' + mail.fr +
'\n\n' + self.letters)
mail.read()
self.letters = self.letters.split('\n')
if len(self.letters) > 1000:
self.letters = self.letters[:1000]
with open_local('emails', 'w') as f:
f.write('\n'.join(self.letters))
else:
self.letters = self.letters.split('\n')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LetterPage(Page):
def __init__(self, page_num, n):
super(LetterPage, self).__init__(page_num)
self.title = 'Letters'
self.in_index = False
self.n = n
self.tagline = (
'Email klbscroggsbot@gmail.com and your letter will appear here')
self.letters = ''
def background(self):
self.letters = f_read('emails')
if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(
):
import gmail
details = f_readlines('gmail')
g = gmail.login(details[0], details[1])
unread = g.inbox().mail(unread=True)
for mail in unread:
mail.fetch()
lines = ''.join(mail.body.split('\r')).split('\n')
if lines[0
] == 'EVENT' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:
try:
with open_local('events', 'a') as f:
for line in lines:
if line != 'EVENT':
f.write(line + '\n')
mail.read()
except:
pass
elif lines[0
] == 'CARD' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:
with open('/home/pi/cards/' + lines[1], 'w') as f:
f.write('\n'.join(lines[2:]))
mail.read()
elif 'POINTS' in lines[0].upper(
) and 'belgin.seymenoglu.10@ucl.ac.uk' in mail.fr:
from points import add_points
length = 1
points_to_give = 0
while length <= len(lines[2]):
try:
if lines[2][:length] != '-':
points_to_give = int(lines[2][:length])
length += 1
except:
break
add_points(lines[1].split('=')[0], points_to_give)
mail.read()
else:
newletter = ''
for line in lines:
if line != '':
while len(line) > 79:
newletter += line[:79] + '\n'
line = line[79:]
newletter += line + '\n'
self.letters = (newletter + '\n' + 'from ' + mail.fr +
'\n\n' + self.letters)
mail.read()
self.letters = self.letters.split('\n')
if len(self.letters) > 1000:
self.letters = self.letters[:1000]
with open_local('emails', 'w') as f:
f.write('\n'.join(self.letters))
else:
self.letters = self.letters.split('\n')
def generate_content(self):
letters = self.letters[20 * (self.n - 1):20 * self.n]
letters = '\n'.join(letters)
try:
letters = unicode(letters, 'latin1')
except:
letters = str(letters)
self.add_title('Have your say', font='size4')
a = str(self.n) + '/21'
self.move_cursor(x=90 - len(a))
self.add_text(a, fg='BLUE', bg='YELLOW')
self.move_cursor(x=0)
self.start_random_bg_color()
for line in letters.split('\n'):
line = line.rstrip('\n')
if line == '':
self.end_bg_color()
self.start_random_bg_color()
self.add_text(line, fg='BLACK')
self.add_newline()
self.end_bg_color()
if self.n == 21:
self.add_text('~ END OF LETTERS ~')
else:
self.add_text('The letters continue on page ' + str(200 + self.n))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LetterPage(Page):
def __init__(self, page_num, n):
super(LetterPage, self).__init__(page_num)
self.title = 'Letters'
self.in_index = False
self.n = n
self.tagline = (
'Email klbscroggsbot@gmail.com and your letter will appear here')
self.letters = ''
def background(self):
self.letters = f_read('emails')
if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(
):
import gmail
details = f_readlines('gmail')
g = gmail.login(details[0], details[1])
unread = g.inbox().mail(unread=True)
for mail in unread:
mail.fetch()
lines = ''.join(mail.body.split('\r')).split('\n')
if lines[0
] == 'EVENT' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:
try:
with open_local('events', 'a') as f:
for line in lines:
if line != 'EVENT':
f.write(line + '\n')
mail.read()
except:
pass
elif lines[0
] == 'CARD' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:
with open('/home/pi/cards/' + lines[1], 'w') as f:
f.write('\n'.join(lines[2:]))
mail.read()
elif 'POINTS' in lines[0].upper(
) and 'belgin.seymenoglu.10@ucl.ac.uk' in mail.fr:
from points import add_points
length = 1
points_to_give = 0
while length <= len(lines[2]):
try:
if lines[2][:length] != '-':
points_to_give = int(lines[2][:length])
length += 1
except:
break
add_points(lines[1].split('=')[0], points_to_give)
mail.read()
else:
newletter = ''
for line in lines:
if line != '':
while len(line) > 79:
newletter += line[:79] + '\n'
line = line[79:]
newletter += line + '\n'
self.letters = (newletter + '\n' + 'from ' + mail.fr +
'\n\n' + self.letters)
mail.read()
self.letters = self.letters.split('\n')
if len(self.letters) > 1000:
self.letters = self.letters[:1000]
with open_local('emails', 'w') as f:
f.write('\n'.join(self.letters))
else:
self.letters = self.letters.split('\n')
def generate_content(self):
letters = self.letters[20 * (self.n - 1):20 * self.n]
letters = '\n'.join(letters)
try:
letters = unicode(letters, 'latin1')
except:
letters = str(letters)
self.add_title('Have your say', font='size4')
a = str(self.n) + '/21'
self.move_cursor(x=90 - len(a))
self.add_text(a, fg='BLUE', bg='YELLOW')
self.move_cursor(x=0)
self.start_random_bg_color()
for line in letters.split('\n'):
line = line.rstrip('\n')
if line == '':
self.end_bg_color()
self.start_random_bg_color()
self.add_text(line, fg='BLACK')
self.add_newline()
self.end_bg_color()
if self.n == 21:
self.add_text('~ END OF LETTERS ~')
else:
self.add_text('The letters continue on page ' + str(200 + self.n))
letters_page1 = LetterPage('200', 1)
letters_page1.in_index = True
letters_page1.index_num = '200-220'
letters_page2 = LetterPage('201', 2)
letters_page3 = LetterPage('202', 3)
letters_page4 = LetterPage('203', 4)
letters_page5 = LetterPage('204', 5)
letters_page6 = LetterPage('205', 6)
letters_page7 = LetterPage('206', 7)
letters_page8 = LetterPage('207', 8)
letters_page9 = LetterPage('208', 9)
letters_page10 = LetterPage('209', 10)
letters_page11 = LetterPage('210', 11)
letters_page12 = LetterPage('211', 12)
letters_page13 = LetterPage('212', 13)
letters_page14 = LetterPage('213', 14)
letters_page15 = LetterPage('214', 15)
letters_page16 = LetterPage('215', 16)
letters_page17 = LetterPage('216', 17)
letters_page18 = LetterPage('217', 18)
letters_page19 = LetterPage('218', 19)
letters_page20 = LetterPage('219', 20)
letters_page21 = LetterPage('220', 21)
<|reserved_special_token_1|>
import os
import json
from page import Page
from random import choice
from os.path import join, expanduser
from file_handler import f_read, f_readlines, open_local
import config
class LetterPage(Page):
def __init__(self, page_num, n):
super(LetterPage, self).__init__(page_num)
self.title = 'Letters'
self.in_index = False
self.n = n
self.tagline = (
'Email klbscroggsbot@gmail.com and your letter will appear here')
self.letters = ''
def background(self):
self.letters = f_read('emails')
if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(
):
import gmail
details = f_readlines('gmail')
g = gmail.login(details[0], details[1])
unread = g.inbox().mail(unread=True)
for mail in unread:
mail.fetch()
lines = ''.join(mail.body.split('\r')).split('\n')
if lines[0
] == 'EVENT' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:
try:
with open_local('events', 'a') as f:
for line in lines:
if line != 'EVENT':
f.write(line + '\n')
mail.read()
except:
pass
elif lines[0
] == 'CARD' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:
with open('/home/pi/cards/' + lines[1], 'w') as f:
f.write('\n'.join(lines[2:]))
mail.read()
elif 'POINTS' in lines[0].upper(
) and 'belgin.seymenoglu.10@ucl.ac.uk' in mail.fr:
from points import add_points
length = 1
points_to_give = 0
while length <= len(lines[2]):
try:
if lines[2][:length] != '-':
points_to_give = int(lines[2][:length])
length += 1
except:
break
add_points(lines[1].split('=')[0], points_to_give)
mail.read()
else:
newletter = ''
for line in lines:
if line != '':
while len(line) > 79:
newletter += line[:79] + '\n'
line = line[79:]
newletter += line + '\n'
self.letters = (newletter + '\n' + 'from ' + mail.fr +
'\n\n' + self.letters)
mail.read()
self.letters = self.letters.split('\n')
if len(self.letters) > 1000:
self.letters = self.letters[:1000]
with open_local('emails', 'w') as f:
f.write('\n'.join(self.letters))
else:
self.letters = self.letters.split('\n')
def generate_content(self):
letters = self.letters[20 * (self.n - 1):20 * self.n]
letters = '\n'.join(letters)
try:
letters = unicode(letters, 'latin1')
except:
letters = str(letters)
self.add_title('Have your say', font='size4')
a = str(self.n) + '/21'
self.move_cursor(x=90 - len(a))
self.add_text(a, fg='BLUE', bg='YELLOW')
self.move_cursor(x=0)
self.start_random_bg_color()
for line in letters.split('\n'):
line = line.rstrip('\n')
if line == '':
self.end_bg_color()
self.start_random_bg_color()
self.add_text(line, fg='BLACK')
self.add_newline()
self.end_bg_color()
if self.n == 21:
self.add_text('~ END OF LETTERS ~')
else:
self.add_text('The letters continue on page ' + str(200 + self.n))
letters_page1 = LetterPage('200', 1)
letters_page1.in_index = True
letters_page1.index_num = '200-220'
letters_page2 = LetterPage('201', 2)
letters_page3 = LetterPage('202', 3)
letters_page4 = LetterPage('203', 4)
letters_page5 = LetterPage('204', 5)
letters_page6 = LetterPage('205', 6)
letters_page7 = LetterPage('206', 7)
letters_page8 = LetterPage('207', 8)
letters_page9 = LetterPage('208', 9)
letters_page10 = LetterPage('209', 10)
letters_page11 = LetterPage('210', 11)
letters_page12 = LetterPage('211', 12)
letters_page13 = LetterPage('212', 13)
letters_page14 = LetterPage('213', 14)
letters_page15 = LetterPage('214', 15)
letters_page16 = LetterPage('215', 16)
letters_page17 = LetterPage('216', 17)
letters_page18 = LetterPage('217', 18)
letters_page19 = LetterPage('218', 19)
letters_page20 = LetterPage('219', 20)
letters_page21 = LetterPage('220', 21)
<|reserved_special_token_1|>
import os
import json
from page import Page
from random import choice
from os.path import join, expanduser
from file_handler import f_read, f_readlines, open_local
import config
class LetterPage(Page):
def __init__(self, page_num,n):
super(LetterPage, self).__init__(page_num)
self.title = "Letters"
self.in_index = False
self.n = n
self.tagline = "Email klbscroggsbot@gmail.com and your letter will appear here"
self.letters = ""
def background(self):
self.letters = f_read("emails")
if config.NAME == "KLBFAX" and self.n==1 and config.has_gmail_login():
import gmail
details = f_readlines("gmail")
g = gmail.login(details[0],details[1])
unread = g.inbox().mail(unread=True)
for mail in unread:
mail.fetch()
lines = "".join(mail.body.split("\r")).split("\n")
if lines[0] == "EVENT" and "matthew.scroggs.14@ucl.ac.uk" in mail.fr:
try:
with open_local('events','a') as f:
for line in lines:
if line!="EVENT":
f.write(line+"\n")
mail.read()
except:
pass
elif lines[0] == "CARD" and "matthew.scroggs.14@ucl.ac.uk" in mail.fr:
with open('/home/pi/cards/'+lines[1],"w") as f:
f.write("\n".join(lines[2:]))
mail.read()
elif "POINTS" in lines[0].upper() and "belgin.seymenoglu.10@ucl.ac.uk" in mail.fr:
from points import add_points
length = 1
points_to_give = 0
while length<=len(lines[2]):
try:
if lines[2][:length]!="-":
points_to_give = int(lines[2][:length])
length += 1
except:
break
add_points(lines[1].split("=")[0],points_to_give)
mail.read()
else:
newletter = ""
for line in lines:
if line!="":
while len(line)>79:
newletter += line[:79]+"\n"
line=line[79:]
newletter+=line+"\n"
self.letters=newletter+"\n"+"from "+mail.fr+"\n\n"+self.letters
mail.read()
self.letters = self.letters.split("\n")
if len(self.letters)>1000:
self.letters = self.letters[:1000]
with open_local("emails","w") as f:
f.write("\n".join(self.letters))
else:
self.letters = self.letters.split("\n")
def generate_content(self):
letters = self.letters[20*(self.n-1):20*self.n]
letters = "\n".join(letters)
try:
letters = unicode(letters,'latin1')
except:
letters = str(letters)
self.add_title("Have your say",font="size4")
a = str(self.n)+"/21"
self.move_cursor(x=90-len(a))
self.add_text(a, fg="BLUE", bg="YELLOW")
self.move_cursor(x=0)
self.start_random_bg_color()
for line in letters.split("\n"):
line = line.rstrip("\n")
if line == "":
self.end_bg_color()
self.start_random_bg_color()
self.add_text(line,fg="BLACK")
self.add_newline()
self.end_bg_color()
if self.n==21:
self.add_text("~ END OF LETTERS ~")
else:
self.add_text("The letters continue on page "+str(200+self.n))
letters_page1 = LetterPage("200",1)
letters_page1.in_index = True
letters_page1.index_num = "200-220"
letters_page2 = LetterPage("201",2)
letters_page3 = LetterPage("202",3)
letters_page4 = LetterPage("203",4)
letters_page5 = LetterPage("204",5)
letters_page6 = LetterPage("205",6)
letters_page7 = LetterPage("206",7)
letters_page8 = LetterPage("207",8)
letters_page9 = LetterPage("208",9)
letters_page10 = LetterPage("209",10)
letters_page11 = LetterPage("210",11)
letters_page12 = LetterPage("211",12)
letters_page13 = LetterPage("212",13)
letters_page14 = LetterPage("213",14)
letters_page15 = LetterPage("214",15)
letters_page16 = LetterPage("215",16)
letters_page17 = LetterPage("216",17)
letters_page18 = LetterPage("217",18)
letters_page19 = LetterPage("218",19)
letters_page20 = LetterPage("219",20)
letters_page21 = LetterPage("220",21)
|
flexible
|
{
"blob_id": "e714fe0e27ec9ea5acb3120a4d2114d3d7674fcf",
"index": 5601,
"step-1": "<mask token>\n\n\nclass LetterPage(Page):\n\n def __init__(self, page_num, n):\n super(LetterPage, self).__init__(page_num)\n self.title = 'Letters'\n self.in_index = False\n self.n = n\n self.tagline = (\n 'Email klbscroggsbot@gmail.com and your letter will appear here')\n self.letters = ''\n\n def background(self):\n self.letters = f_read('emails')\n if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(\n ):\n import gmail\n details = f_readlines('gmail')\n g = gmail.login(details[0], details[1])\n unread = g.inbox().mail(unread=True)\n for mail in unread:\n mail.fetch()\n lines = ''.join(mail.body.split('\\r')).split('\\n')\n if lines[0\n ] == 'EVENT' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n try:\n with open_local('events', 'a') as f:\n for line in lines:\n if line != 'EVENT':\n f.write(line + '\\n')\n mail.read()\n except:\n pass\n elif lines[0\n ] == 'CARD' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n with open('/home/pi/cards/' + lines[1], 'w') as f:\n f.write('\\n'.join(lines[2:]))\n mail.read()\n elif 'POINTS' in lines[0].upper(\n ) and 'belgin.seymenoglu.10@ucl.ac.uk' in mail.fr:\n from points import add_points\n length = 1\n points_to_give = 0\n while length <= len(lines[2]):\n try:\n if lines[2][:length] != '-':\n points_to_give = int(lines[2][:length])\n length += 1\n except:\n break\n add_points(lines[1].split('=')[0], points_to_give)\n mail.read()\n else:\n newletter = ''\n for line in lines:\n if line != '':\n while len(line) > 79:\n newletter += line[:79] + '\\n'\n line = line[79:]\n newletter += line + '\\n'\n self.letters = (newletter + '\\n' + 'from ' + mail.fr +\n '\\n\\n' + self.letters)\n mail.read()\n self.letters = self.letters.split('\\n')\n if len(self.letters) > 1000:\n self.letters = self.letters[:1000]\n with open_local('emails', 'w') as f:\n f.write('\\n'.join(self.letters))\n else:\n self.letters = self.letters.split('\\n')\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LetterPage(Page):\n\n def __init__(self, page_num, n):\n super(LetterPage, self).__init__(page_num)\n self.title = 'Letters'\n self.in_index = False\n self.n = n\n self.tagline = (\n 'Email klbscroggsbot@gmail.com and your letter will appear here')\n self.letters = ''\n\n def background(self):\n self.letters = f_read('emails')\n if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(\n ):\n import gmail\n details = f_readlines('gmail')\n g = gmail.login(details[0], details[1])\n unread = g.inbox().mail(unread=True)\n for mail in unread:\n mail.fetch()\n lines = ''.join(mail.body.split('\\r')).split('\\n')\n if lines[0\n ] == 'EVENT' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n try:\n with open_local('events', 'a') as f:\n for line in lines:\n if line != 'EVENT':\n f.write(line + '\\n')\n mail.read()\n except:\n pass\n elif lines[0\n ] == 'CARD' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n with open('/home/pi/cards/' + lines[1], 'w') as f:\n f.write('\\n'.join(lines[2:]))\n mail.read()\n elif 'POINTS' in lines[0].upper(\n ) and 'belgin.seymenoglu.10@ucl.ac.uk' in mail.fr:\n from points import add_points\n length = 1\n points_to_give = 0\n while length <= len(lines[2]):\n try:\n if lines[2][:length] != '-':\n points_to_give = int(lines[2][:length])\n length += 1\n except:\n break\n add_points(lines[1].split('=')[0], points_to_give)\n mail.read()\n else:\n newletter = ''\n for line in lines:\n if line != '':\n while len(line) > 79:\n newletter += line[:79] + '\\n'\n line = line[79:]\n newletter += line + '\\n'\n self.letters = (newletter + '\\n' + 'from ' + mail.fr +\n '\\n\\n' + self.letters)\n mail.read()\n self.letters = self.letters.split('\\n')\n if len(self.letters) > 1000:\n self.letters = self.letters[:1000]\n with open_local('emails', 'w') as f:\n f.write('\\n'.join(self.letters))\n else:\n self.letters = self.letters.split('\\n')\n\n def generate_content(self):\n letters = self.letters[20 * (self.n - 
1):20 * self.n]\n letters = '\\n'.join(letters)\n try:\n letters = unicode(letters, 'latin1')\n except:\n letters = str(letters)\n self.add_title('Have your say', font='size4')\n a = str(self.n) + '/21'\n self.move_cursor(x=90 - len(a))\n self.add_text(a, fg='BLUE', bg='YELLOW')\n self.move_cursor(x=0)\n self.start_random_bg_color()\n for line in letters.split('\\n'):\n line = line.rstrip('\\n')\n if line == '':\n self.end_bg_color()\n self.start_random_bg_color()\n self.add_text(line, fg='BLACK')\n self.add_newline()\n self.end_bg_color()\n if self.n == 21:\n self.add_text('~ END OF LETTERS ~')\n else:\n self.add_text('The letters continue on page ' + str(200 + self.n))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LetterPage(Page):\n\n def __init__(self, page_num, n):\n super(LetterPage, self).__init__(page_num)\n self.title = 'Letters'\n self.in_index = False\n self.n = n\n self.tagline = (\n 'Email klbscroggsbot@gmail.com and your letter will appear here')\n self.letters = ''\n\n def background(self):\n self.letters = f_read('emails')\n if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(\n ):\n import gmail\n details = f_readlines('gmail')\n g = gmail.login(details[0], details[1])\n unread = g.inbox().mail(unread=True)\n for mail in unread:\n mail.fetch()\n lines = ''.join(mail.body.split('\\r')).split('\\n')\n if lines[0\n ] == 'EVENT' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n try:\n with open_local('events', 'a') as f:\n for line in lines:\n if line != 'EVENT':\n f.write(line + '\\n')\n mail.read()\n except:\n pass\n elif lines[0\n ] == 'CARD' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n with open('/home/pi/cards/' + lines[1], 'w') as f:\n f.write('\\n'.join(lines[2:]))\n mail.read()\n elif 'POINTS' in lines[0].upper(\n ) and 'belgin.seymenoglu.10@ucl.ac.uk' in mail.fr:\n from points import add_points\n length = 1\n points_to_give = 0\n while length <= len(lines[2]):\n try:\n if lines[2][:length] != '-':\n points_to_give = int(lines[2][:length])\n length += 1\n except:\n break\n add_points(lines[1].split('=')[0], points_to_give)\n mail.read()\n else:\n newletter = ''\n for line in lines:\n if line != '':\n while len(line) > 79:\n newletter += line[:79] + '\\n'\n line = line[79:]\n newletter += line + '\\n'\n self.letters = (newletter + '\\n' + 'from ' + mail.fr +\n '\\n\\n' + self.letters)\n mail.read()\n self.letters = self.letters.split('\\n')\n if len(self.letters) > 1000:\n self.letters = self.letters[:1000]\n with open_local('emails', 'w') as f:\n f.write('\\n'.join(self.letters))\n else:\n self.letters = self.letters.split('\\n')\n\n def generate_content(self):\n letters = self.letters[20 * (self.n - 
1):20 * self.n]\n letters = '\\n'.join(letters)\n try:\n letters = unicode(letters, 'latin1')\n except:\n letters = str(letters)\n self.add_title('Have your say', font='size4')\n a = str(self.n) + '/21'\n self.move_cursor(x=90 - len(a))\n self.add_text(a, fg='BLUE', bg='YELLOW')\n self.move_cursor(x=0)\n self.start_random_bg_color()\n for line in letters.split('\\n'):\n line = line.rstrip('\\n')\n if line == '':\n self.end_bg_color()\n self.start_random_bg_color()\n self.add_text(line, fg='BLACK')\n self.add_newline()\n self.end_bg_color()\n if self.n == 21:\n self.add_text('~ END OF LETTERS ~')\n else:\n self.add_text('The letters continue on page ' + str(200 + self.n))\n\n\nletters_page1 = LetterPage('200', 1)\nletters_page1.in_index = True\nletters_page1.index_num = '200-220'\nletters_page2 = LetterPage('201', 2)\nletters_page3 = LetterPage('202', 3)\nletters_page4 = LetterPage('203', 4)\nletters_page5 = LetterPage('204', 5)\nletters_page6 = LetterPage('205', 6)\nletters_page7 = LetterPage('206', 7)\nletters_page8 = LetterPage('207', 8)\nletters_page9 = LetterPage('208', 9)\nletters_page10 = LetterPage('209', 10)\nletters_page11 = LetterPage('210', 11)\nletters_page12 = LetterPage('211', 12)\nletters_page13 = LetterPage('212', 13)\nletters_page14 = LetterPage('213', 14)\nletters_page15 = LetterPage('214', 15)\nletters_page16 = LetterPage('215', 16)\nletters_page17 = LetterPage('216', 17)\nletters_page18 = LetterPage('217', 18)\nletters_page19 = LetterPage('218', 19)\nletters_page20 = LetterPage('219', 20)\nletters_page21 = LetterPage('220', 21)\n",
"step-4": "import os\nimport json\nfrom page import Page\nfrom random import choice\nfrom os.path import join, expanduser\nfrom file_handler import f_read, f_readlines, open_local\nimport config\n\n\nclass LetterPage(Page):\n\n def __init__(self, page_num, n):\n super(LetterPage, self).__init__(page_num)\n self.title = 'Letters'\n self.in_index = False\n self.n = n\n self.tagline = (\n 'Email klbscroggsbot@gmail.com and your letter will appear here')\n self.letters = ''\n\n def background(self):\n self.letters = f_read('emails')\n if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(\n ):\n import gmail\n details = f_readlines('gmail')\n g = gmail.login(details[0], details[1])\n unread = g.inbox().mail(unread=True)\n for mail in unread:\n mail.fetch()\n lines = ''.join(mail.body.split('\\r')).split('\\n')\n if lines[0\n ] == 'EVENT' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n try:\n with open_local('events', 'a') as f:\n for line in lines:\n if line != 'EVENT':\n f.write(line + '\\n')\n mail.read()\n except:\n pass\n elif lines[0\n ] == 'CARD' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n with open('/home/pi/cards/' + lines[1], 'w') as f:\n f.write('\\n'.join(lines[2:]))\n mail.read()\n elif 'POINTS' in lines[0].upper(\n ) and 'belgin.seymenoglu.10@ucl.ac.uk' in mail.fr:\n from points import add_points\n length = 1\n points_to_give = 0\n while length <= len(lines[2]):\n try:\n if lines[2][:length] != '-':\n points_to_give = int(lines[2][:length])\n length += 1\n except:\n break\n add_points(lines[1].split('=')[0], points_to_give)\n mail.read()\n else:\n newletter = ''\n for line in lines:\n if line != '':\n while len(line) > 79:\n newletter += line[:79] + '\\n'\n line = line[79:]\n newletter += line + '\\n'\n self.letters = (newletter + '\\n' + 'from ' + mail.fr +\n '\\n\\n' + self.letters)\n mail.read()\n self.letters = self.letters.split('\\n')\n if len(self.letters) > 1000:\n self.letters = self.letters[:1000]\n with open_local('emails', 
'w') as f:\n f.write('\\n'.join(self.letters))\n else:\n self.letters = self.letters.split('\\n')\n\n def generate_content(self):\n letters = self.letters[20 * (self.n - 1):20 * self.n]\n letters = '\\n'.join(letters)\n try:\n letters = unicode(letters, 'latin1')\n except:\n letters = str(letters)\n self.add_title('Have your say', font='size4')\n a = str(self.n) + '/21'\n self.move_cursor(x=90 - len(a))\n self.add_text(a, fg='BLUE', bg='YELLOW')\n self.move_cursor(x=0)\n self.start_random_bg_color()\n for line in letters.split('\\n'):\n line = line.rstrip('\\n')\n if line == '':\n self.end_bg_color()\n self.start_random_bg_color()\n self.add_text(line, fg='BLACK')\n self.add_newline()\n self.end_bg_color()\n if self.n == 21:\n self.add_text('~ END OF LETTERS ~')\n else:\n self.add_text('The letters continue on page ' + str(200 + self.n))\n\n\nletters_page1 = LetterPage('200', 1)\nletters_page1.in_index = True\nletters_page1.index_num = '200-220'\nletters_page2 = LetterPage('201', 2)\nletters_page3 = LetterPage('202', 3)\nletters_page4 = LetterPage('203', 4)\nletters_page5 = LetterPage('204', 5)\nletters_page6 = LetterPage('205', 6)\nletters_page7 = LetterPage('206', 7)\nletters_page8 = LetterPage('207', 8)\nletters_page9 = LetterPage('208', 9)\nletters_page10 = LetterPage('209', 10)\nletters_page11 = LetterPage('210', 11)\nletters_page12 = LetterPage('211', 12)\nletters_page13 = LetterPage('212', 13)\nletters_page14 = LetterPage('213', 14)\nletters_page15 = LetterPage('214', 15)\nletters_page16 = LetterPage('215', 16)\nletters_page17 = LetterPage('216', 17)\nletters_page18 = LetterPage('217', 18)\nletters_page19 = LetterPage('218', 19)\nletters_page20 = LetterPage('219', 20)\nletters_page21 = LetterPage('220', 21)\n",
"step-5": "import os\nimport json\nfrom page import Page\nfrom random import choice\nfrom os.path import join, expanduser\nfrom file_handler import f_read, f_readlines, open_local\nimport config\n\nclass LetterPage(Page):\n def __init__(self, page_num,n):\n super(LetterPage, self).__init__(page_num)\n self.title = \"Letters\"\n self.in_index = False\n self.n = n\n self.tagline = \"Email klbscroggsbot@gmail.com and your letter will appear here\"\n self.letters = \"\"\n\n def background(self):\n self.letters = f_read(\"emails\")\n if config.NAME == \"KLBFAX\" and self.n==1 and config.has_gmail_login():\n import gmail\n details = f_readlines(\"gmail\")\n\n g = gmail.login(details[0],details[1])\n unread = g.inbox().mail(unread=True)\n for mail in unread:\n mail.fetch()\n lines = \"\".join(mail.body.split(\"\\r\")).split(\"\\n\")\n if lines[0] == \"EVENT\" and \"matthew.scroggs.14@ucl.ac.uk\" in mail.fr:\n try:\n with open_local('events','a') as f:\n for line in lines:\n if line!=\"EVENT\":\n f.write(line+\"\\n\")\n mail.read()\n except:\n pass\n elif lines[0] == \"CARD\" and \"matthew.scroggs.14@ucl.ac.uk\" in mail.fr:\n with open('/home/pi/cards/'+lines[1],\"w\") as f:\n f.write(\"\\n\".join(lines[2:]))\n mail.read()\n elif \"POINTS\" in lines[0].upper() and \"belgin.seymenoglu.10@ucl.ac.uk\" in mail.fr:\n from points import add_points\n length = 1\n points_to_give = 0\n while length<=len(lines[2]):\n try:\n if lines[2][:length]!=\"-\":\n points_to_give = int(lines[2][:length])\n length += 1\n except:\n break\n add_points(lines[1].split(\"=\")[0],points_to_give)\n mail.read()\n \n else:\n newletter = \"\"\n for line in lines:\n if line!=\"\":\n while len(line)>79:\n newletter += line[:79]+\"\\n\"\n line=line[79:]\n newletter+=line+\"\\n\"\n \n self.letters=newletter+\"\\n\"+\"from \"+mail.fr+\"\\n\\n\"+self.letters\n mail.read()\n self.letters = self.letters.split(\"\\n\")\n if len(self.letters)>1000:\n self.letters = self.letters[:1000]\n with 
open_local(\"emails\",\"w\") as f:\n f.write(\"\\n\".join(self.letters))\n else:\n self.letters = self.letters.split(\"\\n\")\n\n\n def generate_content(self):\n letters = self.letters[20*(self.n-1):20*self.n]\n letters = \"\\n\".join(letters)\n try:\n letters = unicode(letters,'latin1')\n except:\n letters = str(letters)\n\n\n self.add_title(\"Have your say\",font=\"size4\")\n a = str(self.n)+\"/21\"\n self.move_cursor(x=90-len(a))\n self.add_text(a, fg=\"BLUE\", bg=\"YELLOW\")\n self.move_cursor(x=0)\n self.start_random_bg_color()\n for line in letters.split(\"\\n\"):\n line = line.rstrip(\"\\n\")\n if line == \"\":\n self.end_bg_color()\n self.start_random_bg_color()\n self.add_text(line,fg=\"BLACK\")\n self.add_newline()\n self.end_bg_color()\n if self.n==21:\n self.add_text(\"~ END OF LETTERS ~\")\n else:\n self.add_text(\"The letters continue on page \"+str(200+self.n))\n\nletters_page1 = LetterPage(\"200\",1)\nletters_page1.in_index = True\nletters_page1.index_num = \"200-220\"\nletters_page2 = LetterPage(\"201\",2)\nletters_page3 = LetterPage(\"202\",3)\nletters_page4 = LetterPage(\"203\",4)\nletters_page5 = LetterPage(\"204\",5)\nletters_page6 = LetterPage(\"205\",6)\nletters_page7 = LetterPage(\"206\",7)\nletters_page8 = LetterPage(\"207\",8)\nletters_page9 = LetterPage(\"208\",9)\nletters_page10 = LetterPage(\"209\",10)\nletters_page11 = LetterPage(\"210\",11)\nletters_page12 = LetterPage(\"211\",12)\nletters_page13 = LetterPage(\"212\",13)\nletters_page14 = LetterPage(\"213\",14)\nletters_page15 = LetterPage(\"214\",15)\nletters_page16 = LetterPage(\"215\",16)\nletters_page17 = LetterPage(\"216\",17)\nletters_page18 = LetterPage(\"217\",18)\nletters_page19 = LetterPage(\"218\",19)\nletters_page20 = LetterPage(\"219\",20)\nletters_page21 = LetterPage(\"220\",21)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# -*- coding: utf-8 -*-
import luigi
from luigi import *
#from luigi import Task
import pandas as pd
from pset.tasks.embeddings.load_embeding import EmbedStudentData
from pset.tasks.data.load_dataset import HashedStudentData
import numpy as npy
import pickle
import os
class NearestStudents(Task):
    """Luigi task: rank students by embedding distance to a given github id.

    Reads the hashed student spreadsheet and the pickled embedding lookup,
    computes the cosine distance from every student's embedding vector to
    the target student's vector, writes the full sorted distance listing to
    a CSV target, and prints the nearest and farthest students to stdout.
    """

    github_id = Parameter(default='b280302a', description='Github id to search nearby (not hashed)')
    n = IntParameter(default=5, description='Output top N')
    # NOTE(review): `farthest` is declared but never consulted below; both the
    # nearest and farthest sections are always printed. Confirm intended use.
    farthest = BoolParameter(default=False, description='Find farthest instead')

    def output(self):
        # CSV of every student's distance to the target, sorted ascending.
        return luigi.LocalTarget("/Users/adcxdpf/Downloads/pset_03/sd.csv")

    def requires(self):
        return {
            'data': HashedStudentData(path='/Users/adcxdpf/Downloads/pset_03/pset/tasks/data'),
            'embedStudentData': EmbedStudentData(path='/Users/adcxdpf/Downloads/pset_03/pset/tasks/data'),
        }

    def run(self):
        # Load the pickled embedding lookup produced by the upstream task.
        # Bug fix: the input handle was previously never closed (leak).
        with self.input()['embedStudentData'].open(mode='rb') as vectors_file:
            vectors_lookup = pickle.load(vectors_file)

        vecs_list = pd.Series(vectors_lookup)
        vectors_df = pd.DataFrame(vectors_lookup, index=vecs_list.index)
        vectors_df.columns = ['vectors']
        print('##### vectors_df : ', vectors_df)
        print(" vectors_df shape is :: ", vectors_df.shape)
        print("github_id param : ", self.github_id)

        # Locate the spreadsheet row whose hashed id matches the target.
        pd_xls_data = pd.read_excel(self.input()['data'].path, 0)
        idx = pd_xls_data.index[pd_xls_data['hashed_id'] == self.github_id]
        my_vec = vectors_df.iloc[[idx.values[0]]]
        self.my_vec = my_vec.values[0][0]
        print("my_vec : ", self.my_vec)
        print(" my_vec shape is :: ", self.my_vec.shape)

        # Cosine distance from every student's vector to the target vector.
        distances = vectors_df['vectors'].apply(self.my_distance)
        sortedDistance = distances.sort_values()
        print('###### sortedDistance : ', sortedDistance)

        # Persist the full sorted distance listing.
        with self.output().open('w') as f:
            sortedDistance.str[0].to_csv(f)

        nearDis = sortedDistance.head(self.n).index
        print("******** Nearest**********")
        for index in nearDis:
            print(pd_xls_data.iloc[index])

        # Bug fix: previously hard-coded tail(5), ignoring the `n` parameter.
        farDis = sortedDistance.tail(self.n).index
        print("******** Farthest**********")
        for index in farDis:
            print(pd_xls_data.iloc[index])

    def cosine_similarity(self, a, b):
        """Cosine similarity between vectors `a` and `b`.

        `a[0]` is indexed because each cell of the vectors column appears to
        hold a one-element container wrapping the actual vector — TODO confirm
        against the upstream embedding format.
        """
        dot_product = npy.dot(a[0], b.T)
        norm_a = npy.linalg.norm(a)
        norm_b = npy.linalg.norm(b)
        return dot_product / (norm_a * norm_b)

    def my_distance(self, vec1):
        """Cosine distance (1 - similarity) from `vec1` to the target vector."""
        return 1 - self.cosine_similarity(vec1, self.my_vec)
|
normal
|
{
"blob_id": "15eed401728e07bfe9299edd12add43ad8b9cb71",
"index": 3802,
"step-1": "<mask token>\n\n\nclass NearestStudents(Task):\n <mask token>\n <mask token>\n <mask token>\n\n def output(self):\n return luigi.LocalTarget('/Users/adcxdpf/Downloads/pset_03/sd.csv')\n\n def requires(self):\n return {'data': HashedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data'),\n 'embedStudentData': EmbedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data')}\n\n def run(self):\n vectors_lookup_bytes = self.input()['embedStudentData'].open(mode='rb')\n vectors_lookup = pickle.load(vectors_lookup_bytes)\n vecs_list = pd.Series(vectors_lookup)\n vectors_df = pd.DataFrame(vectors_lookup, index=vecs_list.index)\n vectors_df.columns = ['vectors']\n print('##### vectors_df : ', vectors_df)\n print(' vectors_df shape is :: ', vectors_df.shape)\n print('github_id param : ', self.github_id)\n pd_xls_data = pd.read_excel(self.input()['data'].path, 0)\n idx = pd_xls_data.index[pd_xls_data['hashed_id'] == self.github_id]\n my_vec = vectors_df.iloc[[idx.values[0]]]\n self.my_vec = my_vec.values[0][0]\n print('my_vec : ', self.my_vec)\n print(' my_vec shape is :: ', self.my_vec.shape)\n distances = vectors_df['vectors'].apply(self.my_distance)\n sortedDistance = distances.sort_values()\n print('###### sortedDistance : ', sortedDistance)\n f = self.output().open('w')\n sortedDistance.str[0].to_csv(f)\n f.close()\n nearDis = sortedDistance.head(self.n).index\n print('******** Nearest**********')\n for index in nearDis:\n print(pd_xls_data.iloc[index])\n farDis = sortedDistance.tail(5).index\n print('******** Farthest**********')\n for index in farDis:\n print(pd_xls_data.iloc[index])\n\n def cosine_similarity(self, a, b):\n dot_product = npy.dot(a[0], b.T)\n norm_a = npy.linalg.norm(a)\n norm_b = npy.linalg.norm(b)\n return dot_product / (norm_a * norm_b)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass NearestStudents(Task):\n <mask token>\n <mask token>\n <mask token>\n\n def output(self):\n return luigi.LocalTarget('/Users/adcxdpf/Downloads/pset_03/sd.csv')\n\n def requires(self):\n return {'data': HashedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data'),\n 'embedStudentData': EmbedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data')}\n\n def run(self):\n vectors_lookup_bytes = self.input()['embedStudentData'].open(mode='rb')\n vectors_lookup = pickle.load(vectors_lookup_bytes)\n vecs_list = pd.Series(vectors_lookup)\n vectors_df = pd.DataFrame(vectors_lookup, index=vecs_list.index)\n vectors_df.columns = ['vectors']\n print('##### vectors_df : ', vectors_df)\n print(' vectors_df shape is :: ', vectors_df.shape)\n print('github_id param : ', self.github_id)\n pd_xls_data = pd.read_excel(self.input()['data'].path, 0)\n idx = pd_xls_data.index[pd_xls_data['hashed_id'] == self.github_id]\n my_vec = vectors_df.iloc[[idx.values[0]]]\n self.my_vec = my_vec.values[0][0]\n print('my_vec : ', self.my_vec)\n print(' my_vec shape is :: ', self.my_vec.shape)\n distances = vectors_df['vectors'].apply(self.my_distance)\n sortedDistance = distances.sort_values()\n print('###### sortedDistance : ', sortedDistance)\n f = self.output().open('w')\n sortedDistance.str[0].to_csv(f)\n f.close()\n nearDis = sortedDistance.head(self.n).index\n print('******** Nearest**********')\n for index in nearDis:\n print(pd_xls_data.iloc[index])\n farDis = sortedDistance.tail(5).index\n print('******** Farthest**********')\n for index in farDis:\n print(pd_xls_data.iloc[index])\n\n def cosine_similarity(self, a, b):\n dot_product = npy.dot(a[0], b.T)\n norm_a = npy.linalg.norm(a)\n norm_b = npy.linalg.norm(b)\n return dot_product / (norm_a * norm_b)\n\n def my_distance(self, vec1):\n return 1 - self.cosine_similarity(vec1, self.my_vec)\n",
"step-3": "<mask token>\n\n\nclass NearestStudents(Task):\n github_id = Parameter(default='b280302a', description=\n 'Github id to search nearby (not hashed)')\n n = IntParameter(default=5, description='Output top N')\n farthest = BoolParameter(default=False, description='Find farthest instead'\n )\n\n def output(self):\n return luigi.LocalTarget('/Users/adcxdpf/Downloads/pset_03/sd.csv')\n\n def requires(self):\n return {'data': HashedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data'),\n 'embedStudentData': EmbedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data')}\n\n def run(self):\n vectors_lookup_bytes = self.input()['embedStudentData'].open(mode='rb')\n vectors_lookup = pickle.load(vectors_lookup_bytes)\n vecs_list = pd.Series(vectors_lookup)\n vectors_df = pd.DataFrame(vectors_lookup, index=vecs_list.index)\n vectors_df.columns = ['vectors']\n print('##### vectors_df : ', vectors_df)\n print(' vectors_df shape is :: ', vectors_df.shape)\n print('github_id param : ', self.github_id)\n pd_xls_data = pd.read_excel(self.input()['data'].path, 0)\n idx = pd_xls_data.index[pd_xls_data['hashed_id'] == self.github_id]\n my_vec = vectors_df.iloc[[idx.values[0]]]\n self.my_vec = my_vec.values[0][0]\n print('my_vec : ', self.my_vec)\n print(' my_vec shape is :: ', self.my_vec.shape)\n distances = vectors_df['vectors'].apply(self.my_distance)\n sortedDistance = distances.sort_values()\n print('###### sortedDistance : ', sortedDistance)\n f = self.output().open('w')\n sortedDistance.str[0].to_csv(f)\n f.close()\n nearDis = sortedDistance.head(self.n).index\n print('******** Nearest**********')\n for index in nearDis:\n print(pd_xls_data.iloc[index])\n farDis = sortedDistance.tail(5).index\n print('******** Farthest**********')\n for index in farDis:\n print(pd_xls_data.iloc[index])\n\n def cosine_similarity(self, a, b):\n dot_product = npy.dot(a[0], b.T)\n norm_a = npy.linalg.norm(a)\n norm_b = npy.linalg.norm(b)\n return 
dot_product / (norm_a * norm_b)\n\n def my_distance(self, vec1):\n return 1 - self.cosine_similarity(vec1, self.my_vec)\n",
"step-4": "import luigi\nfrom luigi import *\nimport pandas as pd\nfrom pset.tasks.embeddings.load_embeding import EmbedStudentData\nfrom pset.tasks.data.load_dataset import HashedStudentData\nimport numpy as npy\nimport pickle\nimport os\n\n\nclass NearestStudents(Task):\n github_id = Parameter(default='b280302a', description=\n 'Github id to search nearby (not hashed)')\n n = IntParameter(default=5, description='Output top N')\n farthest = BoolParameter(default=False, description='Find farthest instead'\n )\n\n def output(self):\n return luigi.LocalTarget('/Users/adcxdpf/Downloads/pset_03/sd.csv')\n\n def requires(self):\n return {'data': HashedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data'),\n 'embedStudentData': EmbedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data')}\n\n def run(self):\n vectors_lookup_bytes = self.input()['embedStudentData'].open(mode='rb')\n vectors_lookup = pickle.load(vectors_lookup_bytes)\n vecs_list = pd.Series(vectors_lookup)\n vectors_df = pd.DataFrame(vectors_lookup, index=vecs_list.index)\n vectors_df.columns = ['vectors']\n print('##### vectors_df : ', vectors_df)\n print(' vectors_df shape is :: ', vectors_df.shape)\n print('github_id param : ', self.github_id)\n pd_xls_data = pd.read_excel(self.input()['data'].path, 0)\n idx = pd_xls_data.index[pd_xls_data['hashed_id'] == self.github_id]\n my_vec = vectors_df.iloc[[idx.values[0]]]\n self.my_vec = my_vec.values[0][0]\n print('my_vec : ', self.my_vec)\n print(' my_vec shape is :: ', self.my_vec.shape)\n distances = vectors_df['vectors'].apply(self.my_distance)\n sortedDistance = distances.sort_values()\n print('###### sortedDistance : ', sortedDistance)\n f = self.output().open('w')\n sortedDistance.str[0].to_csv(f)\n f.close()\n nearDis = sortedDistance.head(self.n).index\n print('******** Nearest**********')\n for index in nearDis:\n print(pd_xls_data.iloc[index])\n farDis = sortedDistance.tail(5).index\n print('******** 
Farthest**********')\n for index in farDis:\n print(pd_xls_data.iloc[index])\n\n def cosine_similarity(self, a, b):\n dot_product = npy.dot(a[0], b.T)\n norm_a = npy.linalg.norm(a)\n norm_b = npy.linalg.norm(b)\n return dot_product / (norm_a * norm_b)\n\n def my_distance(self, vec1):\n return 1 - self.cosine_similarity(vec1, self.my_vec)\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport luigi\nfrom luigi import *\n#from luigi import Task\nimport pandas as pd\nfrom pset.tasks.embeddings.load_embeding import EmbedStudentData\nfrom pset.tasks.data.load_dataset import HashedStudentData\nimport numpy as npy\nimport pickle\nimport os\n\nclass NearestStudents(Task):\n\n github_id = Parameter(default='b280302a', description='Github id to search nearby (not hashed)')\n n = IntParameter(default=5, description='Output top N')\n farthest = BoolParameter(default=False, description='Find farthest instead')\n \n def output(self):\n return luigi.LocalTarget(\"/Users/adcxdpf/Downloads/pset_03/sd.csv\")\n \n\n def requires(self):\n return {\n 'data': HashedStudentData(path='/Users/adcxdpf/Downloads/pset_03/pset/tasks/data'),\n 'embedStudentData': EmbedStudentData(path='/Users/adcxdpf/Downloads/pset_03/pset/tasks/data')\n }\n #return self.clone(EmbedStudentData)\n\n\n def run(self):\n \n vectors_lookup_bytes = (self.input()['embedStudentData'].open(mode='rb'))\n vectors_lookup = pickle.load(vectors_lookup_bytes)\n\n vecs_list = pd.Series(vectors_lookup)\n vectors_df = pd.DataFrame(vectors_lookup, index=vecs_list.index)\n vectors_df.columns = ['vectors']\n print('##### vectors_df : ', vectors_df)\n print(\" vectors_df shape is :: \" , vectors_df.shape)\n \n print(\"github_id param : \" , self.github_id)\n \n pd_xls_data = pd.read_excel(self.input()['data'].path,0) \n idx = pd_xls_data.index[pd_xls_data['hashed_id']== self.github_id]\n #print ('######## idx.values ######### ', idx.values)\n \n my_vec = vectors_df.iloc[[idx.values[0]]]\n self.my_vec = (my_vec.values[0][0])\n \n print (\"my_vec : \" , self.my_vec)\n print(\" my_vec shape is :: \" , self.my_vec.shape)\n \n distances = vectors_df['vectors'].apply(self.my_distance)\n \n sortedDistance= distances.sort_values()\n print('###### sortedDistance : ', sortedDistance)\n \n # output data\n f = self.output().open('w')\n sortedDistance.str[0].to_csv(f)\n #df.to_csv(f, 
sep='\\t', encoding='utf-8', index=None)\n f.close() \n \n nearDis= sortedDistance.head(self.n).index\n print (\"******** Nearest**********\")\n for index in nearDis: \n print(pd_xls_data.iloc[index]) \n \n farDis = sortedDistance.tail(5).index\n print (\"******** Farthest**********\")\n for index in farDis: \n print(pd_xls_data.iloc[index]) \n \n\n\n def cosine_similarity(self,a, b):\n # \"\"\"Takes 2 vectors a, b and returns the cosine similarity according \n # to the definition of the dot product\n # \"\"\"\n # dot_product = npy.dot(a, b)\n # norm_a = npy.linalg.norm(a)\n # norm_b = npy.linalg.norm(b)\n # return dot_product / (norm_a * norm_b)\n \n \n dot_product = npy.dot(a[0], b.T)\n norm_a = npy.linalg.norm(a)\n norm_b = npy.linalg.norm(b)\n \n return dot_product / (norm_a * norm_b)\n \n\n def my_distance(self,vec1):\n \n return 1 - self.cosine_similarity(vec1, self.my_vec)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import django
from rest_framework import serializers
from django.shortcuts import render
from .models import Student
from .serializiers import StudentSerializer
from rest_framework.renderers import JSONRenderer
from django.http import HttpResponse,JsonResponse
import io
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from django.views.decorators.csrf import csrf_exempt
# Single Model object.
def student_detail(request, pk):
    """Return the Student identified by *pk* as a JSON response.

    The serializer turns the model instance into a plain dict, which
    JsonResponse renders and tags with the application/json content type.
    """
    student = Student.objects.get(id=pk)
    payload = StudentSerializer(student).data
    return JsonResponse(payload)
def student_list(request):
    """Return every Student as a JSON array.

    `safe=False` lets JsonResponse emit a top-level list instead of the
    default dict-only payload.
    """
    queryset = Student.objects.all()
    payload = StudentSerializer(queryset, many=True).data
    return JsonResponse(payload, safe=False)
@csrf_exempt
def create(request):
    """Create a Student from a JSON POST body.

    On success, responds with a JSON payload ``{'msg': 'data inserted',
    'code': 200}``; on validation failure, responds with the serializer's
    errors. Non-POST requests now receive an explicit 405 JSON response
    (previously the view fell off the end and returned None, which makes
    Django raise ValueError).
    """
    if request.method == 'POST':
        json_data = request.body
        stream = io.BytesIO(json_data)
        pythondata = JSONParser().parse(stream)
        serializer = StudentSerializer(data=pythondata)
        if serializer.is_valid():
            serializer.save()
            res = {'msg': 'data inserted', 'code': 200}
            json_data = JSONRenderer().render(res)
            return HttpResponse(json_data)
        json_data = JSONRenderer().render(serializer.errors)
        return HttpResponse(json_data)
    # Bug fix: explicit response for non-POST methods instead of returning None.
    res = {'msg': 'method not allowed', 'code': 405}
    return HttpResponse(JSONRenderer().render(res), status=405)
|
normal
|
{
"blob_id": "99785ffb4b594db1fac05ca3d3f5764151b2b7b6",
"index": 103,
"step-1": "<mask token>\n\n\n@csrf_exempt\ndef create(request):\n if request.method == 'POST':\n json_data = request.body\n stream = io.BytesIO(json_data)\n pythondata = JSONParser().parse(stream)\n serializer = StudentSerializer(data=pythondata)\n if serializer.is_valid():\n serializer.save()\n res = {'msg': 'data inserted', 'code': 200}\n json_data = JSONRenderer().render(res)\n return HttpResponse(json_data)\n else:\n json_data = JSONRenderer().render(serializer.errors)\n return HttpResponse(json_data)\n",
"step-2": "<mask token>\n\n\ndef student_detail(request, pk):\n stu = Student.objects.get(id=pk)\n serializers = StudentSerializer(stu)\n return JsonResponse(serializers.data)\n\n\n<mask token>\n\n\n@csrf_exempt\ndef create(request):\n if request.method == 'POST':\n json_data = request.body\n stream = io.BytesIO(json_data)\n pythondata = JSONParser().parse(stream)\n serializer = StudentSerializer(data=pythondata)\n if serializer.is_valid():\n serializer.save()\n res = {'msg': 'data inserted', 'code': 200}\n json_data = JSONRenderer().render(res)\n return HttpResponse(json_data)\n else:\n json_data = JSONRenderer().render(serializer.errors)\n return HttpResponse(json_data)\n",
"step-3": "<mask token>\n\n\ndef student_detail(request, pk):\n stu = Student.objects.get(id=pk)\n serializers = StudentSerializer(stu)\n return JsonResponse(serializers.data)\n\n\ndef student_list(request):\n stu = Student.objects.all()\n serializers = StudentSerializer(stu, many=True)\n return JsonResponse(serializers.data, safe=False)\n\n\n@csrf_exempt\ndef create(request):\n if request.method == 'POST':\n json_data = request.body\n stream = io.BytesIO(json_data)\n pythondata = JSONParser().parse(stream)\n serializer = StudentSerializer(data=pythondata)\n if serializer.is_valid():\n serializer.save()\n res = {'msg': 'data inserted', 'code': 200}\n json_data = JSONRenderer().render(res)\n return HttpResponse(json_data)\n else:\n json_data = JSONRenderer().render(serializer.errors)\n return HttpResponse(json_data)\n",
"step-4": "import django\nfrom rest_framework import serializers\nfrom django.shortcuts import render\nfrom .models import Student\nfrom .serializiers import StudentSerializer\nfrom rest_framework.renderers import JSONRenderer\nfrom django.http import HttpResponse, JsonResponse\nimport io\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.renderers import JSONRenderer\nfrom django.views.decorators.csrf import csrf_exempt\n\n\ndef student_detail(request, pk):\n stu = Student.objects.get(id=pk)\n serializers = StudentSerializer(stu)\n return JsonResponse(serializers.data)\n\n\ndef student_list(request):\n stu = Student.objects.all()\n serializers = StudentSerializer(stu, many=True)\n return JsonResponse(serializers.data, safe=False)\n\n\n@csrf_exempt\ndef create(request):\n if request.method == 'POST':\n json_data = request.body\n stream = io.BytesIO(json_data)\n pythondata = JSONParser().parse(stream)\n serializer = StudentSerializer(data=pythondata)\n if serializer.is_valid():\n serializer.save()\n res = {'msg': 'data inserted', 'code': 200}\n json_data = JSONRenderer().render(res)\n return HttpResponse(json_data)\n else:\n json_data = JSONRenderer().render(serializer.errors)\n return HttpResponse(json_data)\n",
"step-5": "import django\nfrom rest_framework import serializers\nfrom django.shortcuts import render\nfrom .models import Student\nfrom .serializiers import StudentSerializer\nfrom rest_framework.renderers import JSONRenderer\nfrom django.http import HttpResponse,JsonResponse\n\nimport io\nfrom rest_framework.parsers import JSONParser \nfrom rest_framework.renderers import JSONRenderer\nfrom django.views.decorators.csrf import csrf_exempt\n\n# Single Model object.\n\n\n\ndef student_detail(request,pk):\n \n #Student model object\n stu = Student.objects.get(id=pk) \n #Serializers convert student model object to python dictionary\n serializers = StudentSerializer(stu)\n #JSONRenderer convert student python dictionary to json object\n # json_data = JSONRenderer().render(serializers.data)\n\n # return HttpResponse(json_data,content_type='application/json')\n\n #use simply to reduce the extra line of code\n return JsonResponse(serializers.data)\n\ndef student_list(request):\n\n #Student model object\n stu = Student.objects.all()\n #Serializers convert student model object to python dictionary\n serializers = StudentSerializer(stu,many=True)\n #JSONRenderer convert student python dictionary to json object\n # json_data = JSONRenderer().render(serializers.data)\n\n # return HttpResponse(json_data,content_type='application/json')\n return JsonResponse(serializers.data,safe=False)\n\n@csrf_exempt\ndef create(request):\n if request.method=='POST':\n json_data = request.body\n stream = io.BytesIO(json_data)\n pythondata = JSONParser().parse(stream)\n serializer = StudentSerializer(data=pythondata)\n if serializer.is_valid():\n serializer.save()\n res = {'msg':'data inserted','code':200}\n json_data = JSONRenderer().render(res)\n return HttpResponse(json_data)\n else:\n json_data = JSONRenderer().render(serializer.errors)\n return HttpResponse(json_data)\n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AbstractGraphGenerator(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AbstractGraphGenerator(object):
def generate(self, graph):
Util.abstract()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from apgl.util.Util import Util
class AbstractGraphGenerator(object):
def generate(self, graph):
Util.abstract()
<|reserved_special_token_1|>
'''
Created on 3 Jul 2009
@author: charanpal
An abstract base class which represents a graph generator. The graph generator
takes an existing empty graph and produces edges over it.
'''
from apgl.util.Util import Util
class AbstractGraphGenerator(object):
def generate(self, graph):
Util.abstract()
|
flexible
|
{
"blob_id": "e37e468d8a41b8711fb0eb4ddec7db67691f9156",
"index": 488,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass AbstractGraphGenerator(object):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AbstractGraphGenerator(object):\n\n def generate(self, graph):\n Util.abstract()\n",
"step-4": "<mask token>\nfrom apgl.util.Util import Util\n\n\nclass AbstractGraphGenerator(object):\n\n def generate(self, graph):\n Util.abstract()\n",
"step-5": "'''\r\nCreated on 3 Jul 2009\r\n\r\n@author: charanpal\r\n\r\nAn abstract base class which represents a graph generator. The graph generator\r\ntakes an existing empty graph and produces edges over it. \r\n'''\r\nfrom apgl.util.Util import Util\r\n\r\nclass AbstractGraphGenerator(object):\r\n def generate(self, graph):\r\n Util.abstract() ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def get_new_pay_records(process_at, limit=200):
with zeus_session() as session:
result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.
restaurant_id, SubsidyProcessRecord.card_id,
SubsidyProcessRecord.processed_at, SubsidyPayRecord.status
).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.
pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.
id > process_at).filter(SubsidyProcessRecord.status !=
SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.
asc()).limit(limit).all()
return result
<|reserved_special_token_0|>
def get_activity_stats(pay_record_id):
with zeus_session() as session:
results = session.query(ActivityStats.activity_id, ActivityStats.
activity_category_id, func.sum(ActivityStats.total_subsidy),
func.min(ActivityStats.date), func.max(ActivityStats.date),
func.sum(ActivityStats.quantity)).group_by(ActivityStats.
restaurant_id, ActivityStats.activity_id, ActivityStats.
activity_category_id).filter(ActivityStats.pay_record_id ==
pay_record_id).filter(ActivityStats.status == ActivityStats.
STATUS_PAY_SUCCESS).all()
return results
def get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,
activity_category_id=None):
with zeus_session() as session:
query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.
restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==
SubsidyPayRecord.STATUS_SUCCESS)
if activity_id is not None:
query.filter(SubsidyPayRecord.activity_id == activity_id)
if activity_category_id is not None:
query.filter(SubsidyPayRecord.activity_category_id ==
activity_category_id)
record_ids = query.all()
return [r[0] for r in record_ids]
<|reserved_special_token_0|>
@zeus_db_handler
def query_paylog_by_rst(restaurant_id, activity_id=None,
activity_category_id=None, offset=None, limit=None):
""" Except ActivityStats.STATUS_PENDING (未审核状态)
"""
q = session.query(ActivityStats.pay_record_id, ActivityStats.
activity_id, ActivityStats.activity_category_id, ActivityStats.
status, func.min(ActivityStats.date), func.max(ActivityStats.date),
func.sum(ActivityStats.quantity), func.sum(ActivityStats.
total_subsidy), SubsidyPayRecord.created_at, func.max(
SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,
ActivityStats.activity_id, ActivityStats.activity_category_id
).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.
pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord
.pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.
restaurant_id == restaurant_id).filter(ActivityStats.status.in_(
PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id
)
if limit is not None:
q = q.limit(min(limit, MAX_LIST_SIZE))
else:
q = q.limit(DEFAULT_LIST_SIZE)
if offset is not None:
q = q.offset(offset)
return q
@zeus_db_handler
def query_pay_records(restaurant_id, offset=None, limit=None):
q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.
restaurant_id == restaurant_id).order_by(SubsidyPayRecord.
created_at.desc())
if limit is not None:
q = q.limit(min(limit, MAX_LIST_SIZE))
else:
q = q.limit(DEFAULT_LIST_SIZE)
if offset is not None:
q = q.offset(offset)
return q.all()
@zeus_db_handler
def query_paylog(pay_record_ids, activity_id=None, activity_category_id=
None, offset=None, limit=None):
q = session.query(ActivityStats.pay_record_id, ActivityStats.
activity_id, ActivityStats.activity_category_id, ActivityStats.
status, func.min(ActivityStats.date), func.max(ActivityStats.date),
func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)
).group_by(ActivityStats.pay_record_id, ActivityStats.activity_id,
ActivityStats.activity_category_id).filter(ActivityStats.
pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(
PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id
)
if limit is not None:
q = q.limit(min(limit, MAX_LIST_SIZE))
else:
q = q.limit(DEFAULT_LIST_SIZE)
if offset is not None:
q = q.offset(offset)
return q
<|reserved_special_token_0|>
@zeus_db_handler
def count_paylog_by_rst(restaurant_id, activity_id=None,
activity_category_id=None):
""" Except ActivityStats.STATUS_PENDING (未审核状态)
"""
q = session.query(ActivityStats.id).group_by(ActivityStats.
pay_record_id, ActivityStats.activity_id, ActivityStats.
activity_category_id).filter(ActivityStats.restaurant_id ==
restaurant_id).filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id
)
return len(q.all())
<|reserved_special_token_0|>
@zeus_db_handler
def get_subsidy_record_process_time(record_ids, status):
return session.query(SubsidyProcessRecord.pay_record_id,
SubsidyProcessRecord.processed_at).filter(SubsidyProcessRecord.
pay_record_id.in_(record_ids)).filter(SubsidyProcessRecord.status ==
status).all()
<|reserved_special_token_0|>
def query_sms_send_info(start_time=None, end_time=None, phone=None,
restaurant_id=None, card_num_tail=None, status=None):
with walis_session() as session:
query = session.query(NoticeRecord)
if phone:
query = query.filter(NoticeRecord.phone == phone)
if restaurant_id:
query = query.filter(NoticeRecord.restaurant_id == restaurant_id)
if card_num_tail:
query = query.filter(NoticeRecord.card_num_tail == card_num_tail)
if status:
query = query.filter(NoticeRecord.status == status)
if not start_time:
start_time = get_today_begin_time()
if not end_time:
end_time = get_today_end_time()
query = query.filter(NoticeRecord.created_at > start_time).filter(
NoticeRecord.created_at < end_time)
return query.all()
def query_sms_send_count(start_time=None, end_time=None, status=None):
with walis_session() as session:
if not start_time:
start_time = get_today_begin_time()
if not end_time:
end_time = get_today_end_time()
query = session.query(func.count(NoticeRecord.record_id)).filter(
NoticeRecord.created_at > start_time).filter(NoticeRecord.
created_at < end_time)
if status is not None:
query = query.filter(NoticeRecord.status == status)
return query.scalar()
<|reserved_special_token_0|>
def _query_activity_stats(q, city_ids=None, restaurant_ids=None,
activity_id=None, activity_category_id=None, from_date=None, to_date=
None, statuses=None, with_subsidy=None, offset=None, limit=None):
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id
)
if city_ids is not None:
q = q.filter(ActivityStats.city_id.in_(city_ids))
if restaurant_ids is not None:
q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))
if from_date is not None:
q = q.filter(ActivityStats.date >= from_date)
if to_date is not None:
q = q.filter(ActivityStats.date <= to_date)
if statuses is not None:
q = q.filter(ActivityStats.status.in_(statuses))
if with_subsidy is not None:
if with_subsidy:
q = q.filter(ActivityStats.total_subsidy > 0)
else:
q = q.filter(ActivityStats.total_subsidy == 0)
if offset is not None:
q = q.offset(offset)
q = q.limit(1000)
return q
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_new_pay_records(process_at, limit=200):
with zeus_session() as session:
result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.
restaurant_id, SubsidyProcessRecord.card_id,
SubsidyProcessRecord.processed_at, SubsidyPayRecord.status
).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.
pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.
id > process_at).filter(SubsidyProcessRecord.status !=
SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.
asc()).limit(limit).all()
return result
<|reserved_special_token_0|>
def get_activity_stats(pay_record_id):
with zeus_session() as session:
results = session.query(ActivityStats.activity_id, ActivityStats.
activity_category_id, func.sum(ActivityStats.total_subsidy),
func.min(ActivityStats.date), func.max(ActivityStats.date),
func.sum(ActivityStats.quantity)).group_by(ActivityStats.
restaurant_id, ActivityStats.activity_id, ActivityStats.
activity_category_id).filter(ActivityStats.pay_record_id ==
pay_record_id).filter(ActivityStats.status == ActivityStats.
STATUS_PAY_SUCCESS).all()
return results
def get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,
activity_category_id=None):
with zeus_session() as session:
query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.
restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==
SubsidyPayRecord.STATUS_SUCCESS)
if activity_id is not None:
query.filter(SubsidyPayRecord.activity_id == activity_id)
if activity_category_id is not None:
query.filter(SubsidyPayRecord.activity_category_id ==
activity_category_id)
record_ids = query.all()
return [r[0] for r in record_ids]
<|reserved_special_token_0|>
@zeus_db_handler
def query_paylog_by_rst(restaurant_id, activity_id=None,
activity_category_id=None, offset=None, limit=None):
""" Except ActivityStats.STATUS_PENDING (未审核状态)
"""
q = session.query(ActivityStats.pay_record_id, ActivityStats.
activity_id, ActivityStats.activity_category_id, ActivityStats.
status, func.min(ActivityStats.date), func.max(ActivityStats.date),
func.sum(ActivityStats.quantity), func.sum(ActivityStats.
total_subsidy), SubsidyPayRecord.created_at, func.max(
SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,
ActivityStats.activity_id, ActivityStats.activity_category_id
).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.
pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord
.pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.
restaurant_id == restaurant_id).filter(ActivityStats.status.in_(
PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id
)
if limit is not None:
q = q.limit(min(limit, MAX_LIST_SIZE))
else:
q = q.limit(DEFAULT_LIST_SIZE)
if offset is not None:
q = q.offset(offset)
return q
@zeus_db_handler
def query_pay_records(restaurant_id, offset=None, limit=None):
q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.
restaurant_id == restaurant_id).order_by(SubsidyPayRecord.
created_at.desc())
if limit is not None:
q = q.limit(min(limit, MAX_LIST_SIZE))
else:
q = q.limit(DEFAULT_LIST_SIZE)
if offset is not None:
q = q.offset(offset)
return q.all()
@zeus_db_handler
def query_paylog(pay_record_ids, activity_id=None, activity_category_id=
None, offset=None, limit=None):
q = session.query(ActivityStats.pay_record_id, ActivityStats.
activity_id, ActivityStats.activity_category_id, ActivityStats.
status, func.min(ActivityStats.date), func.max(ActivityStats.date),
func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)
).group_by(ActivityStats.pay_record_id, ActivityStats.activity_id,
ActivityStats.activity_category_id).filter(ActivityStats.
pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(
PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id
)
if limit is not None:
q = q.limit(min(limit, MAX_LIST_SIZE))
else:
q = q.limit(DEFAULT_LIST_SIZE)
if offset is not None:
q = q.offset(offset)
return q
<|reserved_special_token_0|>
@zeus_db_handler
def count_paylog_by_rst(restaurant_id, activity_id=None,
activity_category_id=None):
""" Except ActivityStats.STATUS_PENDING (未审核状态)
"""
q = session.query(ActivityStats.id).group_by(ActivityStats.
pay_record_id, ActivityStats.activity_id, ActivityStats.
activity_category_id).filter(ActivityStats.restaurant_id ==
restaurant_id).filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id
)
return len(q.all())
<|reserved_special_token_0|>
@zeus_db_handler
def get_subsidy_record_process_time(record_ids, status):
return session.query(SubsidyProcessRecord.pay_record_id,
SubsidyProcessRecord.processed_at).filter(SubsidyProcessRecord.
pay_record_id.in_(record_ids)).filter(SubsidyProcessRecord.status ==
status).all()
def get_pay_activities_by_restaurant(rst_id):
with zeus_session() as session:
query = session.query(ActivityStats.activity_id, ActivityStats.
activity_category_id).group_by(ActivityStats.activity_id,
ActivityStats.activity_category_id).filter(ActivityStats.
restaurant_id == rst_id)
return query.all()
def query_sms_send_info(start_time=None, end_time=None, phone=None,
restaurant_id=None, card_num_tail=None, status=None):
with walis_session() as session:
query = session.query(NoticeRecord)
if phone:
query = query.filter(NoticeRecord.phone == phone)
if restaurant_id:
query = query.filter(NoticeRecord.restaurant_id == restaurant_id)
if card_num_tail:
query = query.filter(NoticeRecord.card_num_tail == card_num_tail)
if status:
query = query.filter(NoticeRecord.status == status)
if not start_time:
start_time = get_today_begin_time()
if not end_time:
end_time = get_today_end_time()
query = query.filter(NoticeRecord.created_at > start_time).filter(
NoticeRecord.created_at < end_time)
return query.all()
def query_sms_send_count(start_time=None, end_time=None, status=None):
with walis_session() as session:
if not start_time:
start_time = get_today_begin_time()
if not end_time:
end_time = get_today_end_time()
query = session.query(func.count(NoticeRecord.record_id)).filter(
NoticeRecord.created_at > start_time).filter(NoticeRecord.
created_at < end_time)
if status is not None:
query = query.filter(NoticeRecord.status == status)
return query.scalar()
@zeus_db_handler
def query_auto_pay_activity_stats_result(city_ids=None, restaurant_ids=None,
activity_id=None, activity_category_id=None, from_date=None, to_date=
None, statuses=None, offset=None, limit=None, with_subsidy=None):
q = session.query(ActivityStats.restaurant_id, ActivityStats.
activity_id, ActivityStats.activity_category_id, func.sum(
ActivityStats.quantity), func.sum(ActivityStats.total_subsidy),
func.min(ActivityStats.date), func.max(ActivityStats.date)).group_by(
ActivityStats.restaurant_id, ActivityStats.activity_id,
ActivityStats.activity_category_id).order_by(ActivityStats.
restaurant_id.desc())
return _query_activity_stats(q, city_ids, restaurant_ids, activity_id,
activity_category_id, from_date, to_date, statuses, with_subsidy,
offset, limit)
def _query_activity_stats(q, city_ids=None, restaurant_ids=None,
activity_id=None, activity_category_id=None, from_date=None, to_date=
None, statuses=None, with_subsidy=None, offset=None, limit=None):
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id
)
if city_ids is not None:
q = q.filter(ActivityStats.city_id.in_(city_ids))
if restaurant_ids is not None:
q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))
if from_date is not None:
q = q.filter(ActivityStats.date >= from_date)
if to_date is not None:
q = q.filter(ActivityStats.date <= to_date)
if statuses is not None:
q = q.filter(ActivityStats.status.in_(statuses))
if with_subsidy is not None:
if with_subsidy:
q = q.filter(ActivityStats.total_subsidy > 0)
else:
q = q.filter(ActivityStats.total_subsidy == 0)
if offset is not None:
q = q.offset(offset)
q = q.limit(1000)
return q
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_new_pay_records(process_at, limit=200):
with zeus_session() as session:
result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.
restaurant_id, SubsidyProcessRecord.card_id,
SubsidyProcessRecord.processed_at, SubsidyPayRecord.status
).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.
pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.
id > process_at).filter(SubsidyProcessRecord.status !=
SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.
asc()).limit(limit).all()
return result
def get_success_pay_records(record_ids):
with zeus_session() as session:
result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.
restaurant_id, SubsidyProcessRecord.card_id,
SubsidyProcessRecord.processed_at).outerjoin(SubsidyProcessRecord,
SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id).filter(
SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS).filter(
SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL
).filter(SubsidyPayRecord.id.in_(record_ids)).all()
return result
def get_activity_stats(pay_record_id):
with zeus_session() as session:
results = session.query(ActivityStats.activity_id, ActivityStats.
activity_category_id, func.sum(ActivityStats.total_subsidy),
func.min(ActivityStats.date), func.max(ActivityStats.date),
func.sum(ActivityStats.quantity)).group_by(ActivityStats.
restaurant_id, ActivityStats.activity_id, ActivityStats.
activity_category_id).filter(ActivityStats.pay_record_id ==
pay_record_id).filter(ActivityStats.status == ActivityStats.
STATUS_PAY_SUCCESS).all()
return results
def get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,
activity_category_id=None):
with zeus_session() as session:
query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.
restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==
SubsidyPayRecord.STATUS_SUCCESS)
if activity_id is not None:
query.filter(SubsidyPayRecord.activity_id == activity_id)
if activity_category_id is not None:
query.filter(SubsidyPayRecord.activity_category_id ==
activity_category_id)
record_ids = query.all()
return [r[0] for r in record_ids]
<|reserved_special_token_0|>
@zeus_db_handler
def query_paylog_by_rst(restaurant_id, activity_id=None,
activity_category_id=None, offset=None, limit=None):
""" Except ActivityStats.STATUS_PENDING (未审核状态)
"""
q = session.query(ActivityStats.pay_record_id, ActivityStats.
activity_id, ActivityStats.activity_category_id, ActivityStats.
status, func.min(ActivityStats.date), func.max(ActivityStats.date),
func.sum(ActivityStats.quantity), func.sum(ActivityStats.
total_subsidy), SubsidyPayRecord.created_at, func.max(
SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,
ActivityStats.activity_id, ActivityStats.activity_category_id
).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.
pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord
.pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.
restaurant_id == restaurant_id).filter(ActivityStats.status.in_(
PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id
)
if limit is not None:
q = q.limit(min(limit, MAX_LIST_SIZE))
else:
q = q.limit(DEFAULT_LIST_SIZE)
if offset is not None:
q = q.offset(offset)
return q
@zeus_db_handler
def query_pay_records(restaurant_id, offset=None, limit=None):
q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.
restaurant_id == restaurant_id).order_by(SubsidyPayRecord.
created_at.desc())
if limit is not None:
q = q.limit(min(limit, MAX_LIST_SIZE))
else:
q = q.limit(DEFAULT_LIST_SIZE)
if offset is not None:
q = q.offset(offset)
return q.all()
@zeus_db_handler
def query_paylog(pay_record_ids, activity_id=None, activity_category_id=
None, offset=None, limit=None):
q = session.query(ActivityStats.pay_record_id, ActivityStats.
activity_id, ActivityStats.activity_category_id, ActivityStats.
status, func.min(ActivityStats.date), func.max(ActivityStats.date),
func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)
).group_by(ActivityStats.pay_record_id, ActivityStats.activity_id,
ActivityStats.activity_category_id).filter(ActivityStats.
pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(
PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id
)
if limit is not None:
q = q.limit(min(limit, MAX_LIST_SIZE))
else:
q = q.limit(DEFAULT_LIST_SIZE)
if offset is not None:
q = q.offset(offset)
return q
<|reserved_special_token_0|>
@zeus_db_handler
def count_paylog_by_rst(restaurant_id, activity_id=None,
activity_category_id=None):
""" Except ActivityStats.STATUS_PENDING (未审核状态)
"""
q = session.query(ActivityStats.id).group_by(ActivityStats.
pay_record_id, ActivityStats.activity_id, ActivityStats.
activity_category_id).filter(ActivityStats.restaurant_id ==
restaurant_id).filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id
)
return len(q.all())
<|reserved_special_token_0|>
@zeus_db_handler
def get_subsidy_record_process_time(record_ids, status):
return session.query(SubsidyProcessRecord.pay_record_id,
SubsidyProcessRecord.processed_at).filter(SubsidyProcessRecord.
pay_record_id.in_(record_ids)).filter(SubsidyProcessRecord.status ==
status).all()
def get_pay_activities_by_restaurant(rst_id):
with zeus_session() as session:
query = session.query(ActivityStats.activity_id, ActivityStats.
activity_category_id).group_by(ActivityStats.activity_id,
ActivityStats.activity_category_id).filter(ActivityStats.
restaurant_id == rst_id)
return query.all()
def query_sms_send_info(start_time=None, end_time=None, phone=None,
restaurant_id=None, card_num_tail=None, status=None):
with walis_session() as session:
query = session.query(NoticeRecord)
if phone:
query = query.filter(NoticeRecord.phone == phone)
if restaurant_id:
query = query.filter(NoticeRecord.restaurant_id == restaurant_id)
if card_num_tail:
query = query.filter(NoticeRecord.card_num_tail == card_num_tail)
if status:
query = query.filter(NoticeRecord.status == status)
if not start_time:
start_time = get_today_begin_time()
if not end_time:
end_time = get_today_end_time()
query = query.filter(NoticeRecord.created_at > start_time).filter(
NoticeRecord.created_at < end_time)
return query.all()
def query_sms_send_count(start_time=None, end_time=None, status=None):
with walis_session() as session:
if not start_time:
start_time = get_today_begin_time()
if not end_time:
end_time = get_today_end_time()
query = session.query(func.count(NoticeRecord.record_id)).filter(
NoticeRecord.created_at > start_time).filter(NoticeRecord.
created_at < end_time)
if status is not None:
query = query.filter(NoticeRecord.status == status)
return query.scalar()
@zeus_db_handler
def query_auto_pay_activity_stats_result(city_ids=None, restaurant_ids=None,
activity_id=None, activity_category_id=None, from_date=None, to_date=
None, statuses=None, offset=None, limit=None, with_subsidy=None):
q = session.query(ActivityStats.restaurant_id, ActivityStats.
activity_id, ActivityStats.activity_category_id, func.sum(
ActivityStats.quantity), func.sum(ActivityStats.total_subsidy),
func.min(ActivityStats.date), func.max(ActivityStats.date)).group_by(
ActivityStats.restaurant_id, ActivityStats.activity_id,
ActivityStats.activity_category_id).order_by(ActivityStats.
restaurant_id.desc())
return _query_activity_stats(q, city_ids, restaurant_ids, activity_id,
activity_category_id, from_date, to_date, statuses, with_subsidy,
offset, limit)
def _query_activity_stats(q, city_ids=None, restaurant_ids=None,
activity_id=None, activity_category_id=None, from_date=None, to_date=
None, statuses=None, with_subsidy=None, offset=None, limit=None):
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id
)
if city_ids is not None:
q = q.filter(ActivityStats.city_id.in_(city_ids))
if restaurant_ids is not None:
q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))
if from_date is not None:
q = q.filter(ActivityStats.date >= from_date)
if to_date is not None:
q = q.filter(ActivityStats.date <= to_date)
if statuses is not None:
q = q.filter(ActivityStats.status.in_(statuses))
if with_subsidy is not None:
if with_subsidy:
q = q.filter(ActivityStats.total_subsidy > 0)
else:
q = q.filter(ActivityStats.total_subsidy == 0)
if offset is not None:
q = q.offset(offset)
q = q.limit(1000)
return q
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_new_pay_records(process_at, limit=200):
with zeus_session() as session:
result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.
restaurant_id, SubsidyProcessRecord.card_id,
SubsidyProcessRecord.processed_at, SubsidyPayRecord.status
).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.
pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.
id > process_at).filter(SubsidyProcessRecord.status !=
SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.
asc()).limit(limit).all()
return result
def get_success_pay_records(record_ids):
with zeus_session() as session:
result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.
restaurant_id, SubsidyProcessRecord.card_id,
SubsidyProcessRecord.processed_at).outerjoin(SubsidyProcessRecord,
SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id).filter(
SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS).filter(
SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL
).filter(SubsidyPayRecord.id.in_(record_ids)).all()
return result
def get_activity_stats(pay_record_id):
with zeus_session() as session:
results = session.query(ActivityStats.activity_id, ActivityStats.
activity_category_id, func.sum(ActivityStats.total_subsidy),
func.min(ActivityStats.date), func.max(ActivityStats.date),
func.sum(ActivityStats.quantity)).group_by(ActivityStats.
restaurant_id, ActivityStats.activity_id, ActivityStats.
activity_category_id).filter(ActivityStats.pay_record_id ==
pay_record_id).filter(ActivityStats.status == ActivityStats.
STATUS_PAY_SUCCESS).all()
return results
def get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,
activity_category_id=None):
with zeus_session() as session:
query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.
restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==
SubsidyPayRecord.STATUS_SUCCESS)
if activity_id is not None:
query.filter(SubsidyPayRecord.activity_id == activity_id)
if activity_category_id is not None:
query.filter(SubsidyPayRecord.activity_category_id ==
activity_category_id)
record_ids = query.all()
return [r[0] for r in record_ids]
<|reserved_special_token_0|>
@zeus_db_handler
def query_paylog_by_rst(restaurant_id, activity_id=None,
activity_category_id=None, offset=None, limit=None):
""" Except ActivityStats.STATUS_PENDING (未审核状态)
"""
q = session.query(ActivityStats.pay_record_id, ActivityStats.
activity_id, ActivityStats.activity_category_id, ActivityStats.
status, func.min(ActivityStats.date), func.max(ActivityStats.date),
func.sum(ActivityStats.quantity), func.sum(ActivityStats.
total_subsidy), SubsidyPayRecord.created_at, func.max(
SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,
ActivityStats.activity_id, ActivityStats.activity_category_id
).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.
pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord
.pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.
restaurant_id == restaurant_id).filter(ActivityStats.status.in_(
PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id
)
if limit is not None:
q = q.limit(min(limit, MAX_LIST_SIZE))
else:
q = q.limit(DEFAULT_LIST_SIZE)
if offset is not None:
q = q.offset(offset)
return q
@zeus_db_handler
def query_pay_records(restaurant_id, offset=None, limit=None):
q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.
restaurant_id == restaurant_id).order_by(SubsidyPayRecord.
created_at.desc())
if limit is not None:
q = q.limit(min(limit, MAX_LIST_SIZE))
else:
q = q.limit(DEFAULT_LIST_SIZE)
if offset is not None:
q = q.offset(offset)
return q.all()
@zeus_db_handler
def query_paylog(pay_record_ids, activity_id=None, activity_category_id=
None, offset=None, limit=None):
q = session.query(ActivityStats.pay_record_id, ActivityStats.
activity_id, ActivityStats.activity_category_id, ActivityStats.
status, func.min(ActivityStats.date), func.max(ActivityStats.date),
func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)
).group_by(ActivityStats.pay_record_id, ActivityStats.activity_id,
ActivityStats.activity_category_id).filter(ActivityStats.
pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(
PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id
)
if limit is not None:
q = q.limit(min(limit, MAX_LIST_SIZE))
else:
q = q.limit(DEFAULT_LIST_SIZE)
if offset is not None:
q = q.offset(offset)
return q
@zeus_db_handler
def get_max_subsidy_process_record_ids(pay_record_ids):
q = session.query(func.max(SubsidyProcessRecord.id)).group_by(
SubsidyProcessRecord.pay_record_id).filter(SubsidyProcessRecord.
pay_record_id.in_(pay_record_ids))
return q
@zeus_db_handler
def count_paylog_by_rst(restaurant_id, activity_id=None,
                        activity_category_id=None):
    """Count paylog groups of a restaurant.

    Rows in ActivityStats.STATUS_PENDING (not yet reviewed) are excluded,
    matching PAYLOG_STATUS_LIST.
    """
    grouped = (session.query(ActivityStats.id)
               .group_by(ActivityStats.pay_record_id,
                         ActivityStats.activity_id,
                         ActivityStats.activity_category_id)
               .filter(ActivityStats.restaurant_id == restaurant_id)
               .filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST)))
    if activity_id is not None:
        grouped = grouped.filter(ActivityStats.activity_id == activity_id)
    if activity_category_id is not None:
        grouped = grouped.filter(
            ActivityStats.activity_category_id == activity_category_id)
    # Counting the fetched groups directly; COUNT over a GROUP BY would
    # require a subquery.
    return len(grouped.all())
@zeus_db_handler
def query_process_records_by_ids(process_ids):
    """Fetch SubsidyProcessRecord rows whose id is in ``process_ids``."""
    return session.query(SubsidyProcessRecord).filter(
        SubsidyProcessRecord.id.in_(process_ids)).all()
@zeus_db_handler
def get_subsidy_record_process_time(record_ids, status):
    """Return (pay_record_id, processed_at) pairs for records in ``status``."""
    q = (session.query(SubsidyProcessRecord.pay_record_id,
                       SubsidyProcessRecord.processed_at)
         .filter(SubsidyProcessRecord.pay_record_id.in_(record_ids))
         .filter(SubsidyProcessRecord.status == status))
    return q.all()
def get_pay_activities_by_restaurant(rst_id):
    """Distinct (activity_id, activity_category_id) pairs of a restaurant."""
    with zeus_session() as session:
        pairs = (session.query(ActivityStats.activity_id,
                               ActivityStats.activity_category_id)
                 .group_by(ActivityStats.activity_id,
                           ActivityStats.activity_category_id)
                 .filter(ActivityStats.restaurant_id == rst_id))
        return pairs.all()
def query_sms_send_info(start_time=None, end_time=None, phone=None,
                        restaurant_id=None, card_num_tail=None, status=None):
    """List SMS payment-notice records matching the given filters.

    Missing ``start_time``/``end_time`` default to today's boundaries.
    Falsy filter values are treated as "not set".
    """
    with walis_session() as session:
        notices = session.query(NoticeRecord)
        if phone:
            notices = notices.filter(NoticeRecord.phone == phone)
        if restaurant_id:
            notices = notices.filter(NoticeRecord.restaurant_id == restaurant_id)
        if card_num_tail:
            notices = notices.filter(NoticeRecord.card_num_tail == card_num_tail)
        if status:
            notices = notices.filter(NoticeRecord.status == status)
        start_time = start_time or get_today_begin_time()
        end_time = end_time or get_today_end_time()
        notices = (notices.filter(NoticeRecord.created_at > start_time)
                   .filter(NoticeRecord.created_at < end_time))
        return notices.all()
def query_sms_send_count(start_time=None, end_time=None, status=None):
    """Count SMS notice records in a time window (defaults to today)."""
    with walis_session() as session:
        start_time = start_time or get_today_begin_time()
        end_time = end_time or get_today_end_time()
        counted = (session.query(func.count(NoticeRecord.record_id))
                   .filter(NoticeRecord.created_at > start_time)
                   .filter(NoticeRecord.created_at < end_time))
        if status is not None:
            counted = counted.filter(NoticeRecord.status == status)
        return counted.scalar()
@zeus_db_handler
def query_auto_pay_activity_stats_result(city_ids=None, restaurant_ids=None,
                                         activity_id=None,
                                         activity_category_id=None,
                                         from_date=None, to_date=None,
                                         statuses=None, offset=None,
                                         limit=None, with_subsidy=None):
    """Per-(restaurant, activity, category) aggregates for auto pay."""
    grouped = (session.query(ActivityStats.restaurant_id,
                             ActivityStats.activity_id,
                             ActivityStats.activity_category_id,
                             func.sum(ActivityStats.quantity),
                             func.sum(ActivityStats.total_subsidy),
                             func.min(ActivityStats.date),
                             func.max(ActivityStats.date))
               .group_by(ActivityStats.restaurant_id,
                         ActivityStats.activity_id,
                         ActivityStats.activity_category_id)
               .order_by(ActivityStats.restaurant_id.desc()))
    return _query_activity_stats(grouped, city_ids, restaurant_ids,
                                 activity_id, activity_category_id,
                                 from_date, to_date, statuses,
                                 with_subsidy, offset, limit)
def _query_activity_stats(q, city_ids=None, restaurant_ids=None,
activity_id=None, activity_category_id=None, from_date=None, to_date=
None, statuses=None, with_subsidy=None, offset=None, limit=None):
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id
)
if city_ids is not None:
q = q.filter(ActivityStats.city_id.in_(city_ids))
if restaurant_ids is not None:
q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))
if from_date is not None:
q = q.filter(ActivityStats.date >= from_date)
if to_date is not None:
q = q.filter(ActivityStats.date <= to_date)
if statuses is not None:
q = q.filter(ActivityStats.status.in_(statuses))
if with_subsidy is not None:
if with_subsidy:
q = q.filter(ActivityStats.total_subsidy > 0)
else:
q = q.filter(ActivityStats.total_subsidy == 0)
if offset is not None:
q = q.offset(offset)
q = q.limit(1000)
return q
<|reserved_special_token_1|>
#!/usr/bin/env python2
# coding=utf8
from __future__ import absolute_import, division, print_function
from sqlalchemy import func
from walis.model.walis import walis_session
from walis.model.zeus import zeus_session, zeus_db_handler
from walis.model.zeus.activity import (
SubsidyProcessRecord,
SubsidyPayRecord,
ActivityStats,
)
from walis.model.walis.activity import PaymentNoticeRecord as NoticeRecord
from walis.utils.time import get_today_begin_time, get_today_end_time
MAX_LIST_SIZE = 1000
DEFAULT_LIST_SIZE = 200
def get_new_pay_records(process_at, limit=200):
    """Fetch up to ``limit`` pay records with id greater than ``process_at``.

    Joined with their process records; rows whose process record failed
    are excluded.  NOTE(review): the != STATUS_FAIL filter on the
    outer-joined table also drops records with no process row at all —
    presumably intended, confirm with callers.
    """
    with zeus_session() as session:
        q = (session.query(SubsidyPayRecord.id,
                           SubsidyPayRecord.restaurant_id,
                           SubsidyProcessRecord.card_id,
                           SubsidyProcessRecord.processed_at,
                           SubsidyPayRecord.status)
             .outerjoin(SubsidyProcessRecord,
                        SubsidyProcessRecord.pay_record_id ==
                        SubsidyPayRecord.id)
             .filter(SubsidyPayRecord.id > process_at)
             .filter(SubsidyProcessRecord.status !=
                     SubsidyProcessRecord.STATUS_FAIL)
             .order_by(SubsidyPayRecord.id.asc())
             .limit(limit))
        return q.all()
def get_success_pay_records(record_ids):
    """Fetch successful pay records (with card/process info) by id."""
    with zeus_session() as session:
        q = (session.query(SubsidyPayRecord.id,
                           SubsidyPayRecord.restaurant_id,
                           SubsidyProcessRecord.card_id,
                           SubsidyProcessRecord.processed_at)
             .outerjoin(SubsidyProcessRecord,
                        SubsidyProcessRecord.pay_record_id ==
                        SubsidyPayRecord.id)
             .filter(SubsidyPayRecord.status ==
                     SubsidyPayRecord.STATUS_SUCCESS)
             .filter(SubsidyProcessRecord.status !=
                     SubsidyProcessRecord.STATUS_FAIL)
             .filter(SubsidyPayRecord.id.in_(record_ids)))
        return q.all()
def get_activity_stats(pay_record_id):
    """Aggregate successfully paid activity stats of one pay record.

    One row per (restaurant, activity, category) group with total
    subsidy, date range and total quantity.
    """
    with zeus_session() as session:
        q = (session.query(ActivityStats.activity_id,
                           ActivityStats.activity_category_id,
                           func.sum(ActivityStats.total_subsidy),
                           func.min(ActivityStats.date),
                           func.max(ActivityStats.date),
                           func.sum(ActivityStats.quantity))
             .group_by(ActivityStats.restaurant_id,
                       ActivityStats.activity_id,
                       ActivityStats.activity_category_id)
             .filter(ActivityStats.pay_record_id == pay_record_id)
             .filter(ActivityStats.status == ActivityStats.STATUS_PAY_SUCCESS))
        return q.all()
def get_success_record_ids_by_restaurant(
        restaurant_id, activity_id=None, activity_category_id=None):
    """Return ids of successfully paid SubsidyPayRecord rows of a restaurant.

    ``activity_id`` / ``activity_category_id`` further narrow the result
    when given.
    """
    with zeus_session() as session:
        query = session.query(SubsidyPayRecord.id). \
            filter(SubsidyPayRecord.restaurant_id == restaurant_id). \
            filter(SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS)
        # BUG FIX: Query.filter() returns a *new* query object; the
        # original code discarded the return value, so these optional
        # filters were silently ignored.
        if activity_id is not None:
            query = query.filter(SubsidyPayRecord.activity_id == activity_id)
        if activity_category_id is not None:
            query = query.filter(
                SubsidyPayRecord.activity_category_id == activity_category_id)
        record_ids = query.all()
    return [r[0] for r in record_ids]
# Statuses that appear in pay logs: everything from "pay record generated"
# onward (success or failure).  STATUS_PENDING rows are deliberately absent.
PAYLOG_STATUS_LIST = {
    ActivityStats.STATUS_PAY_RECORD_GENERATED,
    ActivityStats.STATUS_PAY_SUCCESS,
    ActivityStats.STATUS_PAY_FAIL,
}
@zeus_db_handler
def query_paylog_by_rst(restaurant_id, activity_id=None,
                        activity_category_id=None, offset=None, limit=None):
    """Aggregate pay logs of one restaurant, newest pay record first.

    Rows in ActivityStats.STATUS_PENDING (not yet reviewed) are excluded.
    Each row carries the group keys, status, date range, totals, the pay
    record's creation time and the latest process-record id.
    """
    paylog = (
        session.query(ActivityStats.pay_record_id,
                      ActivityStats.activity_id,
                      ActivityStats.activity_category_id,
                      ActivityStats.status,
                      func.min(ActivityStats.date),
                      func.max(ActivityStats.date),
                      func.sum(ActivityStats.quantity),
                      func.sum(ActivityStats.total_subsidy),
                      SubsidyPayRecord.created_at,
                      func.max(SubsidyProcessRecord.id))
        .group_by(ActivityStats.pay_record_id,
                  ActivityStats.activity_id,
                  ActivityStats.activity_category_id)
        .outerjoin(SubsidyPayRecord,
                   SubsidyPayRecord.id == ActivityStats.pay_record_id)
        .outerjoin(SubsidyProcessRecord,
                   SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id)
        .filter(ActivityStats.restaurant_id == restaurant_id)
        .filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))
        .order_by(SubsidyPayRecord.created_at.desc()))
    if activity_id is not None:
        paylog = paylog.filter(ActivityStats.activity_id == activity_id)
    if activity_category_id is not None:
        paylog = paylog.filter(
            ActivityStats.activity_category_id == activity_category_id)
    paylog = paylog.limit(DEFAULT_LIST_SIZE if limit is None
                          else min(limit, MAX_LIST_SIZE))
    if offset is not None:
        paylog = paylog.offset(offset)
    return paylog
@zeus_db_handler
def query_pay_records(restaurant_id, offset=None, limit=None):
    """Return a page of SubsidyPayRecord rows, most recent first."""
    q = (session.query(SubsidyPayRecord)
         .filter(SubsidyPayRecord.restaurant_id == restaurant_id)
         .order_by(SubsidyPayRecord.created_at.desc()))
    if limit is None:
        q = q.limit(DEFAULT_LIST_SIZE)
    else:
        q = q.limit(min(limit, MAX_LIST_SIZE))
    if offset is not None:
        q = q.offset(offset)
    return q.all()
@zeus_db_handler
def query_paylog(pay_record_ids, activity_id=None, activity_category_id=None,
                 offset=None, limit=None):
    """Aggregate paylog rows for the given pay record ids.

    Grouped by (pay_record_id, activity_id, activity_category_id) and
    restricted to PAYLOG_STATUS_LIST.  Returns the lazy query.
    """
    q = (session.query(ActivityStats.pay_record_id,
                       ActivityStats.activity_id,
                       ActivityStats.activity_category_id,
                       ActivityStats.status,
                       func.min(ActivityStats.date),
                       func.max(ActivityStats.date),
                       func.sum(ActivityStats.quantity),
                       func.sum(ActivityStats.total_subsidy))
         .group_by(ActivityStats.pay_record_id,
                   ActivityStats.activity_id,
                   ActivityStats.activity_category_id)
         .filter(ActivityStats.pay_record_id.in_(pay_record_ids))
         .filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))
         .order_by(ActivityStats.created_at.desc()))
    if activity_id is not None:
        q = q.filter(ActivityStats.activity_id == activity_id)
    if activity_category_id is not None:
        q = q.filter(
            ActivityStats.activity_category_id == activity_category_id)
    if limit is None:
        q = q.limit(DEFAULT_LIST_SIZE)
    else:
        q = q.limit(min(limit, MAX_LIST_SIZE))
    if offset is not None:
        q = q.offset(offset)
    return q
@zeus_db_handler
def get_max_subsidy_process_record_ids(pay_record_ids):
    """Latest (max id) process record per pay record, as a lazy query."""
    q = session.query(func.max(SubsidyProcessRecord.id))
    q = q.group_by(SubsidyProcessRecord.pay_record_id)
    return q.filter(SubsidyProcessRecord.pay_record_id.in_(pay_record_ids))
@zeus_db_handler
def count_paylog_by_rst(restaurant_id, activity_id=None,
                        activity_category_id=None):
    """Number of paylog groups of one restaurant.

    ActivityStats.STATUS_PENDING (unreviewed) rows are excluded via
    PAYLOG_STATUS_LIST.
    """
    q = (session.query(ActivityStats.id)
         .group_by(ActivityStats.pay_record_id,
                   ActivityStats.activity_id,
                   ActivityStats.activity_category_id)
         .filter(ActivityStats.restaurant_id == restaurant_id)
         .filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST)))
    if activity_id is not None:
        q = q.filter(ActivityStats.activity_id == activity_id)
    if activity_category_id is not None:
        q = q.filter(
            ActivityStats.activity_category_id == activity_category_id)
    # len() over the fetched groups; COUNT with GROUP BY needs a subquery.
    return len(q.all())
@zeus_db_handler
def query_process_records_by_ids(process_ids):
    """Load SubsidyProcessRecord objects for the given primary keys."""
    matching = session.query(SubsidyProcessRecord).filter(
        SubsidyProcessRecord.id.in_(process_ids))
    return matching.all()
@zeus_db_handler
def get_subsidy_record_process_time(record_ids, status):
    """(pay_record_id, processed_at) pairs for records in ``status``."""
    pairs = (session.query(SubsidyProcessRecord.pay_record_id,
                           SubsidyProcessRecord.processed_at)
             .filter(SubsidyProcessRecord.pay_record_id.in_(record_ids))
             .filter(SubsidyProcessRecord.status == status))
    return pairs.all()
def get_pay_activities_by_restaurant(rst_id):
    """Distinct (activity_id, activity_category_id) pairs for ``rst_id``."""
    with zeus_session() as session:
        q = (session.query(ActivityStats.activity_id,
                           ActivityStats.activity_category_id)
             .group_by(ActivityStats.activity_id,
                       ActivityStats.activity_category_id)
             .filter(ActivityStats.restaurant_id == rst_id))
        return q.all()
# javis model begins
def query_sms_send_info(start_time=None, end_time=None, phone=None,
                        restaurant_id=None, card_num_tail=None, status=None):
    """Fetch SMS payment-notice records matching the given filters.

    Missing ``start_time``/``end_time`` default to today's boundaries;
    falsy filter values mean "no filter".
    """
    with walis_session() as session:
        q = session.query(NoticeRecord)
        if phone:
            q = q.filter(NoticeRecord.phone == phone)
        if restaurant_id:
            q = q.filter(NoticeRecord.restaurant_id == restaurant_id)
        if card_num_tail:
            q = q.filter(NoticeRecord.card_num_tail == card_num_tail)
        if status:
            q = q.filter(NoticeRecord.status == status)
        start_time = start_time or get_today_begin_time()
        end_time = end_time or get_today_end_time()
        q = (q.filter(NoticeRecord.created_at > start_time)
             .filter(NoticeRecord.created_at < end_time))
        return q.all()
def query_sms_send_count(start_time=None, end_time=None, status=None):
    """Number of SMS notice records in a window (defaults to today)."""
    with walis_session() as session:
        start_time = start_time or get_today_begin_time()
        end_time = end_time or get_today_end_time()
        q = (session.query(func.count(NoticeRecord.record_id))
             .filter(NoticeRecord.created_at > start_time)
             .filter(NoticeRecord.created_at < end_time))
        if status is not None:
            q = q.filter(NoticeRecord.status == status)
        return q.scalar()
@zeus_db_handler
def query_auto_pay_activity_stats_result(
        city_ids=None, restaurant_ids=None, activity_id=None,
        activity_category_id=None, from_date=None, to_date=None,
        statuses=None, offset=None, limit=None, with_subsidy=None):
    """Aggregate auto-pay stats per (restaurant, activity, category).

    Builds the grouped base query and delegates all optional filtering
    and paging to ``_query_activity_stats``.
    """
    base = (session.query(ActivityStats.restaurant_id,
                          ActivityStats.activity_id,
                          ActivityStats.activity_category_id,
                          func.sum(ActivityStats.quantity),
                          func.sum(ActivityStats.total_subsidy),
                          func.min(ActivityStats.date),
                          func.max(ActivityStats.date))
            .group_by(ActivityStats.restaurant_id,
                      ActivityStats.activity_id,
                      ActivityStats.activity_category_id)
            .order_by(ActivityStats.restaurant_id.desc()))
    return _query_activity_stats(base, city_ids, restaurant_ids, activity_id,
                                 activity_category_id, from_date, to_date,
                                 statuses, with_subsidy, offset, limit)
def _query_activity_stats(
q, city_ids=None, restaurant_ids=None, activity_id=None,
activity_category_id=None, from_date=None, to_date=None, statuses=None,
with_subsidy=None, offset=None, limit=None):
if activity_id is not None:
q = q.filter(ActivityStats.activity_id == activity_id)
if activity_category_id is not None:
q = q.filter(ActivityStats.activity_category_id == activity_category_id) # noqa
if city_ids is not None:
q = q.filter(ActivityStats.city_id.in_(city_ids))
if restaurant_ids is not None:
q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))
if from_date is not None:
q = q.filter(ActivityStats.date >= from_date)
if to_date is not None:
q = q.filter(ActivityStats.date <= to_date)
if statuses is not None:
q = q.filter(ActivityStats.status.in_(statuses))
if with_subsidy is not None:
if with_subsidy:
q = q.filter(ActivityStats.total_subsidy > 0)
else:
q = q.filter(ActivityStats.total_subsidy == 0)
if offset is not None:
q = q.offset(offset)
q = q.limit(1000)
return q
|
flexible
|
{
"blob_id": "68d537cb8488ae4f2c8300e885be78540952dec0",
"index": 450,
"step-1": "<mask token>\n\n\ndef get_new_pay_records(process_at, limit=200):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at, SubsidyPayRecord.status\n ).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.\n pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.\n id > process_at).filter(SubsidyProcessRecord.status !=\n SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.\n asc()).limit(limit).all()\n return result\n\n\n<mask token>\n\n\ndef get_activity_stats(pay_record_id):\n with zeus_session() as session:\n results = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id, func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity)).group_by(ActivityStats.\n restaurant_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.pay_record_id ==\n pay_record_id).filter(ActivityStats.status == ActivityStats.\n STATUS_PAY_SUCCESS).all()\n return results\n\n\ndef get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,\n activity_category_id=None):\n with zeus_session() as session:\n query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==\n SubsidyPayRecord.STATUS_SUCCESS)\n if activity_id is not None:\n query.filter(SubsidyPayRecord.activity_id == activity_id)\n if activity_category_id is not None:\n query.filter(SubsidyPayRecord.activity_category_id ==\n activity_category_id)\n record_ids = query.all()\n return [r[0] for r in record_ids]\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef query_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None, offset=None, limit=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = 
session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.\n total_subsidy), SubsidyPayRecord.created_at, func.max(\n SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id, ActivityStats.activity_category_id\n ).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.\n pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord\n .pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.\n restaurant_id == restaurant_id).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n@zeus_db_handler\ndef query_pay_records(restaurant_id, offset=None, limit=None):\n q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).order_by(SubsidyPayRecord.\n created_at.desc())\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q.all()\n\n\n@zeus_db_handler\ndef query_paylog(pay_record_ids, activity_id=None, activity_category_id=\n None, offset=None, limit=None):\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)\n ).group_by(ActivityStats.pay_record_id, 
ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef count_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.id).group_by(ActivityStats.\n pay_record_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.restaurant_id ==\n restaurant_id).filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n return len(q.all())\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef get_subsidy_record_process_time(record_ids, status):\n return session.query(SubsidyProcessRecord.pay_record_id,\n SubsidyProcessRecord.processed_at).filter(SubsidyProcessRecord.\n pay_record_id.in_(record_ids)).filter(SubsidyProcessRecord.status ==\n status).all()\n\n\n<mask token>\n\n\ndef query_sms_send_info(start_time=None, end_time=None, phone=None,\n restaurant_id=None, card_num_tail=None, status=None):\n with walis_session() as session:\n query = session.query(NoticeRecord)\n if phone:\n query = query.filter(NoticeRecord.phone == phone)\n if restaurant_id:\n query = query.filter(NoticeRecord.restaurant_id == restaurant_id)\n if card_num_tail:\n query = 
query.filter(NoticeRecord.card_num_tail == card_num_tail)\n if status:\n query = query.filter(NoticeRecord.status == status)\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = query.filter(NoticeRecord.created_at > start_time).filter(\n NoticeRecord.created_at < end_time)\n return query.all()\n\n\ndef query_sms_send_count(start_time=None, end_time=None, status=None):\n with walis_session() as session:\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = session.query(func.count(NoticeRecord.record_id)).filter(\n NoticeRecord.created_at > start_time).filter(NoticeRecord.\n created_at < end_time)\n if status is not None:\n query = query.filter(NoticeRecord.status == status)\n return query.scalar()\n\n\n<mask token>\n\n\ndef _query_activity_stats(q, city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, with_subsidy=None, offset=None, limit=None):\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if city_ids is not None:\n q = q.filter(ActivityStats.city_id.in_(city_ids))\n if restaurant_ids is not None:\n q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))\n if from_date is not None:\n q = q.filter(ActivityStats.date >= from_date)\n if to_date is not None:\n q = q.filter(ActivityStats.date <= to_date)\n if statuses is not None:\n q = q.filter(ActivityStats.status.in_(statuses))\n if with_subsidy is not None:\n if with_subsidy:\n q = q.filter(ActivityStats.total_subsidy > 0)\n else:\n q = q.filter(ActivityStats.total_subsidy == 0)\n if offset is not None:\n q = q.offset(offset)\n q = q.limit(1000)\n return q\n",
"step-2": "<mask token>\n\n\ndef get_new_pay_records(process_at, limit=200):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at, SubsidyPayRecord.status\n ).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.\n pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.\n id > process_at).filter(SubsidyProcessRecord.status !=\n SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.\n asc()).limit(limit).all()\n return result\n\n\n<mask token>\n\n\ndef get_activity_stats(pay_record_id):\n with zeus_session() as session:\n results = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id, func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity)).group_by(ActivityStats.\n restaurant_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.pay_record_id ==\n pay_record_id).filter(ActivityStats.status == ActivityStats.\n STATUS_PAY_SUCCESS).all()\n return results\n\n\ndef get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,\n activity_category_id=None):\n with zeus_session() as session:\n query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==\n SubsidyPayRecord.STATUS_SUCCESS)\n if activity_id is not None:\n query.filter(SubsidyPayRecord.activity_id == activity_id)\n if activity_category_id is not None:\n query.filter(SubsidyPayRecord.activity_category_id ==\n activity_category_id)\n record_ids = query.all()\n return [r[0] for r in record_ids]\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef query_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None, offset=None, limit=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = 
session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.\n total_subsidy), SubsidyPayRecord.created_at, func.max(\n SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id, ActivityStats.activity_category_id\n ).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.\n pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord\n .pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.\n restaurant_id == restaurant_id).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n@zeus_db_handler\ndef query_pay_records(restaurant_id, offset=None, limit=None):\n q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).order_by(SubsidyPayRecord.\n created_at.desc())\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q.all()\n\n\n@zeus_db_handler\ndef query_paylog(pay_record_ids, activity_id=None, activity_category_id=\n None, offset=None, limit=None):\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)\n ).group_by(ActivityStats.pay_record_id, 
ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef count_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.id).group_by(ActivityStats.\n pay_record_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.restaurant_id ==\n restaurant_id).filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n return len(q.all())\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef get_subsidy_record_process_time(record_ids, status):\n return session.query(SubsidyProcessRecord.pay_record_id,\n SubsidyProcessRecord.processed_at).filter(SubsidyProcessRecord.\n pay_record_id.in_(record_ids)).filter(SubsidyProcessRecord.status ==\n status).all()\n\n\ndef get_pay_activities_by_restaurant(rst_id):\n with zeus_session() as session:\n query = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id).group_by(ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n restaurant_id == rst_id)\n return query.all()\n\n\ndef query_sms_send_info(start_time=None, end_time=None, phone=None,\n restaurant_id=None, 
card_num_tail=None, status=None):\n with walis_session() as session:\n query = session.query(NoticeRecord)\n if phone:\n query = query.filter(NoticeRecord.phone == phone)\n if restaurant_id:\n query = query.filter(NoticeRecord.restaurant_id == restaurant_id)\n if card_num_tail:\n query = query.filter(NoticeRecord.card_num_tail == card_num_tail)\n if status:\n query = query.filter(NoticeRecord.status == status)\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = query.filter(NoticeRecord.created_at > start_time).filter(\n NoticeRecord.created_at < end_time)\n return query.all()\n\n\ndef query_sms_send_count(start_time=None, end_time=None, status=None):\n with walis_session() as session:\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = session.query(func.count(NoticeRecord.record_id)).filter(\n NoticeRecord.created_at > start_time).filter(NoticeRecord.\n created_at < end_time)\n if status is not None:\n query = query.filter(NoticeRecord.status == status)\n return query.scalar()\n\n\n@zeus_db_handler\ndef query_auto_pay_activity_stats_result(city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, offset=None, limit=None, with_subsidy=None):\n q = session.query(ActivityStats.restaurant_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, func.sum(\n ActivityStats.quantity), func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date)).group_by(\n ActivityStats.restaurant_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).order_by(ActivityStats.\n restaurant_id.desc())\n return _query_activity_stats(q, city_ids, restaurant_ids, activity_id,\n activity_category_id, from_date, to_date, statuses, with_subsidy,\n offset, limit)\n\n\ndef _query_activity_stats(q, city_ids=None, 
restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, with_subsidy=None, offset=None, limit=None):\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if city_ids is not None:\n q = q.filter(ActivityStats.city_id.in_(city_ids))\n if restaurant_ids is not None:\n q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))\n if from_date is not None:\n q = q.filter(ActivityStats.date >= from_date)\n if to_date is not None:\n q = q.filter(ActivityStats.date <= to_date)\n if statuses is not None:\n q = q.filter(ActivityStats.status.in_(statuses))\n if with_subsidy is not None:\n if with_subsidy:\n q = q.filter(ActivityStats.total_subsidy > 0)\n else:\n q = q.filter(ActivityStats.total_subsidy == 0)\n if offset is not None:\n q = q.offset(offset)\n q = q.limit(1000)\n return q\n",
"step-3": "<mask token>\n\n\ndef get_new_pay_records(process_at, limit=200):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at, SubsidyPayRecord.status\n ).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.\n pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.\n id > process_at).filter(SubsidyProcessRecord.status !=\n SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.\n asc()).limit(limit).all()\n return result\n\n\ndef get_success_pay_records(record_ids):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at).outerjoin(SubsidyProcessRecord,\n SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id).filter(\n SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS).filter(\n SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL\n ).filter(SubsidyPayRecord.id.in_(record_ids)).all()\n return result\n\n\ndef get_activity_stats(pay_record_id):\n with zeus_session() as session:\n results = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id, func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity)).group_by(ActivityStats.\n restaurant_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.pay_record_id ==\n pay_record_id).filter(ActivityStats.status == ActivityStats.\n STATUS_PAY_SUCCESS).all()\n return results\n\n\ndef get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,\n activity_category_id=None):\n with zeus_session() as session:\n query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==\n SubsidyPayRecord.STATUS_SUCCESS)\n if 
activity_id is not None:\n query.filter(SubsidyPayRecord.activity_id == activity_id)\n if activity_category_id is not None:\n query.filter(SubsidyPayRecord.activity_category_id ==\n activity_category_id)\n record_ids = query.all()\n return [r[0] for r in record_ids]\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef query_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None, offset=None, limit=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.\n total_subsidy), SubsidyPayRecord.created_at, func.max(\n SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id, ActivityStats.activity_category_id\n ).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.\n pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord\n .pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.\n restaurant_id == restaurant_id).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n@zeus_db_handler\ndef query_pay_records(restaurant_id, offset=None, limit=None):\n q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).order_by(SubsidyPayRecord.\n created_at.desc())\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n 
q = q.offset(offset)\n return q.all()\n\n\n@zeus_db_handler\ndef query_paylog(pay_record_ids, activity_id=None, activity_category_id=\n None, offset=None, limit=None):\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)\n ).group_by(ActivityStats.pay_record_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef count_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.id).group_by(ActivityStats.\n pay_record_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.restaurant_id ==\n restaurant_id).filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n return len(q.all())\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef get_subsidy_record_process_time(record_ids, status):\n return session.query(SubsidyProcessRecord.pay_record_id,\n SubsidyProcessRecord.processed_at).filter(SubsidyProcessRecord.\n 
pay_record_id.in_(record_ids)).filter(SubsidyProcessRecord.status ==\n status).all()\n\n\ndef get_pay_activities_by_restaurant(rst_id):\n with zeus_session() as session:\n query = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id).group_by(ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n restaurant_id == rst_id)\n return query.all()\n\n\ndef query_sms_send_info(start_time=None, end_time=None, phone=None,\n restaurant_id=None, card_num_tail=None, status=None):\n with walis_session() as session:\n query = session.query(NoticeRecord)\n if phone:\n query = query.filter(NoticeRecord.phone == phone)\n if restaurant_id:\n query = query.filter(NoticeRecord.restaurant_id == restaurant_id)\n if card_num_tail:\n query = query.filter(NoticeRecord.card_num_tail == card_num_tail)\n if status:\n query = query.filter(NoticeRecord.status == status)\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = query.filter(NoticeRecord.created_at > start_time).filter(\n NoticeRecord.created_at < end_time)\n return query.all()\n\n\ndef query_sms_send_count(start_time=None, end_time=None, status=None):\n with walis_session() as session:\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = session.query(func.count(NoticeRecord.record_id)).filter(\n NoticeRecord.created_at > start_time).filter(NoticeRecord.\n created_at < end_time)\n if status is not None:\n query = query.filter(NoticeRecord.status == status)\n return query.scalar()\n\n\n@zeus_db_handler\ndef query_auto_pay_activity_stats_result(city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, offset=None, limit=None, with_subsidy=None):\n q = session.query(ActivityStats.restaurant_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, func.sum(\n 
ActivityStats.quantity), func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date)).group_by(\n ActivityStats.restaurant_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).order_by(ActivityStats.\n restaurant_id.desc())\n return _query_activity_stats(q, city_ids, restaurant_ids, activity_id,\n activity_category_id, from_date, to_date, statuses, with_subsidy,\n offset, limit)\n\n\ndef _query_activity_stats(q, city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, with_subsidy=None, offset=None, limit=None):\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if city_ids is not None:\n q = q.filter(ActivityStats.city_id.in_(city_ids))\n if restaurant_ids is not None:\n q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))\n if from_date is not None:\n q = q.filter(ActivityStats.date >= from_date)\n if to_date is not None:\n q = q.filter(ActivityStats.date <= to_date)\n if statuses is not None:\n q = q.filter(ActivityStats.status.in_(statuses))\n if with_subsidy is not None:\n if with_subsidy:\n q = q.filter(ActivityStats.total_subsidy > 0)\n else:\n q = q.filter(ActivityStats.total_subsidy == 0)\n if offset is not None:\n q = q.offset(offset)\n q = q.limit(1000)\n return q\n",
"step-4": "<mask token>\n\n\ndef get_new_pay_records(process_at, limit=200):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at, SubsidyPayRecord.status\n ).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.\n pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.\n id > process_at).filter(SubsidyProcessRecord.status !=\n SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.\n asc()).limit(limit).all()\n return result\n\n\ndef get_success_pay_records(record_ids):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at).outerjoin(SubsidyProcessRecord,\n SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id).filter(\n SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS).filter(\n SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL\n ).filter(SubsidyPayRecord.id.in_(record_ids)).all()\n return result\n\n\ndef get_activity_stats(pay_record_id):\n with zeus_session() as session:\n results = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id, func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity)).group_by(ActivityStats.\n restaurant_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.pay_record_id ==\n pay_record_id).filter(ActivityStats.status == ActivityStats.\n STATUS_PAY_SUCCESS).all()\n return results\n\n\ndef get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,\n activity_category_id=None):\n with zeus_session() as session:\n query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==\n SubsidyPayRecord.STATUS_SUCCESS)\n if 
activity_id is not None:\n query.filter(SubsidyPayRecord.activity_id == activity_id)\n if activity_category_id is not None:\n query.filter(SubsidyPayRecord.activity_category_id ==\n activity_category_id)\n record_ids = query.all()\n return [r[0] for r in record_ids]\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef query_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None, offset=None, limit=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.\n total_subsidy), SubsidyPayRecord.created_at, func.max(\n SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id, ActivityStats.activity_category_id\n ).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.\n pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord\n .pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.\n restaurant_id == restaurant_id).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n@zeus_db_handler\ndef query_pay_records(restaurant_id, offset=None, limit=None):\n q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).order_by(SubsidyPayRecord.\n created_at.desc())\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n 
q = q.offset(offset)\n return q.all()\n\n\n@zeus_db_handler\ndef query_paylog(pay_record_ids, activity_id=None, activity_category_id=\n None, offset=None, limit=None):\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)\n ).group_by(ActivityStats.pay_record_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n@zeus_db_handler\ndef get_max_subsidy_process_record_ids(pay_record_ids):\n q = session.query(func.max(SubsidyProcessRecord.id)).group_by(\n SubsidyProcessRecord.pay_record_id).filter(SubsidyProcessRecord.\n pay_record_id.in_(pay_record_ids))\n return q\n\n\n@zeus_db_handler\ndef count_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.id).group_by(ActivityStats.\n pay_record_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.restaurant_id ==\n restaurant_id).filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n return 
len(q.all())\n\n\n@zeus_db_handler\ndef query_process_records_by_ids(process_ids):\n query = session.query(SubsidyProcessRecord).filter(SubsidyProcessRecord\n .id.in_(process_ids))\n return query.all()\n\n\n@zeus_db_handler\ndef get_subsidy_record_process_time(record_ids, status):\n return session.query(SubsidyProcessRecord.pay_record_id,\n SubsidyProcessRecord.processed_at).filter(SubsidyProcessRecord.\n pay_record_id.in_(record_ids)).filter(SubsidyProcessRecord.status ==\n status).all()\n\n\ndef get_pay_activities_by_restaurant(rst_id):\n with zeus_session() as session:\n query = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id).group_by(ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n restaurant_id == rst_id)\n return query.all()\n\n\ndef query_sms_send_info(start_time=None, end_time=None, phone=None,\n restaurant_id=None, card_num_tail=None, status=None):\n with walis_session() as session:\n query = session.query(NoticeRecord)\n if phone:\n query = query.filter(NoticeRecord.phone == phone)\n if restaurant_id:\n query = query.filter(NoticeRecord.restaurant_id == restaurant_id)\n if card_num_tail:\n query = query.filter(NoticeRecord.card_num_tail == card_num_tail)\n if status:\n query = query.filter(NoticeRecord.status == status)\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = query.filter(NoticeRecord.created_at > start_time).filter(\n NoticeRecord.created_at < end_time)\n return query.all()\n\n\ndef query_sms_send_count(start_time=None, end_time=None, status=None):\n with walis_session() as session:\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = session.query(func.count(NoticeRecord.record_id)).filter(\n NoticeRecord.created_at > start_time).filter(NoticeRecord.\n created_at < end_time)\n if status is not None:\n query = 
query.filter(NoticeRecord.status == status)\n return query.scalar()\n\n\n@zeus_db_handler\ndef query_auto_pay_activity_stats_result(city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, offset=None, limit=None, with_subsidy=None):\n q = session.query(ActivityStats.restaurant_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, func.sum(\n ActivityStats.quantity), func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date)).group_by(\n ActivityStats.restaurant_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).order_by(ActivityStats.\n restaurant_id.desc())\n return _query_activity_stats(q, city_ids, restaurant_ids, activity_id,\n activity_category_id, from_date, to_date, statuses, with_subsidy,\n offset, limit)\n\n\ndef _query_activity_stats(q, city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, with_subsidy=None, offset=None, limit=None):\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if city_ids is not None:\n q = q.filter(ActivityStats.city_id.in_(city_ids))\n if restaurant_ids is not None:\n q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))\n if from_date is not None:\n q = q.filter(ActivityStats.date >= from_date)\n if to_date is not None:\n q = q.filter(ActivityStats.date <= to_date)\n if statuses is not None:\n q = q.filter(ActivityStats.status.in_(statuses))\n if with_subsidy is not None:\n if with_subsidy:\n q = q.filter(ActivityStats.total_subsidy > 0)\n else:\n q = q.filter(ActivityStats.total_subsidy == 0)\n if offset is not None:\n q = q.offset(offset)\n q = q.limit(1000)\n return q\n",
"step-5": "#!/usr/bin/env python2\n# coding=utf8\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom sqlalchemy import func\n\nfrom walis.model.walis import walis_session\nfrom walis.model.zeus import zeus_session, zeus_db_handler\nfrom walis.model.zeus.activity import (\n SubsidyProcessRecord,\n SubsidyPayRecord,\n ActivityStats,\n)\nfrom walis.model.walis.activity import PaymentNoticeRecord as NoticeRecord\nfrom walis.utils.time import get_today_begin_time, get_today_end_time\n\n\nMAX_LIST_SIZE = 1000\nDEFAULT_LIST_SIZE = 200\n\n\ndef get_new_pay_records(process_at, limit=200):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id,\n SubsidyPayRecord.restaurant_id,\n SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at,\n SubsidyPayRecord.status). \\\n outerjoin(SubsidyProcessRecord,\n SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \\\n filter(SubsidyPayRecord.id > process_at). \\\n filter(SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL). \\\n order_by(SubsidyPayRecord.id.asc()).limit(limit).all()\n\n return result\n\n\ndef get_success_pay_records(record_ids):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id,\n SubsidyPayRecord.restaurant_id,\n SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at,). \\\n outerjoin(SubsidyProcessRecord,\n SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \\\n filter(SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS). \\\n filter(SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL). 
\\\n filter(SubsidyPayRecord.id.in_(record_ids)).all()\n\n return result\n\n\ndef get_activity_stats(pay_record_id):\n with zeus_session() as session:\n results = session.query(ActivityStats.activity_id,\n ActivityStats.activity_category_id,\n func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date),\n func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), ).group_by(\n ActivityStats.restaurant_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id). \\\n filter(ActivityStats.pay_record_id == pay_record_id). \\\n filter(ActivityStats.status == ActivityStats.STATUS_PAY_SUCCESS).all()\n\n return results\n\n\ndef get_success_record_ids_by_restaurant(\n restaurant_id, activity_id=None, activity_category_id=None):\n with zeus_session() as session:\n query = session.query(SubsidyPayRecord.id). \\\n filter(SubsidyPayRecord.restaurant_id == restaurant_id). \\\n filter(SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS)\n\n if activity_id is not None:\n query.filter(SubsidyPayRecord.activity_id == activity_id)\n\n if activity_category_id is not None:\n query.filter(\n SubsidyPayRecord.activity_category_id == activity_category_id)\n record_ids = query.all()\n\n return [r[0] for r in record_ids]\n\n\nPAYLOG_STATUS_LIST = {\n ActivityStats.STATUS_PAY_RECORD_GENERATED,\n ActivityStats.STATUS_PAY_SUCCESS,\n ActivityStats.STATUS_PAY_FAIL,\n}\n\n\n@zeus_db_handler\ndef query_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None, offset=None, limit=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(\n ActivityStats.pay_record_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id,\n ActivityStats.status,\n func.min(ActivityStats.date),\n func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity),\n func.sum(ActivityStats.total_subsidy),\n SubsidyPayRecord.created_at,\n func.max(SubsidyProcessRecord.id)). 
\\\n group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id). \\\n outerjoin(SubsidyPayRecord,\n SubsidyPayRecord.id == ActivityStats.pay_record_id). \\\n outerjoin(SubsidyProcessRecord,\n SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \\\n filter(ActivityStats.restaurant_id == restaurant_id).\\\n filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST)).\\\n order_by(SubsidyPayRecord.created_at.desc())\n\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id)\n\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n\n if offset is not None:\n q = q.offset(offset)\n\n return q\n\n\n@zeus_db_handler\ndef query_pay_records(restaurant_id, offset=None, limit=None):\n q = session.query(SubsidyPayRecord).\\\n filter(SubsidyPayRecord.restaurant_id == restaurant_id).\\\n order_by(SubsidyPayRecord.created_at.desc())\n\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n\n if offset is not None:\n q = q.offset(offset)\n\n return q.all()\n\n\n@zeus_db_handler\ndef query_paylog(pay_record_ids, activity_id=None, activity_category_id=None,\n offset=None, limit=None):\n q = session.query(\n ActivityStats.pay_record_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id,\n ActivityStats.status,\n func.min(ActivityStats.date),\n func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity),\n func.sum(ActivityStats.total_subsidy)).\\\n group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id). 
\\\n filter(ActivityStats.pay_record_id.in_(pay_record_ids)).\\\n filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST)).\\\n order_by(ActivityStats.created_at.desc())\n\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id)\n\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n\n if offset is not None:\n q = q.offset(offset)\n\n return q\n\n\n@zeus_db_handler\ndef get_max_subsidy_process_record_ids(pay_record_ids):\n q = session.query(func.max(SubsidyProcessRecord.id)).\\\n group_by(SubsidyProcessRecord.pay_record_id).\\\n filter(SubsidyProcessRecord.pay_record_id.in_(pay_record_ids))\n\n return q\n\n\n@zeus_db_handler\ndef count_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.id). \\\n group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id). 
\\\n filter(ActivityStats.restaurant_id == restaurant_id).\\\n filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))\n\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id)\n\n return len(q.all())\n\n\n@zeus_db_handler\ndef query_process_records_by_ids(process_ids):\n query = session.query(SubsidyProcessRecord).\\\n filter(SubsidyProcessRecord.id.in_(process_ids))\n return query.all()\n\n\n@zeus_db_handler\ndef get_subsidy_record_process_time(record_ids, status):\n return session.query(\n SubsidyProcessRecord.pay_record_id,\n SubsidyProcessRecord.processed_at).\\\n filter(SubsidyProcessRecord.pay_record_id.in_(record_ids)).\\\n filter(SubsidyProcessRecord.status == status).all()\n\n\ndef get_pay_activities_by_restaurant(rst_id):\n with zeus_session() as session:\n query = session.query(\n ActivityStats.activity_id,\n ActivityStats.activity_category_id,). \\\n group_by(ActivityStats.activity_id,\n ActivityStats.activity_category_id). 
\\\n filter(ActivityStats.restaurant_id == rst_id)\n\n return query.all()\n\n\n# javis model begins\ndef query_sms_send_info(start_time=None, end_time=None, phone=None,\n restaurant_id=None, card_num_tail=None, status=None):\n\n with walis_session() as session:\n query = session.query(NoticeRecord)\n\n if phone:\n query = query.filter(NoticeRecord.phone == phone)\n\n if restaurant_id:\n query = query.filter(NoticeRecord.restaurant_id == restaurant_id)\n\n if card_num_tail:\n query = query.filter(NoticeRecord.card_num_tail == card_num_tail)\n\n if status:\n query = query.filter(NoticeRecord.status == status)\n\n if not start_time:\n start_time = get_today_begin_time()\n\n if not end_time:\n end_time = get_today_end_time()\n\n query = query.filter(NoticeRecord.created_at > start_time).\\\n filter(NoticeRecord.created_at < end_time)\n\n return query.all()\n\n\ndef query_sms_send_count(start_time=None, end_time=None, status=None):\n with walis_session() as session:\n\n if not start_time:\n start_time = get_today_begin_time()\n\n if not end_time:\n end_time = get_today_end_time()\n\n query = session.query(func.count(NoticeRecord.record_id)).\\\n filter(NoticeRecord.created_at > start_time).\\\n filter(NoticeRecord.created_at < end_time)\n\n if status is not None:\n query = query.filter(NoticeRecord.status == status)\n\n return query.scalar()\n\n\n@zeus_db_handler\ndef query_auto_pay_activity_stats_result(\n city_ids=None, restaurant_ids=None, activity_id=None,\n activity_category_id=None, from_date=None, to_date=None, statuses=None,\n offset=None, limit=None, with_subsidy=None):\n q = session.query(ActivityStats.restaurant_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id,\n func.sum(ActivityStats.quantity),\n func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date),\n func.max(ActivityStats.date)).\\\n group_by(ActivityStats.restaurant_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id).\\\n 
order_by(ActivityStats.restaurant_id.desc())\n\n return _query_activity_stats(\n q, city_ids, restaurant_ids, activity_id,\n activity_category_id, from_date, to_date, statuses,\n with_subsidy, offset, limit)\n\n\ndef _query_activity_stats(\n q, city_ids=None, restaurant_ids=None, activity_id=None,\n activity_category_id=None, from_date=None, to_date=None, statuses=None,\n with_subsidy=None, offset=None, limit=None):\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id) # noqa\n\n if city_ids is not None:\n q = q.filter(ActivityStats.city_id.in_(city_ids))\n\n if restaurant_ids is not None:\n q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))\n\n if from_date is not None:\n q = q.filter(ActivityStats.date >= from_date)\n\n if to_date is not None:\n q = q.filter(ActivityStats.date <= to_date)\n\n if statuses is not None:\n q = q.filter(ActivityStats.status.in_(statuses))\n\n if with_subsidy is not None:\n if with_subsidy:\n q = q.filter(ActivityStats.total_subsidy > 0)\n else:\n q = q.filter(ActivityStats.total_subsidy == 0)\n\n if offset is not None:\n q = q.offset(offset)\n\n q = q.limit(1000)\n\n return q\n",
"step-ids": [
11,
13,
14,
16,
19
]
}
|
[
11,
13,
14,
16,
19
] |
<|reserved_special_token_0|>
class Student(andy.Lesson_7.exercise_1.Human):
def __init__(self, firstname, lastname, grade):
super().__init__(firstname, lastname)
self.grade = grade
def do_hobby(self):
return self.full_name + ' ebet Petra Kovarskogo'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Student(andy.Lesson_7.exercise_1.Human):
def __init__(self, firstname, lastname, grade):
super().__init__(firstname, lastname)
self.grade = grade
def do_hobby(self):
return self.full_name + ' ebet Petra Kovarskogo'
<|reserved_special_token_0|>
print(a.do_hobby())
print(a.grade)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Student(andy.Lesson_7.exercise_1.Human):
def __init__(self, firstname, lastname, grade):
super().__init__(firstname, lastname)
self.grade = grade
def do_hobby(self):
return self.full_name + ' ebet Petra Kovarskogo'
a = Student('Artem', 'Nizhnik', 'Shkolnik')
print(a.do_hobby())
print(a.grade)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import andy.Lesson_7.exercise_1
class Student(andy.Lesson_7.exercise_1.Human):
def __init__(self, firstname, lastname, grade):
super().__init__(firstname, lastname)
self.grade = grade
def do_hobby(self):
return self.full_name + ' ebet Petra Kovarskogo'
a = Student('Artem', 'Nizhnik', 'Shkolnik')
print(a.do_hobby())
print(a.grade)
<|reserved_special_token_1|>
"""
- Define a new class Student which is derived from Human and has:
grade field.
do_hobby - print 'dancing' or some another hobby
"""
import andy.Lesson_7.exercise_1
class Student(andy.Lesson_7.exercise_1.Human):
    """Exercise: a Human subclass adding a grade field and a hobby method."""
    def __init__(self, firstname, lastname, grade):
        # Let the Human base class initialise the name fields first.
        super().__init__(firstname, lastname)
        self.grade = grade
    def do_hobby(self):
        # Builds a message from full_name -- presumably provided by Human;
        # NOTE(review): the exercise text asks for a hobby like 'dancing',
        # which the current string does not match.
        return self.full_name + " ebet Petra Kovarskogo"
a = Student("Artem", "Nizhnik", "Shkolnik")
print(a.do_hobby())
print(a.grade)
|
flexible
|
{
"blob_id": "497f56891670f635feff983058e86055e54be493",
"index": 2618,
"step-1": "<mask token>\n\n\nclass Student(andy.Lesson_7.exercise_1.Human):\n\n def __init__(self, firstname, lastname, grade):\n super().__init__(firstname, lastname)\n self.grade = grade\n\n def do_hobby(self):\n return self.full_name + ' ebet Petra Kovarskogo'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Student(andy.Lesson_7.exercise_1.Human):\n\n def __init__(self, firstname, lastname, grade):\n super().__init__(firstname, lastname)\n self.grade = grade\n\n def do_hobby(self):\n return self.full_name + ' ebet Petra Kovarskogo'\n\n\n<mask token>\nprint(a.do_hobby())\nprint(a.grade)\n",
"step-3": "<mask token>\n\n\nclass Student(andy.Lesson_7.exercise_1.Human):\n\n def __init__(self, firstname, lastname, grade):\n super().__init__(firstname, lastname)\n self.grade = grade\n\n def do_hobby(self):\n return self.full_name + ' ebet Petra Kovarskogo'\n\n\na = Student('Artem', 'Nizhnik', 'Shkolnik')\nprint(a.do_hobby())\nprint(a.grade)\n",
"step-4": "<mask token>\nimport andy.Lesson_7.exercise_1\n\n\nclass Student(andy.Lesson_7.exercise_1.Human):\n\n def __init__(self, firstname, lastname, grade):\n super().__init__(firstname, lastname)\n self.grade = grade\n\n def do_hobby(self):\n return self.full_name + ' ebet Petra Kovarskogo'\n\n\na = Student('Artem', 'Nizhnik', 'Shkolnik')\nprint(a.do_hobby())\nprint(a.grade)\n",
"step-5": "\"\"\"\n- Define a new class Student which is derived from Human and has:\n grade field.\n do_hobby - print 'dancing' or some another hobby\n\"\"\"\nimport andy.Lesson_7.exercise_1\n\n\nclass Student(andy.Lesson_7.exercise_1.Human):\n\n def __init__(self, firstname, lastname, grade):\n super().__init__(firstname, lastname)\n self.grade = grade\n\n def do_hobby(self):\n return self.full_name + \" ebet Petra Kovarskogo\"\n\n\na = Student(\"Artem\", \"Nizhnik\", \"Shkolnik\")\nprint(a.do_hobby())\nprint(a.grade)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
<|reserved_special_token_0|>
def solve_equation(self, m, n):
k_l, k_h = 2, n - 1
while k_l <= k_h:
mid = (k_l + k_h) / 2
val = mid ** m - n * mid + n - 1
if val == 0:
return mid
elif val < 0:
k_l = mid + 1
else:
k_h = mid - 1
return False
<|reserved_special_token_1|>
class Solution(object):
def smallestGoodBase(self, n):
"""
:type n: str
:rtype: str
"""
m_max = int(math.ceil(math.log(1 + int(n), 2)))
for m in range(m_max, 1, -1):
res = self.solve_equation(m, int(n))
if res != False:
return str(res)
def solve_equation(self, m, n):
k_l, k_h = 2, n - 1
while k_l <= k_h:
mid = (k_l + k_h) / 2
val = mid ** m - n * mid + n - 1
if val == 0:
return mid
elif val < 0:
k_l = mid + 1
else:
k_h = mid - 1
return False
<|reserved_special_token_1|>
class Solution(object):
    """LeetCode 483: smallest base k >= 2 in which n reads as all 1's."""

    def smallestGoodBase(self, n):
        """Return (as a string) the smallest good base of integer n.

        :type n: str  -- decimal representation of n
        :rtype: str   -- decimal representation of the smallest base

        A base k whose representation of n is m digits of 1 satisfies
        (k**m - 1) / (k - 1) = n, i.e. k**m - n*k + n - 1 = 0 for fixed m.
        k = 2 yields the longest possible run of 1's, so m <= log2(n + 1);
        trying m from that bound downward means the first hit has the most
        digits and therefore the smallest base.  m = 2 always succeeds with
        k = n - 1, so the loop is guaranteed to return.
        """
        import math  # local import: this snippet never imports math at module level
        num = int(n)
        m_max = int(math.ceil(math.log(1 + num, 2)))
        for m in range(m_max, 1, -1):
            k = self.solve_equation(m, num)
            if k is not False:
                return str(k)

    def solve_equation(self, m, n):
        """Binary-search an integer root k of k**m - n*k + n - 1 = 0.

        Searches k in [2, n - 1]; returns the root, or False if none exists
        for this m.  Uses floor division so arithmetic stays in exact
        integers -- the original ``(k_l + k_h) / 2`` produced floats on
        Python 3, losing precision for large n and returning a float root.
        """
        k_l, k_h = 2, n - 1
        while k_l <= k_h:
            mid = (k_l + k_h) // 2  # was '/': float division on Python 3
            val = mid ** m - n * mid + n - 1
            if val == 0:
                return mid
            elif val < 0:
                k_l = mid + 1
            else:
                k_h = mid - 1
        return False
|
flexible
|
{
"blob_id": "de287d1bc644fdfd0f47bd8667580786b74444d0",
"index": 8863,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n <mask token>\n",
"step-3": "class Solution(object):\n <mask token>\n\n def solve_equation(self, m, n):\n k_l, k_h = 2, n - 1\n while k_l <= k_h:\n mid = (k_l + k_h) / 2\n val = mid ** m - n * mid + n - 1\n if val == 0:\n return mid\n elif val < 0:\n k_l = mid + 1\n else:\n k_h = mid - 1\n return False\n",
"step-4": "class Solution(object):\n\n def smallestGoodBase(self, n):\n \"\"\"\n :type n: str\n :rtype: str\n \"\"\"\n m_max = int(math.ceil(math.log(1 + int(n), 2)))\n for m in range(m_max, 1, -1):\n res = self.solve_equation(m, int(n))\n if res != False:\n return str(res)\n\n def solve_equation(self, m, n):\n k_l, k_h = 2, n - 1\n while k_l <= k_h:\n mid = (k_l + k_h) / 2\n val = mid ** m - n * mid + n - 1\n if val == 0:\n return mid\n elif val < 0:\n k_l = mid + 1\n else:\n k_h = mid - 1\n return False\n",
"step-5": "class Solution(object):\n def smallestGoodBase(self, n):\n \"\"\"\n :type n: str\n :rtype: str\n \"\"\"\n # k is the base and the representation is\n # m bits of 1\n # We then have from math\n # (k**m - 1) / (k-1) = n\n # m = log_k (n * k - n + 1)\n # m needs to be integer\n \n # we know that k = 2 m will be largest\n m_max = int(math.ceil(math.log(1 + int(n), 2)))\n for m in range(m_max, 1, -1):\n # solve high order equation\n # k**m - nk + n - 1 = 0\n \n # Find k using newton approach\n res = self.solve_equation(m, int(n))\n if res != False:\n return str(res)\n \n\n # k**m - nk + n - 1 = 0\n # TODO: Why newton approach always work here.\n # Hard to prove they are always monotonic\n def solve_equation(self, m, n):\n k_l, k_h = 2, n - 1\n while k_l <= k_h:\n mid = (k_l + k_h) / 2\n val = mid ** m - n * mid + n - 1 \n if val == 0:\n return mid\n elif val < 0:\n k_l = mid + 1\n else:\n k_h = mid - 1\n return False\n \n\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TimestechConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TimestechConfig(AppConfig):
name = 'TimesTech'
<|reserved_special_token_1|>
from django.apps import AppConfig
class TimestechConfig(AppConfig):
    # Django application configuration for the "TimesTech" app.
    # `name` must match the app's Python package path.  NOTE(review):
    # Django convention is a lowercase package name -- confirm the
    # package directory really is named "TimesTech".
    name = 'TimesTech'
|
flexible
|
{
"blob_id": "94f50e371ef65e86d0d2d40a3ed16946f8811be3",
"index": 2601,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TimestechConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TimestechConfig(AppConfig):\n name = 'TimesTech'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass TimestechConfig(AppConfig):\n name = 'TimesTech'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def isPrime(num):
if num <= 1:
return False
elif num == 2:
return True
elif num % 2 == 0:
return False
else:
sqrt_num = math.sqrt(num)
bound = int(sqrt_num) + 1
for i in range(3, bound, 2):
if num % i == 0:
return False
return True
def permutate(arr, n):
if n == len(arr):
str_num = ''
for j in range(n):
str_num += str(arr[j])
num = int(str_num)
if isPrime(num):
global maxPandigitalPrime
if num > maxPandigitalPrime:
maxPandigitalPrime = num
else:
for i in range(n, len(arr)):
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
permutate(arr, n + 1)
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def isPrime(num):
if num <= 1:
return False
elif num == 2:
return True
elif num % 2 == 0:
return False
else:
sqrt_num = math.sqrt(num)
bound = int(sqrt_num) + 1
for i in range(3, bound, 2):
if num % i == 0:
return False
return True
def permutate(arr, n):
if n == len(arr):
str_num = ''
for j in range(n):
str_num += str(arr[j])
num = int(str_num)
if isPrime(num):
global maxPandigitalPrime
if num > maxPandigitalPrime:
maxPandigitalPrime = num
else:
for i in range(n, len(arr)):
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
permutate(arr, n + 1)
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
<|reserved_special_token_0|>
for digit in range(2, 9):
arr = list(range(1, digit + 1))
permutate(arr, 0)
print(maxPandigitalPrime)
<|reserved_special_token_0|>
print(toc - tic)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
maxPandigitalPrime = 2
def isPrime(num):
if num <= 1:
return False
elif num == 2:
return True
elif num % 2 == 0:
return False
else:
sqrt_num = math.sqrt(num)
bound = int(sqrt_num) + 1
for i in range(3, bound, 2):
if num % i == 0:
return False
return True
def permutate(arr, n):
if n == len(arr):
str_num = ''
for j in range(n):
str_num += str(arr[j])
num = int(str_num)
if isPrime(num):
global maxPandigitalPrime
if num > maxPandigitalPrime:
maxPandigitalPrime = num
else:
for i in range(n, len(arr)):
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
permutate(arr, n + 1)
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
tic = time.time()
for digit in range(2, 9):
arr = list(range(1, digit + 1))
permutate(arr, 0)
print(maxPandigitalPrime)
toc = time.time()
print(toc - tic)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import time
import math
maxPandigitalPrime = 2
def isPrime(num):
if num <= 1:
return False
elif num == 2:
return True
elif num % 2 == 0:
return False
else:
sqrt_num = math.sqrt(num)
bound = int(sqrt_num) + 1
for i in range(3, bound, 2):
if num % i == 0:
return False
return True
def permutate(arr, n):
if n == len(arr):
str_num = ''
for j in range(n):
str_num += str(arr[j])
num = int(str_num)
if isPrime(num):
global maxPandigitalPrime
if num > maxPandigitalPrime:
maxPandigitalPrime = num
else:
for i in range(n, len(arr)):
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
permutate(arr, n + 1)
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
tic = time.time()
for digit in range(2, 9):
arr = list(range(1, digit + 1))
permutate(arr, 0)
print(maxPandigitalPrime)
toc = time.time()
print(toc - tic)
<|reserved_special_token_1|>
'''
Project Euler
Problem #41 - Pandigital prime
David 07/06/2017
'''
import time
import math
maxPandigitalPrime = 2
def isPrime(num):
    """Primality test: trial division by odd candidates up to sqrt(num)."""
    if num <= 1:
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    # Only odd divisors up to floor(sqrt(num)) can divide an odd number.
    limit = int(math.sqrt(num)) + 1
    return all(num % d for d in range(3, limit, 2))
def permutate(arr,n):
    """Recursively generate every permutation of arr[n:] in place.

    For each complete permutation, the digits of arr are joined into one
    integer; if that integer is prime, the module-level maximum
    (maxPandigitalPrime) is updated.
    """
    if(n==len(arr)):
        # Base case: arr holds a full permutation -- join its digits.
        str_num = ''
        for j in range(n):
            str_num += str(arr[j])
        num = int(str_num)
        if(isPrime(num)):
            # Record the best candidate seen so far in the module-level
            # accumulator.
            global maxPandigitalPrime
            if(num>maxPandigitalPrime):
                maxPandigitalPrime = num
    else:
        for i in range(n,len(arr)):
            # Swap position n (the fixed "head") with position i ...
            temp = arr[i]
            arr[i] = arr[n]
            arr[n] = temp
            permutate(arr,n+1)
            # ... then swap back so arr is restored for the next choice.
            temp = arr[i]
            arr[i] = arr[n]
            arr[n] = temp
# Main driver: try every pandigital length d = 2..8 (digits 1..d) and
# report the largest pandigital prime found, plus elapsed wall-clock time.
tic = time.time()
for digit in range(2,9):
    arr = list(range(1,digit+1))
    permutate(arr,0)

print(maxPandigitalPrime)
toc = time.time()
# Elapsed time of the whole search, in seconds.
print(toc-tic)
|
flexible
|
{
"blob_id": "7ca7693b842700a7b15242b656648e8a7e58cd23",
"index": 1691,
"step-1": "<mask token>\n\n\ndef isPrime(num):\n if num <= 1:\n return False\n elif num == 2:\n return True\n elif num % 2 == 0:\n return False\n else:\n sqrt_num = math.sqrt(num)\n bound = int(sqrt_num) + 1\n for i in range(3, bound, 2):\n if num % i == 0:\n return False\n return True\n\n\ndef permutate(arr, n):\n if n == len(arr):\n str_num = ''\n for j in range(n):\n str_num += str(arr[j])\n num = int(str_num)\n if isPrime(num):\n global maxPandigitalPrime\n if num > maxPandigitalPrime:\n maxPandigitalPrime = num\n else:\n for i in range(n, len(arr)):\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n permutate(arr, n + 1)\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef isPrime(num):\n if num <= 1:\n return False\n elif num == 2:\n return True\n elif num % 2 == 0:\n return False\n else:\n sqrt_num = math.sqrt(num)\n bound = int(sqrt_num) + 1\n for i in range(3, bound, 2):\n if num % i == 0:\n return False\n return True\n\n\ndef permutate(arr, n):\n if n == len(arr):\n str_num = ''\n for j in range(n):\n str_num += str(arr[j])\n num = int(str_num)\n if isPrime(num):\n global maxPandigitalPrime\n if num > maxPandigitalPrime:\n maxPandigitalPrime = num\n else:\n for i in range(n, len(arr)):\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n permutate(arr, n + 1)\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n\n\n<mask token>\nfor digit in range(2, 9):\n arr = list(range(1, digit + 1))\n permutate(arr, 0)\nprint(maxPandigitalPrime)\n<mask token>\nprint(toc - tic)\n",
"step-3": "<mask token>\nmaxPandigitalPrime = 2\n\n\ndef isPrime(num):\n if num <= 1:\n return False\n elif num == 2:\n return True\n elif num % 2 == 0:\n return False\n else:\n sqrt_num = math.sqrt(num)\n bound = int(sqrt_num) + 1\n for i in range(3, bound, 2):\n if num % i == 0:\n return False\n return True\n\n\ndef permutate(arr, n):\n if n == len(arr):\n str_num = ''\n for j in range(n):\n str_num += str(arr[j])\n num = int(str_num)\n if isPrime(num):\n global maxPandigitalPrime\n if num > maxPandigitalPrime:\n maxPandigitalPrime = num\n else:\n for i in range(n, len(arr)):\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n permutate(arr, n + 1)\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n\n\ntic = time.time()\nfor digit in range(2, 9):\n arr = list(range(1, digit + 1))\n permutate(arr, 0)\nprint(maxPandigitalPrime)\ntoc = time.time()\nprint(toc - tic)\n",
"step-4": "<mask token>\nimport time\nimport math\nmaxPandigitalPrime = 2\n\n\ndef isPrime(num):\n if num <= 1:\n return False\n elif num == 2:\n return True\n elif num % 2 == 0:\n return False\n else:\n sqrt_num = math.sqrt(num)\n bound = int(sqrt_num) + 1\n for i in range(3, bound, 2):\n if num % i == 0:\n return False\n return True\n\n\ndef permutate(arr, n):\n if n == len(arr):\n str_num = ''\n for j in range(n):\n str_num += str(arr[j])\n num = int(str_num)\n if isPrime(num):\n global maxPandigitalPrime\n if num > maxPandigitalPrime:\n maxPandigitalPrime = num\n else:\n for i in range(n, len(arr)):\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n permutate(arr, n + 1)\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n\n\ntic = time.time()\nfor digit in range(2, 9):\n arr = list(range(1, digit + 1))\n permutate(arr, 0)\nprint(maxPandigitalPrime)\ntoc = time.time()\nprint(toc - tic)\n",
"step-5": "'''\nProject Euler\n\nProblem #41 - Pandigital prime\n\nDavid 07/06/2017\n'''\n\nimport time\nimport math\n\nmaxPandigitalPrime = 2\n\ndef isPrime(num):\n if(num<=1):\n return False\n elif(num==2):\n return True\n elif(num%2==0):\n return False\n else:\n sqrt_num = math.sqrt(num)\n bound = int(sqrt_num)+1\n for i in range(3,bound,2):\n if(num%i==0):\n return False\n return True\n\n\ndef permutate(arr,n):\n if(n==len(arr)):\n #print(arr)\n str_num = ''\n for j in range(n):\n str_num += str(arr[j])\n num = int(str_num)\n if(isPrime(num)):\n global maxPandigitalPrime\n if(num>maxPandigitalPrime):\n maxPandigitalPrime = num\n else:\n for i in range(n,len(arr)):\n # swap index n(head), i\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n permutate(arr,n+1)\n # swap back to resume arr\n temp = arr[i]\n arr[i] = arr[n]\n arr[n] = temp\n\n\n# main\ntic = time.time()\nfor digit in range(2,9):\n arr = list(range(1,digit+1))\n permutate(arr,0)\n\nprint(maxPandigitalPrime)\ntoc = time.time()\nprint(toc-tic)\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def ref_mod2(x0, x1, fmod):
if x0.dtype == np.float32 or fmod == True:
return np.fmod(x0, x1)
else:
return np.mod(x0, x1)
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('x0_shape, x1_shape', [((2, 3, 4), (2, 3, 4)), ((2,
3, 4), (1, 1, 1)), ((1, 1, 1), (2, 3, 4))])
@pytest.mark.parametrize('fmod', [False, True])
@pytest.mark.parametrize('dtype', [np.float32, np.int32])
@pytest.mark.parametrize('seed', [313])
def test_mod2_forward(seed, x0_shape, x1_shape, fmod, dtype, ctx, func_name):
from nbla_test_utils import function_tester
rng = np.random.RandomState(seed)
if dtype == np.float32:
inputs = [rng.randn(*x0_shape).astype(dtype), rng.randn(*x1_shape).
astype(dtype)]
else:
inputs = [rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max,
x0_shape).astype(dtype), rng.randint(np.iinfo(dtype).min, np.
iinfo(dtype).max, x1_shape).astype(dtype)]
backward = [False, False]
func_args = [fmod]
function_tester(rng, F.mod2, ref_mod2, inputs, func_name=func_name,
func_args=func_args, atol_f=0, ctx=ctx, backward=backward)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ctxs = list_context('Mod2')
def ref_mod2(x0, x1, fmod):
if x0.dtype == np.float32 or fmod == True:
return np.fmod(x0, x1)
else:
return np.mod(x0, x1)
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('x0_shape, x1_shape', [((2, 3, 4), (2, 3, 4)), ((2,
3, 4), (1, 1, 1)), ((1, 1, 1), (2, 3, 4))])
@pytest.mark.parametrize('fmod', [False, True])
@pytest.mark.parametrize('dtype', [np.float32, np.int32])
@pytest.mark.parametrize('seed', [313])
def test_mod2_forward(seed, x0_shape, x1_shape, fmod, dtype, ctx, func_name):
from nbla_test_utils import function_tester
rng = np.random.RandomState(seed)
if dtype == np.float32:
inputs = [rng.randn(*x0_shape).astype(dtype), rng.randn(*x1_shape).
astype(dtype)]
else:
inputs = [rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max,
x0_shape).astype(dtype), rng.randint(np.iinfo(dtype).min, np.
iinfo(dtype).max, x1_shape).astype(dtype)]
backward = [False, False]
func_args = [fmod]
function_tester(rng, F.mod2, ref_mod2, inputs, func_name=func_name,
func_args=func_args, atol_f=0, ctx=ctx, backward=backward)
<|reserved_special_token_1|>
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import list_context
ctxs = list_context('Mod2')
def ref_mod2(x0, x1, fmod):
if x0.dtype == np.float32 or fmod == True:
return np.fmod(x0, x1)
else:
return np.mod(x0, x1)
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('x0_shape, x1_shape', [((2, 3, 4), (2, 3, 4)), ((2,
3, 4), (1, 1, 1)), ((1, 1, 1), (2, 3, 4))])
@pytest.mark.parametrize('fmod', [False, True])
@pytest.mark.parametrize('dtype', [np.float32, np.int32])
@pytest.mark.parametrize('seed', [313])
def test_mod2_forward(seed, x0_shape, x1_shape, fmod, dtype, ctx, func_name):
from nbla_test_utils import function_tester
rng = np.random.RandomState(seed)
if dtype == np.float32:
inputs = [rng.randn(*x0_shape).astype(dtype), rng.randn(*x1_shape).
astype(dtype)]
else:
inputs = [rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max,
x0_shape).astype(dtype), rng.randint(np.iinfo(dtype).min, np.
iinfo(dtype).max, x1_shape).astype(dtype)]
backward = [False, False]
func_args = [fmod]
function_tester(rng, F.mod2, ref_mod2, inputs, func_name=func_name,
func_args=func_args, atol_f=0, ctx=ctx, backward=backward)
<|reserved_special_token_1|>
# Copyright 2023 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import list_context
ctxs = list_context('Mod2')
def ref_mod2(x0, x1, fmod):
    """NumPy reference for Mod2.

    Uses C-style np.fmod (sign follows the dividend) for float32 inputs or
    when fmod is requested; otherwise Python-style np.mod (sign follows
    the divisor).
    """
    use_fmod = x0.dtype == np.float32 or fmod == True
    return np.fmod(x0, x1) if use_fmod else np.mod(x0, x1)
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("x0_shape, x1_shape", [
    ((2, 3, 4), (2, 3, 4)),
    ((2, 3, 4), (1, 1, 1)),
    ((1, 1, 1), (2, 3, 4)),
])
@pytest.mark.parametrize('fmod', [False, True])
@pytest.mark.parametrize('dtype', [np.float32, np.int32])
@pytest.mark.parametrize("seed", [313])
def test_mod2_forward(seed, x0_shape, x1_shape, fmod, dtype, ctx, func_name):
    """Forward-only check of F.mod2 against the NumPy reference ref_mod2,
    over equal shapes and both broadcast directions."""
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    if dtype == np.float32:
        inputs = [rng.randn(*x0_shape).astype(dtype),
                  rng.randn(*x1_shape).astype(dtype)]
    else:
        # Integer case: draw across the full int32 range so both signs and
        # large magnitudes are exercised.
        inputs = [rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max, x0_shape).astype(dtype),
                  rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max, x1_shape).astype(dtype)]
    # Gradients are not exercised here: backward disabled for both inputs.
    backward = [False, False]
    func_args = [fmod]
    # atol_f=0: forward outputs must match the reference exactly.
    function_tester(rng, F.mod2, ref_mod2, inputs,
                    func_name=func_name, func_args=func_args,
                    atol_f=0, ctx=ctx, backward=backward)
|
flexible
|
{
"blob_id": "32f10c3e73a3d792416f6b2841a80f8b3c390e8c",
"index": 9194,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef ref_mod2(x0, x1, fmod):\n if x0.dtype == np.float32 or fmod == True:\n return np.fmod(x0, x1)\n else:\n return np.mod(x0, x1)\n\n\n@pytest.mark.parametrize('ctx, func_name', ctxs)\n@pytest.mark.parametrize('x0_shape, x1_shape', [((2, 3, 4), (2, 3, 4)), ((2,\n 3, 4), (1, 1, 1)), ((1, 1, 1), (2, 3, 4))])\n@pytest.mark.parametrize('fmod', [False, True])\n@pytest.mark.parametrize('dtype', [np.float32, np.int32])\n@pytest.mark.parametrize('seed', [313])\ndef test_mod2_forward(seed, x0_shape, x1_shape, fmod, dtype, ctx, func_name):\n from nbla_test_utils import function_tester\n rng = np.random.RandomState(seed)\n if dtype == np.float32:\n inputs = [rng.randn(*x0_shape).astype(dtype), rng.randn(*x1_shape).\n astype(dtype)]\n else:\n inputs = [rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max,\n x0_shape).astype(dtype), rng.randint(np.iinfo(dtype).min, np.\n iinfo(dtype).max, x1_shape).astype(dtype)]\n backward = [False, False]\n func_args = [fmod]\n function_tester(rng, F.mod2, ref_mod2, inputs, func_name=func_name,\n func_args=func_args, atol_f=0, ctx=ctx, backward=backward)\n",
"step-3": "<mask token>\nctxs = list_context('Mod2')\n\n\ndef ref_mod2(x0, x1, fmod):\n if x0.dtype == np.float32 or fmod == True:\n return np.fmod(x0, x1)\n else:\n return np.mod(x0, x1)\n\n\n@pytest.mark.parametrize('ctx, func_name', ctxs)\n@pytest.mark.parametrize('x0_shape, x1_shape', [((2, 3, 4), (2, 3, 4)), ((2,\n 3, 4), (1, 1, 1)), ((1, 1, 1), (2, 3, 4))])\n@pytest.mark.parametrize('fmod', [False, True])\n@pytest.mark.parametrize('dtype', [np.float32, np.int32])\n@pytest.mark.parametrize('seed', [313])\ndef test_mod2_forward(seed, x0_shape, x1_shape, fmod, dtype, ctx, func_name):\n from nbla_test_utils import function_tester\n rng = np.random.RandomState(seed)\n if dtype == np.float32:\n inputs = [rng.randn(*x0_shape).astype(dtype), rng.randn(*x1_shape).\n astype(dtype)]\n else:\n inputs = [rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max,\n x0_shape).astype(dtype), rng.randint(np.iinfo(dtype).min, np.\n iinfo(dtype).max, x1_shape).astype(dtype)]\n backward = [False, False]\n func_args = [fmod]\n function_tester(rng, F.mod2, ref_mod2, inputs, func_name=func_name,\n func_args=func_args, atol_f=0, ctx=ctx, backward=backward)\n",
"step-4": "import pytest\nimport numpy as np\nimport nnabla.functions as F\nfrom nbla_test_utils import list_context\nctxs = list_context('Mod2')\n\n\ndef ref_mod2(x0, x1, fmod):\n if x0.dtype == np.float32 or fmod == True:\n return np.fmod(x0, x1)\n else:\n return np.mod(x0, x1)\n\n\n@pytest.mark.parametrize('ctx, func_name', ctxs)\n@pytest.mark.parametrize('x0_shape, x1_shape', [((2, 3, 4), (2, 3, 4)), ((2,\n 3, 4), (1, 1, 1)), ((1, 1, 1), (2, 3, 4))])\n@pytest.mark.parametrize('fmod', [False, True])\n@pytest.mark.parametrize('dtype', [np.float32, np.int32])\n@pytest.mark.parametrize('seed', [313])\ndef test_mod2_forward(seed, x0_shape, x1_shape, fmod, dtype, ctx, func_name):\n from nbla_test_utils import function_tester\n rng = np.random.RandomState(seed)\n if dtype == np.float32:\n inputs = [rng.randn(*x0_shape).astype(dtype), rng.randn(*x1_shape).\n astype(dtype)]\n else:\n inputs = [rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max,\n x0_shape).astype(dtype), rng.randint(np.iinfo(dtype).min, np.\n iinfo(dtype).max, x1_shape).astype(dtype)]\n backward = [False, False]\n func_args = [fmod]\n function_tester(rng, F.mod2, ref_mod2, inputs, func_name=func_name,\n func_args=func_args, atol_f=0, ctx=ctx, backward=backward)\n",
"step-5": "# Copyright 2023 Sony Group Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport numpy as np\nimport nnabla.functions as F\nfrom nbla_test_utils import list_context\n\nctxs = list_context('Mod2')\n\n\ndef ref_mod2(x0, x1, fmod):\n if x0.dtype == np.float32 or fmod == True:\n return np.fmod(x0, x1)\n else:\n return np.mod(x0, x1)\n\n\n@pytest.mark.parametrize(\"ctx, func_name\", ctxs)\n@pytest.mark.parametrize(\"x0_shape, x1_shape\", [\n ((2, 3, 4), (2, 3, 4)),\n ((2, 3, 4), (1, 1, 1)),\n ((1, 1, 1), (2, 3, 4)),\n])\n@pytest.mark.parametrize('fmod', [False, True])\n@pytest.mark.parametrize('dtype', [np.float32, np.int32])\n@pytest.mark.parametrize(\"seed\", [313])\ndef test_mod2_forward(seed, x0_shape, x1_shape, fmod, dtype, ctx, func_name):\n from nbla_test_utils import function_tester\n rng = np.random.RandomState(seed)\n if dtype == np.float32:\n inputs = [rng.randn(*x0_shape).astype(dtype),\n rng.randn(*x1_shape).astype(dtype)]\n else:\n inputs = [rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max, x0_shape).astype(dtype),\n rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max, x1_shape).astype(dtype)]\n backward = [False, False]\n func_args = [fmod]\n function_tester(rng, F.mod2, ref_mod2, inputs,\n func_name=func_name, func_args=func_args,\n atol_f=0, ctx=ctx, backward=backward)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@require_GET
def Follow(request, shorturl):
link = get_object_or_404(Link, shorturl=shorturl)
link.vi += 1
print(link.vi)
link.save()
return HttpResponseRedirect(link.link)
def FormView(request):
toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]
if request.user.is_authenticated:
yl = Link.objects.filter(user=request.user)
else:
yl = None
context = {'form': UrlForm, 'links': yl, 't': toplink}
return render(request, 'shortu.html', context)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@require_GET
def Follow(request, shorturl):
link = get_object_or_404(Link, shorturl=shorturl)
link.vi += 1
print(link.vi)
link.save()
return HttpResponseRedirect(link.link)
def FormView(request):
toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]
if request.user.is_authenticated:
yl = Link.objects.filter(user=request.user)
else:
yl = None
context = {'form': UrlForm, 'links': yl, 't': toplink}
return render(request, 'shortu.html', context)
@require_GET
def info(request, shorturl):
link = get_object_or_404(Link, shorturl=shorturl)
return render(request, 'info.html', {'link': link})
@require_POST
def Submit(request):
form = UrlForm(request.POST)
if form.is_valid():
link = form.cleaned_data['url']
costom = form.cleaned_data['costom']
if costom:
if Link.objects.filter(shorturl=costom).exists():
pass
else:
shorturl = costom
newlink = Link.objects.create(link=link, user=request.user,
shorturl=shorturl)
return render(request, 'info.html', {'link': newlink})
j = 1
while j < 11:
newshort = short_url_gen(j)
if Link.objects.filter(shorturl=costom).exists():
j += 1
continue
newlink = Link.objects.create(link=link, shorturl=newshort,
user=request.user)
return render(request, 'info.html', {'link': newlink})
return render(request, 'home.html')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def short_url_gen(stringLength=5):
"""Generate a random string of fixed length """
letters = string.ascii_letters + string.digits
return ''.join(random.choice(letters) for i in range(stringLength))
@require_GET
def Follow(request, shorturl):
link = get_object_or_404(Link, shorturl=shorturl)
link.vi += 1
print(link.vi)
link.save()
return HttpResponseRedirect(link.link)
def FormView(request):
toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]
if request.user.is_authenticated:
yl = Link.objects.filter(user=request.user)
else:
yl = None
context = {'form': UrlForm, 'links': yl, 't': toplink}
return render(request, 'shortu.html', context)
@require_GET
def info(request, shorturl):
link = get_object_or_404(Link, shorturl=shorturl)
return render(request, 'info.html', {'link': link})
@require_POST
def Submit(request):
form = UrlForm(request.POST)
if form.is_valid():
link = form.cleaned_data['url']
costom = form.cleaned_data['costom']
if costom:
if Link.objects.filter(shorturl=costom).exists():
pass
else:
shorturl = costom
newlink = Link.objects.create(link=link, user=request.user,
shorturl=shorturl)
return render(request, 'info.html', {'link': newlink})
j = 1
while j < 11:
newshort = short_url_gen(j)
if Link.objects.filter(shorturl=costom).exists():
j += 1
continue
newlink = Link.objects.create(link=link, shorturl=newshort,
user=request.user)
return render(request, 'info.html', {'link': newlink})
return render(request, 'home.html')
<|reserved_special_token_1|>
from django.shortcuts import render
from django.contrib import messages
from django.views.generic import View
from django.views.decorators.http import require_GET, require_POST
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect
from django.db.models import Count
from .forms import UrlForm
from .models import Link
import random
import string
def short_url_gen(stringLength=5):
"""Generate a random string of fixed length """
letters = string.ascii_letters + string.digits
return ''.join(random.choice(letters) for i in range(stringLength))
@require_GET
def Follow(request, shorturl):
link = get_object_or_404(Link, shorturl=shorturl)
link.vi += 1
print(link.vi)
link.save()
return HttpResponseRedirect(link.link)
def FormView(request):
toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]
if request.user.is_authenticated:
yl = Link.objects.filter(user=request.user)
else:
yl = None
context = {'form': UrlForm, 'links': yl, 't': toplink}
return render(request, 'shortu.html', context)
@require_GET
def info(request, shorturl):
link = get_object_or_404(Link, shorturl=shorturl)
return render(request, 'info.html', {'link': link})
@require_POST
def Submit(request):
form = UrlForm(request.POST)
if form.is_valid():
link = form.cleaned_data['url']
costom = form.cleaned_data['costom']
if costom:
if Link.objects.filter(shorturl=costom).exists():
pass
else:
shorturl = costom
newlink = Link.objects.create(link=link, user=request.user,
shorturl=shorturl)
return render(request, 'info.html', {'link': newlink})
j = 1
while j < 11:
newshort = short_url_gen(j)
if Link.objects.filter(shorturl=costom).exists():
j += 1
continue
newlink = Link.objects.create(link=link, shorturl=newshort,
user=request.user)
return render(request, 'info.html', {'link': newlink})
return render(request, 'home.html')
<|reserved_special_token_1|>
from django.shortcuts import render
from django.contrib import messages
from django.views.generic import View
from django.views.decorators.http import require_GET, require_POST
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse,HttpResponsePermanentRedirect,HttpResponseRedirect
from django.db.models import Count
from .forms import UrlForm
from .models import Link
import random
import string
def short_url_gen(stringLength: int = 5) -> str:
    """Generate a random alphanumeric string of length *stringLength*."""
    # 62-character alphabet: a-z, A-Z and 0-9.
    letters = string.ascii_letters + string.digits
    return ''.join(random.choice(letters) for i in range(stringLength))
@require_GET
def Follow(request, shorturl):
    """Redirect to the target URL stored under *shorturl* (404 if unknown)."""
    link = get_object_or_404(Link,shorturl=shorturl)
    # Count the visit.  NOTE(review): read-modify-write is racy under
    # concurrent requests; an F('vi') + 1 update would be atomic.
    link.vi += 1
    print(link.vi)  # NOTE(review): debug leftover — consider removing.
    link.save()
    return HttpResponseRedirect(link.link)
def FormView(request):
    """Render the shortener landing page.

    Context: the URL form, the five most-visited links overall ('t'),
    and the authenticated user's own links ('links', None for anonymous).
    """
    # Top five links by visit count, descending.
    toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]
    if request.user.is_authenticated:
        yl = Link.objects.filter(user = request.user)
    else:
        yl = None
    context = {
        'form' :UrlForm,
        'links':yl,
        't':toplink
    }
    return render(request, 'shortu.html', context)
@require_GET
def info(request,shorturl):
    """Render the detail page for the Link identified by *shorturl* (404 if unknown)."""
    link = get_object_or_404(Link,shorturl=shorturl)
    return render(request,'info.html',{'link':link})
@require_POST
def Submit(request):
    """Create a short link from the submitted form.

    Uses the requested custom code when it is free; otherwise generates
    random codes of increasing length (1..10 chars) until an unused one
    is found.  Renders the link's info page on success, or 'home.html'
    when the form is invalid or no free code was found.
    """
    form = UrlForm(request.POST)
    if form.is_valid():
        link = form.cleaned_data['url']
        costom = form.cleaned_data['costom']
        if costom:
            if Link.objects.filter(shorturl=costom).exists():
                # Custom code already taken: fall through to the random
                # generator below.  TODO: surface a message to the user
                # (the commented-out messages() call hinted at this).
                pass
            else:
                shorturl = costom
                newlink = Link.objects.create(link=link, user=request.user,
                    shorturl=shorturl)
                return render(request, 'info.html', {'link': newlink})
        j = 1
        while j < 11:
            newshort = short_url_gen(j)
            # BUG FIX: uniqueness must be checked against the freshly
            # generated code, not the (possibly empty) custom code —
            # the original tested `shorturl=costom`, so collisions on
            # the generated code were never detected.
            if Link.objects.filter(shorturl=newshort).exists():
                j += 1
                continue
            newlink = Link.objects.create(link=link, shorturl=newshort,
                user=request.user)
            return render(request, 'info.html', {'link': newlink})
    return render(request, 'home.html')
|
flexible
|
{
"blob_id": "11952e60ab95bc1896fd899a5ced126dcafec63a",
"index": 9882,
"step-1": "<mask token>\n\n\n@require_GET\ndef Follow(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n link.vi += 1\n print(link.vi)\n link.save()\n return HttpResponseRedirect(link.link)\n\n\ndef FormView(request):\n toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]\n if request.user.is_authenticated:\n yl = Link.objects.filter(user=request.user)\n else:\n yl = None\n context = {'form': UrlForm, 'links': yl, 't': toplink}\n return render(request, 'shortu.html', context)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@require_GET\ndef Follow(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n link.vi += 1\n print(link.vi)\n link.save()\n return HttpResponseRedirect(link.link)\n\n\ndef FormView(request):\n toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]\n if request.user.is_authenticated:\n yl = Link.objects.filter(user=request.user)\n else:\n yl = None\n context = {'form': UrlForm, 'links': yl, 't': toplink}\n return render(request, 'shortu.html', context)\n\n\n@require_GET\ndef info(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n return render(request, 'info.html', {'link': link})\n\n\n@require_POST\ndef Submit(request):\n form = UrlForm(request.POST)\n if form.is_valid():\n link = form.cleaned_data['url']\n costom = form.cleaned_data['costom']\n if costom:\n if Link.objects.filter(shorturl=costom).exists():\n pass\n else:\n shorturl = costom\n newlink = Link.objects.create(link=link, user=request.user,\n shorturl=shorturl)\n return render(request, 'info.html', {'link': newlink})\n j = 1\n while j < 11:\n newshort = short_url_gen(j)\n if Link.objects.filter(shorturl=costom).exists():\n j += 1\n continue\n newlink = Link.objects.create(link=link, shorturl=newshort,\n user=request.user)\n return render(request, 'info.html', {'link': newlink})\n return render(request, 'home.html')\n",
"step-3": "<mask token>\n\n\ndef short_url_gen(stringLength=5):\n \"\"\"Generate a random string of fixed length \"\"\"\n letters = string.ascii_letters + string.digits\n return ''.join(random.choice(letters) for i in range(stringLength))\n\n\n@require_GET\ndef Follow(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n link.vi += 1\n print(link.vi)\n link.save()\n return HttpResponseRedirect(link.link)\n\n\ndef FormView(request):\n toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]\n if request.user.is_authenticated:\n yl = Link.objects.filter(user=request.user)\n else:\n yl = None\n context = {'form': UrlForm, 'links': yl, 't': toplink}\n return render(request, 'shortu.html', context)\n\n\n@require_GET\ndef info(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n return render(request, 'info.html', {'link': link})\n\n\n@require_POST\ndef Submit(request):\n form = UrlForm(request.POST)\n if form.is_valid():\n link = form.cleaned_data['url']\n costom = form.cleaned_data['costom']\n if costom:\n if Link.objects.filter(shorturl=costom).exists():\n pass\n else:\n shorturl = costom\n newlink = Link.objects.create(link=link, user=request.user,\n shorturl=shorturl)\n return render(request, 'info.html', {'link': newlink})\n j = 1\n while j < 11:\n newshort = short_url_gen(j)\n if Link.objects.filter(shorturl=costom).exists():\n j += 1\n continue\n newlink = Link.objects.create(link=link, shorturl=newshort,\n user=request.user)\n return render(request, 'info.html', {'link': newlink})\n return render(request, 'home.html')\n",
"step-4": "from django.shortcuts import render\nfrom django.contrib import messages\nfrom django.views.generic import View\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect\nfrom django.db.models import Count\nfrom .forms import UrlForm\nfrom .models import Link\nimport random\nimport string\n\n\ndef short_url_gen(stringLength=5):\n \"\"\"Generate a random string of fixed length \"\"\"\n letters = string.ascii_letters + string.digits\n return ''.join(random.choice(letters) for i in range(stringLength))\n\n\n@require_GET\ndef Follow(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n link.vi += 1\n print(link.vi)\n link.save()\n return HttpResponseRedirect(link.link)\n\n\ndef FormView(request):\n toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]\n if request.user.is_authenticated:\n yl = Link.objects.filter(user=request.user)\n else:\n yl = None\n context = {'form': UrlForm, 'links': yl, 't': toplink}\n return render(request, 'shortu.html', context)\n\n\n@require_GET\ndef info(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n return render(request, 'info.html', {'link': link})\n\n\n@require_POST\ndef Submit(request):\n form = UrlForm(request.POST)\n if form.is_valid():\n link = form.cleaned_data['url']\n costom = form.cleaned_data['costom']\n if costom:\n if Link.objects.filter(shorturl=costom).exists():\n pass\n else:\n shorturl = costom\n newlink = Link.objects.create(link=link, user=request.user,\n shorturl=shorturl)\n return render(request, 'info.html', {'link': newlink})\n j = 1\n while j < 11:\n newshort = short_url_gen(j)\n if Link.objects.filter(shorturl=costom).exists():\n j += 1\n continue\n newlink = Link.objects.create(link=link, shorturl=newshort,\n user=request.user)\n return render(request, 'info.html', {'link': 
newlink})\n return render(request, 'home.html')\n",
"step-5": "from django.shortcuts import render\nfrom django.contrib import messages\nfrom django.views.generic import View\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse,HttpResponsePermanentRedirect,HttpResponseRedirect\nfrom django.db.models import Count\n\nfrom .forms import UrlForm\nfrom .models import Link\n\nimport random\nimport string\n\ndef short_url_gen(stringLength=5):\n \"\"\"Generate a random string of fixed length \"\"\"\n letters = string.ascii_letters + string.digits\n return ''.join(random.choice(letters) for i in range(stringLength))\n@require_GET\ndef Follow(request,shorturl):\n link = get_object_or_404(Link,shorturl=shorturl)\n link.vi += 1\n print(link.vi)\n link.save()\n return HttpResponseRedirect(link.link)\n\ndef FormView(request):\n toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]\n if request.user.is_authenticated:\n yl = Link.objects.filter(user = request.user)\n else:\n yl = None\n context = {\n 'form' :UrlForm,\n 'links':yl,\n 't':toplink\n }\n\n return render(request, 'shortu.html', context)\n@require_GET\ndef info(request,shorturl):\n link = get_object_or_404(Link,shorturl=shorturl)\n return render(request,'info.html',{'link':link})\n\n@require_POST\ndef Submit(request):\n form = UrlForm(request.POST)\n if form.is_valid():\n link = form.cleaned_data['url']\n costom = form.cleaned_data['costom']\n if costom:\n if Link.objects.filter(shorturl=costom).exists():\n #messages(request,\"Costom url aready taken\")\n pass\n else: \n shorturl = costom\n newlink = Link.objects.create(link= link,user = request.user, shorturl= shorturl)\n return render(request,'info.html',{'link':newlink})\n j=1\n while j<11:\n newshort = short_url_gen(j)\n if Link.objects.filter(shorturl=costom).exists():\n j+=1\n continue\n newlink = Link.objects.create(link= link, shorturl= newshort,user = request.user)\n return 
render(request,'info.html',{'link':newlink})\n \n\n return render(request, 'home.html')",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import subprocess
import re
class Command:
    """Pipeline stage that feeds egret parse trees to a `travatar`
    translation subprocess and collects its translation plus trace.

    The class-level attributes appear to be metadata consumed by the
    surrounding pipeline framework (stream arity / threading hints) —
    confirm against the framework that instantiates Command.
    """
    InputSize = 1         # number of input streams routine() consumes
    OutputSize = 2        # number of values routine() produces
    MultiThreadable = True
    ShareResources = False
    def __init__(self, bin, config, showerr=False):
        # Launch travatar reading egret-format input on stdin and writing
        # both output and trace to stdout, unbuffered; stderr is captured
        # (i.e. suppressed) unless showerr is set.
        self.travatar = subprocess.Popen([bin, "-config_file", config, "-trace_out", "STDOUT", "-in_format", "egret", "-buffer", "false"],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None if showerr else subprocess.PIPE, universal_newlines=True)
        # Matches a "[start, end]" span as found in the second |||-field
        # of a trace line; group(2) is the end position.
        self.span_reg = re.compile(r"\[([0-9]+), ([0-9]+)\]")
    def routine(self, instream):
        """Translate one egret tree from instream[0].

        The input must begin with a "success" line followed by the tree;
        otherwise it is passed through unchanged with an empty
        translation.  Returns (full trace text, translation line).
        """
        egret_tree = instream[0]
        if not egret_tree.startswith("success\n"):
            return (egret_tree, "",)
        egret_tree = egret_tree[8:]  # strip the leading "success" line
        self.travatar.stdin.write(egret_tree)
        self.travatar.stdin.flush()
        # First trace line carries the sentence span; its end index gives
        # the number of input tokens still to be accounted for.
        travatar_trace = self.travatar.stdout.readline()
        spltrace = travatar_trace.split(" ||| ")
        m = self.span_reg.match(spltrace[1])
        inputlen = int(m.group(2))
        # Read trace lines until every input token has been consumed.
        # Quoted items ("...") in the tree field are terminal symbols,
        # each covering one input token.
        while True:
            travatar_trace_line = self.travatar.stdout.readline()
            spltrace = travatar_trace_line.split(" ||| ")
            spltree = spltrace[2].split(" ")
            for x in spltree:
                if x and x[0] == x[-1] == "\"":
                    inputlen -= 1
            # NOTE(review): field 4 is overwritten with ".\n" —
            # presumably blanking a per-line field of the trace; confirm
            # against the travatar trace format.
            spltrace[4] = ".\n"
            travatar_trace += " ||| ".join(spltrace)
            if not inputlen:
                break
        # After the trace, travatar emits the translation line itself.
        travatar_output = self.travatar.stdout.readline().rstrip("\n")
        return ("success\n" + travatar_output + "\n" + travatar_trace, travatar_output,)
|
normal
|
{
"blob_id": "91cef72962332e7efcc86f1b19da4382bd72a466",
"index": 9278,
"step-1": "<mask token>\n\n\nclass Command:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Command:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, bin, config, showerr=False):\n self.travatar = subprocess.Popen([bin, '-config_file', config,\n '-trace_out', 'STDOUT', '-in_format', 'egret', '-buffer',\n 'false'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr\n =None if showerr else subprocess.PIPE, universal_newlines=True)\n self.span_reg = re.compile('\\\\[([0-9]+), ([0-9]+)\\\\]')\n\n def routine(self, instream):\n egret_tree = instream[0]\n if not egret_tree.startswith('success\\n'):\n return egret_tree, ''\n egret_tree = egret_tree[8:]\n self.travatar.stdin.write(egret_tree)\n self.travatar.stdin.flush()\n travatar_trace = self.travatar.stdout.readline()\n spltrace = travatar_trace.split(' ||| ')\n m = self.span_reg.match(spltrace[1])\n inputlen = int(m.group(2))\n while True:\n travatar_trace_line = self.travatar.stdout.readline()\n spltrace = travatar_trace_line.split(' ||| ')\n spltree = spltrace[2].split(' ')\n for x in spltree:\n if x and x[0] == x[-1] == '\"':\n inputlen -= 1\n spltrace[4] = '.\\n'\n travatar_trace += ' ||| '.join(spltrace)\n if not inputlen:\n break\n travatar_output = self.travatar.stdout.readline().rstrip('\\n')\n return ('success\\n' + travatar_output + '\\n' + travatar_trace,\n travatar_output)\n",
"step-3": "<mask token>\n\n\nclass Command:\n InputSize = 1\n OutputSize = 2\n MultiThreadable = True\n ShareResources = False\n\n def __init__(self, bin, config, showerr=False):\n self.travatar = subprocess.Popen([bin, '-config_file', config,\n '-trace_out', 'STDOUT', '-in_format', 'egret', '-buffer',\n 'false'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr\n =None if showerr else subprocess.PIPE, universal_newlines=True)\n self.span_reg = re.compile('\\\\[([0-9]+), ([0-9]+)\\\\]')\n\n def routine(self, instream):\n egret_tree = instream[0]\n if not egret_tree.startswith('success\\n'):\n return egret_tree, ''\n egret_tree = egret_tree[8:]\n self.travatar.stdin.write(egret_tree)\n self.travatar.stdin.flush()\n travatar_trace = self.travatar.stdout.readline()\n spltrace = travatar_trace.split(' ||| ')\n m = self.span_reg.match(spltrace[1])\n inputlen = int(m.group(2))\n while True:\n travatar_trace_line = self.travatar.stdout.readline()\n spltrace = travatar_trace_line.split(' ||| ')\n spltree = spltrace[2].split(' ')\n for x in spltree:\n if x and x[0] == x[-1] == '\"':\n inputlen -= 1\n spltrace[4] = '.\\n'\n travatar_trace += ' ||| '.join(spltrace)\n if not inputlen:\n break\n travatar_output = self.travatar.stdout.readline().rstrip('\\n')\n return ('success\\n' + travatar_output + '\\n' + travatar_trace,\n travatar_output)\n",
"step-4": "import subprocess\nimport re\n\n\nclass Command:\n InputSize = 1\n OutputSize = 2\n MultiThreadable = True\n ShareResources = False\n\n def __init__(self, bin, config, showerr=False):\n self.travatar = subprocess.Popen([bin, '-config_file', config,\n '-trace_out', 'STDOUT', '-in_format', 'egret', '-buffer',\n 'false'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr\n =None if showerr else subprocess.PIPE, universal_newlines=True)\n self.span_reg = re.compile('\\\\[([0-9]+), ([0-9]+)\\\\]')\n\n def routine(self, instream):\n egret_tree = instream[0]\n if not egret_tree.startswith('success\\n'):\n return egret_tree, ''\n egret_tree = egret_tree[8:]\n self.travatar.stdin.write(egret_tree)\n self.travatar.stdin.flush()\n travatar_trace = self.travatar.stdout.readline()\n spltrace = travatar_trace.split(' ||| ')\n m = self.span_reg.match(spltrace[1])\n inputlen = int(m.group(2))\n while True:\n travatar_trace_line = self.travatar.stdout.readline()\n spltrace = travatar_trace_line.split(' ||| ')\n spltree = spltrace[2].split(' ')\n for x in spltree:\n if x and x[0] == x[-1] == '\"':\n inputlen -= 1\n spltrace[4] = '.\\n'\n travatar_trace += ' ||| '.join(spltrace)\n if not inputlen:\n break\n travatar_output = self.travatar.stdout.readline().rstrip('\\n')\n return ('success\\n' + travatar_output + '\\n' + travatar_trace,\n travatar_output)\n",
"step-5": "import subprocess\nimport re\n\n\nclass Command:\n\n InputSize = 1\n OutputSize = 2\n MultiThreadable = True\n ShareResources = False\n\n def __init__(self, bin, config, showerr=False):\n self.travatar = subprocess.Popen([bin, \"-config_file\", config, \"-trace_out\", \"STDOUT\", \"-in_format\", \"egret\", \"-buffer\", \"false\"],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None if showerr else subprocess.PIPE, universal_newlines=True)\n\n self.span_reg = re.compile(r\"\\[([0-9]+), ([0-9]+)\\]\")\n\n def routine(self, instream):\n egret_tree = instream[0]\n if not egret_tree.startswith(\"success\\n\"):\n return (egret_tree, \"\",)\n\n egret_tree = egret_tree[8:]\n self.travatar.stdin.write(egret_tree)\n self.travatar.stdin.flush()\n\n travatar_trace = self.travatar.stdout.readline()\n spltrace = travatar_trace.split(\" ||| \")\n m = self.span_reg.match(spltrace[1])\n\n inputlen = int(m.group(2))\n\n while True:\n travatar_trace_line = self.travatar.stdout.readline()\n spltrace = travatar_trace_line.split(\" ||| \")\n spltree = spltrace[2].split(\" \")\n for x in spltree:\n if x and x[0] == x[-1] == \"\\\"\":\n inputlen -= 1\n spltrace[4] = \".\\n\"\n travatar_trace += \" ||| \".join(spltrace)\n if not inputlen:\n break\n \n travatar_output = self.travatar.stdout.readline().rstrip(\"\\n\")\n\n return (\"success\\n\" + travatar_output + \"\\n\" + travatar_trace, travatar_output,)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:

    def commonFactors(self, a: int, b: int) ->int:
        """Return the number of common factors of ``a`` and ``b``.

        Every common factor divides gcd(a, b), so only candidates in
        1..gcd(a, b) need checking.
        """
        # BUG FIX: `math` was used without being imported anywhere in
        # this snippet; import it locally to keep the fragment
        # self-contained.
        import math
        gcd = math.gcd(a, b)
        return sum(a % i == 0 and b % i == 0 for i in range(1, gcd + 1))
|
flexible
|
{
"blob_id": "ea696329a0cfd558fb592ffaf6339a35e8950a3c",
"index": 6721,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def commonFactors(self, a: int, b: int) ->int:\n gcd = math.gcd(a, b)\n return sum(a % i == 0 and b % i == 0 for i in range(1, gcd + 1))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#Define a function max_of_three() that takes three numbers as
#arguments and returns the largest of them.
def max_of_three(a, b, c):
    """Return the largest of the three arguments."""
    # The original bound a local named `max`, shadowing the builtin;
    # use a neutral name instead.
    larger = a if a > b else b
    return larger if larger > c else c


# Fixed Python-2 print syntax.  BUG FIX: the original demo called the
# BUILTIN max(), not max_of_three(); both yield 234 here, but the intent
# was clearly to exercise the function above.
print(max_of_three(234, 124, 43))
def max_of_three2(a, b, c):
    """Print the largest of the three arguments (returns None)."""
    # Fixed Python-2 print statements; branch logic unchanged.
    if a > b and a > c:
        print(a)
    elif b > c:
        print(b)
    else:
        print(c)


# max_of_three2 prints its result and returns None, so this line also
# prints "None" — behavior kept for parity with the original script.
print(max_of_three2(0, 15, 2))
|
normal
|
{
"blob_id": "00b4a57537358797bfe37eee76bbf73ef42de081",
"index": 9775,
"step-1": "\n\n\n#Define a function max_of_three() that takes three numbers as\n#arguments and returns the largest of them.\n\n\n\n\ndef max_of_three(a,b,c):\n\n max=0\n if a > b:\n max = a\n else:\n max = b\n\n if max > c :\n return max\n else:\n return c\n\n\n\nprint max(234,124,43)\n\n\ndef max_of_three2(a, b, c):\n if a > b and a > c:\n print a\n elif b > c:\n print b\n else:\n print c\n\n\nprint max_of_three2(0, 15, 2)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class TestFileDisplayPane(TestCase):
def setUp(self):
self.file_display = FileDisplayPane(supported_readers={'Probe':
ProbeMultiImageReader()}, supported_parsers={'Probe':
ProbeParser()})
self.file_path = test_image_path
<|reserved_special_token_0|>
def test_add_directory(self):
self.file_display.add_files(directory)
self.assertEqual(1, len(self.file_display.file_table))
table_row = self.file_display.file_table[0]
self.assertEqual('/path/to/some/file', table_row.name)
self.assertEqual('Probe', table_row.tag)
self.assertDictEqual({'Probe': test_image_path}, table_row.file_set
.registry)
def test_remove_file(self):
self.file_display.add_files(self.file_path)
self.file_display.remove_file([self.file_display.file_table[0]])
self.assertEqual(0, len(self.file_display.file_table))
self.file_display.add_files(self.file_path)
self.assertEqual(1, len(self.file_display.file_table))
def test_filter_files(self):
self.file_display.add_files(self.file_path)
self.file_display.filter_files('some')
self.assertEqual(1, len(self.file_display.file_table))
self.file_display.filter_files('sci-pyfibre')
self.assertEqual(0, len(self.file_display.file_table))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestFileDisplayPane(TestCase):
    """Unit tests for FileDisplayPane using probe reader/parser doubles."""

    def setUp(self):
        # Pane wired with stub 'Probe' reader/parser so no real image
        # IO is needed.
        self.file_display = FileDisplayPane(supported_readers={'Probe':
            ProbeMultiImageReader()}, supported_parsers={'Probe':
            ProbeParser()})
        self.file_path = test_image_path

    def test_add_file(self):
        # Adding one file creates exactly one table row.
        self.file_display.add_files(self.file_path)
        self.assertEqual(1, len(self.file_display.file_table))
        table_row = self.file_display.file_table[0]
        # Row name presumably derives from test_image_path — confirm
        # against the fixtures module.
        self.assertEqual('/path/to/some/file', table_row.name)
        self.assertEqual('Probe', table_row.tag)
        self.assertDictEqual({'Probe': test_image_path}, table_row.file_set
            .registry)
        # Re-adding the same file must not create a duplicate row.
        self.file_display.add_files(test_image_path)
        self.assertEqual(1, len(self.file_display.file_table))

    def test_add_directory(self):
        # Adding a directory discovers the contained file set.
        self.file_display.add_files(directory)
        self.assertEqual(1, len(self.file_display.file_table))
        table_row = self.file_display.file_table[0]
        self.assertEqual('/path/to/some/file', table_row.name)
        self.assertEqual('Probe', table_row.tag)
        self.assertDictEqual({'Probe': test_image_path}, table_row.file_set
            .registry)

    def test_remove_file(self):
        # A removed row disappears and the same file can be re-added.
        self.file_display.add_files(self.file_path)
        self.file_display.remove_file([self.file_display.file_table[0]])
        self.assertEqual(0, len(self.file_display.file_table))
        self.file_display.add_files(self.file_path)
        self.assertEqual(1, len(self.file_display.file_table))

    def test_filter_files(self):
        # Filtering appears to match substrings of the row name —
        # 'some' keeps the row, an unrelated string hides it.
        self.file_display.add_files(self.file_path)
        self.file_display.filter_files('some')
        self.assertEqual(1, len(self.file_display.file_table))
        self.file_display.filter_files('sci-pyfibre')
        self.assertEqual(0, len(self.file_display.file_table))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
source_dir = os.path.dirname(os.path.realpath(__file__))
pyfibre_dir = os.path.dirname(os.path.dirname(source_dir))
class TestFileDisplayPane(TestCase):
    """Unit tests for FileDisplayPane using probe reader/parser doubles."""

    def setUp(self):
        # Pane wired with stub 'Probe' reader/parser so no real image
        # IO is needed.
        self.file_display = FileDisplayPane(supported_readers={'Probe':
            ProbeMultiImageReader()}, supported_parsers={'Probe':
            ProbeParser()})
        self.file_path = test_image_path

    def test_add_file(self):
        # Adding one file creates exactly one table row.
        self.file_display.add_files(self.file_path)
        self.assertEqual(1, len(self.file_display.file_table))
        table_row = self.file_display.file_table[0]
        # Row name presumably derives from test_image_path — confirm
        # against the fixtures module.
        self.assertEqual('/path/to/some/file', table_row.name)
        self.assertEqual('Probe', table_row.tag)
        self.assertDictEqual({'Probe': test_image_path}, table_row.file_set
            .registry)
        # Re-adding the same file must not create a duplicate row.
        self.file_display.add_files(test_image_path)
        self.assertEqual(1, len(self.file_display.file_table))

    def test_add_directory(self):
        # Adding a directory discovers the contained file set.
        self.file_display.add_files(directory)
        self.assertEqual(1, len(self.file_display.file_table))
        table_row = self.file_display.file_table[0]
        self.assertEqual('/path/to/some/file', table_row.name)
        self.assertEqual('Probe', table_row.tag)
        self.assertDictEqual({'Probe': test_image_path}, table_row.file_set
            .registry)

    def test_remove_file(self):
        # A removed row disappears and the same file can be re-added.
        self.file_display.add_files(self.file_path)
        self.file_display.remove_file([self.file_display.file_table[0]])
        self.assertEqual(0, len(self.file_display.file_table))
        self.file_display.add_files(self.file_path)
        self.assertEqual(1, len(self.file_display.file_table))

    def test_filter_files(self):
        # Filtering appears to match substrings of the row name —
        # 'some' keeps the row, an unrelated string hides it.
        self.file_display.add_files(self.file_path)
        self.file_display.filter_files('some')
        self.assertEqual(1, len(self.file_display.file_table))
        self.file_display.filter_files('sci-pyfibre')
        self.assertEqual(0, len(self.file_display.file_table))
<|reserved_special_token_1|>
import os
from unittest import TestCase
from pyfibre.gui.file_display_pane import FileDisplayPane
from pyfibre.tests.fixtures import directory, test_image_path
from pyfibre.tests.probe_classes.parsers import ProbeParser
from pyfibre.tests.probe_classes.readers import ProbeMultiImageReader
source_dir = os.path.dirname(os.path.realpath(__file__))
pyfibre_dir = os.path.dirname(os.path.dirname(source_dir))
class TestFileDisplayPane(TestCase):
    """Unit tests for FileDisplayPane using probe reader/parser doubles."""

    def setUp(self):
        # Pane wired with stub 'Probe' reader/parser so no real image
        # IO is needed.
        self.file_display = FileDisplayPane(supported_readers={'Probe':
            ProbeMultiImageReader()}, supported_parsers={'Probe':
            ProbeParser()})
        self.file_path = test_image_path

    def test_add_file(self):
        # Adding one file creates exactly one table row.
        self.file_display.add_files(self.file_path)
        self.assertEqual(1, len(self.file_display.file_table))
        table_row = self.file_display.file_table[0]
        # Row name presumably derives from test_image_path — confirm
        # against the fixtures module.
        self.assertEqual('/path/to/some/file', table_row.name)
        self.assertEqual('Probe', table_row.tag)
        self.assertDictEqual({'Probe': test_image_path}, table_row.file_set
            .registry)
        # Re-adding the same file must not create a duplicate row.
        self.file_display.add_files(test_image_path)
        self.assertEqual(1, len(self.file_display.file_table))

    def test_add_directory(self):
        # Adding a directory discovers the contained file set.
        self.file_display.add_files(directory)
        self.assertEqual(1, len(self.file_display.file_table))
        table_row = self.file_display.file_table[0]
        self.assertEqual('/path/to/some/file', table_row.name)
        self.assertEqual('Probe', table_row.tag)
        self.assertDictEqual({'Probe': test_image_path}, table_row.file_set
            .registry)

    def test_remove_file(self):
        # A removed row disappears and the same file can be re-added.
        self.file_display.add_files(self.file_path)
        self.file_display.remove_file([self.file_display.file_table[0]])
        self.assertEqual(0, len(self.file_display.file_table))
        self.file_display.add_files(self.file_path)
        self.assertEqual(1, len(self.file_display.file_table))

    def test_filter_files(self):
        # Filtering appears to match substrings of the row name —
        # 'some' keeps the row, an unrelated string hides it.
        self.file_display.add_files(self.file_path)
        self.file_display.filter_files('some')
        self.assertEqual(1, len(self.file_display.file_table))
        self.file_display.filter_files('sci-pyfibre')
        self.assertEqual(0, len(self.file_display.file_table))
<|reserved_special_token_1|>
import os
from unittest import TestCase
from pyfibre.gui.file_display_pane import FileDisplayPane
from pyfibre.tests.fixtures import (
directory,
test_image_path)
from pyfibre.tests.probe_classes.parsers import ProbeParser
from pyfibre.tests.probe_classes.readers import ProbeMultiImageReader
source_dir = os.path.dirname(os.path.realpath(__file__))
pyfibre_dir = os.path.dirname(os.path.dirname(source_dir))
class TestFileDisplayPane(TestCase):
    """Unit tests for FileDisplayPane using probe reader/parser doubles."""

    def setUp(self):
        # Pane wired with stub 'Probe' reader/parser so no real image
        # IO is needed.
        self.file_display = FileDisplayPane(
            supported_readers={'Probe': ProbeMultiImageReader()},
            supported_parsers={'Probe': ProbeParser()}
        )
        self.file_path = test_image_path

    def test_add_file(self):
        # Adding one file creates exactly one table row.
        self.file_display.add_files(self.file_path)
        self.assertEqual(1, len(self.file_display.file_table))
        table_row = self.file_display.file_table[0]
        # Row name presumably derives from test_image_path — confirm
        # against the fixtures module.
        self.assertEqual('/path/to/some/file', table_row.name)
        self.assertEqual('Probe', table_row.tag)
        self.assertDictEqual(
            {'Probe': test_image_path},
            table_row.file_set.registry)
        # Re-adding the same file must not create a duplicate row.
        self.file_display.add_files(test_image_path)
        self.assertEqual(1, len(self.file_display.file_table))

    def test_add_directory(self):
        # Adding a directory discovers the contained file set.
        self.file_display.add_files(directory)
        self.assertEqual(1, len(self.file_display.file_table))
        table_row = self.file_display.file_table[0]
        self.assertEqual('/path/to/some/file', table_row.name)
        self.assertEqual('Probe', table_row.tag)
        self.assertDictEqual(
            {'Probe': test_image_path},
            table_row.file_set.registry)

    def test_remove_file(self):
        # A removed row disappears and the same file can be re-added.
        self.file_display.add_files(self.file_path)
        self.file_display.remove_file(
            [self.file_display.file_table[0]])
        self.assertEqual(0, len(self.file_display.file_table))
        self.file_display.add_files(self.file_path)
        self.assertEqual(1, len(self.file_display.file_table))

    def test_filter_files(self):
        # Filtering appears to match substrings of the row name —
        # 'some' keeps the row, an unrelated string hides it.
        self.file_display.add_files(self.file_path)
        self.file_display.filter_files('some')
        self.assertEqual(1, len(self.file_display.file_table))
        self.file_display.filter_files('sci-pyfibre')
        self.assertEqual(0, len(self.file_display.file_table))
|
flexible
|
{
"blob_id": "7c65d0bdd4fd808b3d87706357a651601368e43b",
"index": 8596,
"step-1": "<mask token>\n\n\nclass TestFileDisplayPane(TestCase):\n\n def setUp(self):\n self.file_display = FileDisplayPane(supported_readers={'Probe':\n ProbeMultiImageReader()}, supported_parsers={'Probe':\n ProbeParser()})\n self.file_path = test_image_path\n <mask token>\n\n def test_add_directory(self):\n self.file_display.add_files(directory)\n self.assertEqual(1, len(self.file_display.file_table))\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual({'Probe': test_image_path}, table_row.file_set\n .registry)\n\n def test_remove_file(self):\n self.file_display.add_files(self.file_path)\n self.file_display.remove_file([self.file_display.file_table[0]])\n self.assertEqual(0, len(self.file_display.file_table))\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_filter_files(self):\n self.file_display.add_files(self.file_path)\n self.file_display.filter_files('some')\n self.assertEqual(1, len(self.file_display.file_table))\n self.file_display.filter_files('sci-pyfibre')\n self.assertEqual(0, len(self.file_display.file_table))\n",
"step-2": "<mask token>\n\n\nclass TestFileDisplayPane(TestCase):\n\n def setUp(self):\n self.file_display = FileDisplayPane(supported_readers={'Probe':\n ProbeMultiImageReader()}, supported_parsers={'Probe':\n ProbeParser()})\n self.file_path = test_image_path\n\n def test_add_file(self):\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual({'Probe': test_image_path}, table_row.file_set\n .registry)\n self.file_display.add_files(test_image_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_add_directory(self):\n self.file_display.add_files(directory)\n self.assertEqual(1, len(self.file_display.file_table))\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual({'Probe': test_image_path}, table_row.file_set\n .registry)\n\n def test_remove_file(self):\n self.file_display.add_files(self.file_path)\n self.file_display.remove_file([self.file_display.file_table[0]])\n self.assertEqual(0, len(self.file_display.file_table))\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_filter_files(self):\n self.file_display.add_files(self.file_path)\n self.file_display.filter_files('some')\n self.assertEqual(1, len(self.file_display.file_table))\n self.file_display.filter_files('sci-pyfibre')\n self.assertEqual(0, len(self.file_display.file_table))\n",
"step-3": "<mask token>\nsource_dir = os.path.dirname(os.path.realpath(__file__))\npyfibre_dir = os.path.dirname(os.path.dirname(source_dir))\n\n\nclass TestFileDisplayPane(TestCase):\n\n def setUp(self):\n self.file_display = FileDisplayPane(supported_readers={'Probe':\n ProbeMultiImageReader()}, supported_parsers={'Probe':\n ProbeParser()})\n self.file_path = test_image_path\n\n def test_add_file(self):\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual({'Probe': test_image_path}, table_row.file_set\n .registry)\n self.file_display.add_files(test_image_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_add_directory(self):\n self.file_display.add_files(directory)\n self.assertEqual(1, len(self.file_display.file_table))\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual({'Probe': test_image_path}, table_row.file_set\n .registry)\n\n def test_remove_file(self):\n self.file_display.add_files(self.file_path)\n self.file_display.remove_file([self.file_display.file_table[0]])\n self.assertEqual(0, len(self.file_display.file_table))\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_filter_files(self):\n self.file_display.add_files(self.file_path)\n self.file_display.filter_files('some')\n self.assertEqual(1, len(self.file_display.file_table))\n self.file_display.filter_files('sci-pyfibre')\n self.assertEqual(0, len(self.file_display.file_table))\n",
"step-4": "import os\nfrom unittest import TestCase\nfrom pyfibre.gui.file_display_pane import FileDisplayPane\nfrom pyfibre.tests.fixtures import directory, test_image_path\nfrom pyfibre.tests.probe_classes.parsers import ProbeParser\nfrom pyfibre.tests.probe_classes.readers import ProbeMultiImageReader\nsource_dir = os.path.dirname(os.path.realpath(__file__))\npyfibre_dir = os.path.dirname(os.path.dirname(source_dir))\n\n\nclass TestFileDisplayPane(TestCase):\n\n def setUp(self):\n self.file_display = FileDisplayPane(supported_readers={'Probe':\n ProbeMultiImageReader()}, supported_parsers={'Probe':\n ProbeParser()})\n self.file_path = test_image_path\n\n def test_add_file(self):\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual({'Probe': test_image_path}, table_row.file_set\n .registry)\n self.file_display.add_files(test_image_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_add_directory(self):\n self.file_display.add_files(directory)\n self.assertEqual(1, len(self.file_display.file_table))\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual({'Probe': test_image_path}, table_row.file_set\n .registry)\n\n def test_remove_file(self):\n self.file_display.add_files(self.file_path)\n self.file_display.remove_file([self.file_display.file_table[0]])\n self.assertEqual(0, len(self.file_display.file_table))\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_filter_files(self):\n self.file_display.add_files(self.file_path)\n self.file_display.filter_files('some')\n self.assertEqual(1, len(self.file_display.file_table))\n 
self.file_display.filter_files('sci-pyfibre')\n self.assertEqual(0, len(self.file_display.file_table))\n",
"step-5": "import os\nfrom unittest import TestCase\n\nfrom pyfibre.gui.file_display_pane import FileDisplayPane\nfrom pyfibre.tests.fixtures import (\n directory,\n test_image_path)\nfrom pyfibre.tests.probe_classes.parsers import ProbeParser\nfrom pyfibre.tests.probe_classes.readers import ProbeMultiImageReader\n\nsource_dir = os.path.dirname(os.path.realpath(__file__))\npyfibre_dir = os.path.dirname(os.path.dirname(source_dir))\n\n\nclass TestFileDisplayPane(TestCase):\n\n def setUp(self):\n\n self.file_display = FileDisplayPane(\n supported_readers={'Probe': ProbeMultiImageReader()},\n supported_parsers={'Probe': ProbeParser()}\n )\n self.file_path = test_image_path\n\n def test_add_file(self):\n\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual(\n {'Probe': test_image_path},\n table_row.file_set.registry)\n\n self.file_display.add_files(test_image_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_add_directory(self):\n\n self.file_display.add_files(directory)\n self.assertEqual(1, len(self.file_display.file_table))\n\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual(\n {'Probe': test_image_path},\n table_row.file_set.registry)\n\n def test_remove_file(self):\n\n self.file_display.add_files(self.file_path)\n self.file_display.remove_file(\n [self.file_display.file_table[0]])\n\n self.assertEqual(0, len(self.file_display.file_table))\n\n self.file_display.add_files(self.file_path)\n\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_filter_files(self):\n\n self.file_display.add_files(self.file_path)\n self.file_display.filter_files('some')\n\n self.assertEqual(1, 
len(self.file_display.file_table))\n\n self.file_display.filter_files('sci-pyfibre')\n\n self.assertEqual(0, len(self.file_display.file_table))\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import numpy as np
import matplotlib.pyplot as plt
def sample_1(N):
    """Draw N samples from {-10, -5, 3, 9} with weights 0.1/0.4/0.2/0.3."""
    values = np.array([-10, -5, 3, 9])
    weights = [0.1, 0.4, 0.2, 0.3]
    return np.random.choice(values, N, p=weights)
def sample_2(N):
    """Draw N samples from an exponential distribution with scale 0.5."""
    return np.random.exponential(scale=0.5, size=N)
def get_mean(sampling_fun, N, M):
    """Return an array of M sample means, each over N draws from sampling_fun."""
    return np.array([np.mean(sampling_fun(N)) for _ in range(M)])
n=np.array([10,100,1000])# the different sample sizes N to test
m=10000# number of repetitions M per sample size
medias_1=np.zeros((m,3))# holds the m sample means of sample_1 for each of the 3 Ns
medias_2=np.zeros((m,3))# same as above but for sample_2
texto='sample_'# common output-file prefix
for i in range(3):# one pass per sample size N
	medias_1[:,i]=get_mean(sample_1,n[i],m)
	medias_2[:,i]=get_mean(sample_2,n[i],m)
	np.savetxt(texto+'1_'+str(n[i])+'.txt',medias_1[:,i])# file with the m means for this N
	np.savetxt(texto+'2_'+str(n[i])+'.txt',medias_2[:,i])
|
normal
|
{
"blob_id": "d2d04686b3d7f8d01ca195750ca625baa06ed098",
"index": 2835,
"step-1": "<mask token>\n\n\ndef sample_1(N):\n numeros = np.array([-10, -5, 3, 9])\n return np.random.choice(numeros, N, p=[0.1, 0.4, 0.2, 0.3])\n\n\ndef sample_2(N):\n return np.random.exponential(0.5, N)\n\n\ndef get_mean(sampling_fun, N, M):\n medias = np.zeros(M)\n for i in range(M):\n medias[i] = np.mean(sampling_fun(N))\n return medias\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sample_1(N):\n numeros = np.array([-10, -5, 3, 9])\n return np.random.choice(numeros, N, p=[0.1, 0.4, 0.2, 0.3])\n\n\ndef sample_2(N):\n return np.random.exponential(0.5, N)\n\n\ndef get_mean(sampling_fun, N, M):\n medias = np.zeros(M)\n for i in range(M):\n medias[i] = np.mean(sampling_fun(N))\n return medias\n\n\n<mask token>\nfor i in range(3):\n medias_1[:, i] = get_mean(sample_1, n[i], m)\n medias_2[:, i] = get_mean(sample_2, n[i], m)\n np.savetxt(texto + '1_' + str(n[i]) + '.txt', medias_1[:, i])\n np.savetxt(texto + '2_' + str(n[i]) + '.txt', medias_2[:, i])\n",
"step-3": "<mask token>\n\n\ndef sample_1(N):\n numeros = np.array([-10, -5, 3, 9])\n return np.random.choice(numeros, N, p=[0.1, 0.4, 0.2, 0.3])\n\n\ndef sample_2(N):\n return np.random.exponential(0.5, N)\n\n\ndef get_mean(sampling_fun, N, M):\n medias = np.zeros(M)\n for i in range(M):\n medias[i] = np.mean(sampling_fun(N))\n return medias\n\n\nn = np.array([10, 100, 1000])\nm = 10000\nmedias_1 = np.zeros((m, 3))\nmedias_2 = np.zeros((m, 3))\ntexto = 'sample_'\nfor i in range(3):\n medias_1[:, i] = get_mean(sample_1, n[i], m)\n medias_2[:, i] = get_mean(sample_2, n[i], m)\n np.savetxt(texto + '1_' + str(n[i]) + '.txt', medias_1[:, i])\n np.savetxt(texto + '2_' + str(n[i]) + '.txt', medias_2[:, i])\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef sample_1(N):\n numeros = np.array([-10, -5, 3, 9])\n return np.random.choice(numeros, N, p=[0.1, 0.4, 0.2, 0.3])\n\n\ndef sample_2(N):\n return np.random.exponential(0.5, N)\n\n\ndef get_mean(sampling_fun, N, M):\n medias = np.zeros(M)\n for i in range(M):\n medias[i] = np.mean(sampling_fun(N))\n return medias\n\n\nn = np.array([10, 100, 1000])\nm = 10000\nmedias_1 = np.zeros((m, 3))\nmedias_2 = np.zeros((m, 3))\ntexto = 'sample_'\nfor i in range(3):\n medias_1[:, i] = get_mean(sample_1, n[i], m)\n medias_2[:, i] = get_mean(sample_2, n[i], m)\n np.savetxt(texto + '1_' + str(n[i]) + '.txt', medias_1[:, i])\n np.savetxt(texto + '2_' + str(n[i]) + '.txt', medias_2[:, i])\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\n\ndef sample_1(N):\n\tnumeros=np.array([-10, -5, 3, 9])\n\treturn np.random.choice(numeros, N, p=[0.1, 0.4, 0.2, 0.3])#devuelve distro aleatoria con las probabilidades indicadas\n\ndef sample_2(N):\n\treturn np.random.exponential(0.5,N)#devuelve numeros aleatorios con distro exp con beta = 0.5\n\ndef get_mean(sampling_fun,N,M):\n\tmedias=np.zeros(M)#arreglo de medias\n\tfor i in range(M):#recorrido para sacar las m medias\n\t\tmedias[i]=np.mean(sampling_fun(N))\n\treturn medias\n\nn=np.array([10,100,1000])#arreglo con los distintos valores de n\nm=10000#valor de M\nmedias_1=np.zeros((m,3))#arreglo que guarta las m medias para 3 enes de sample1\nmedias_2=np.zeros((m,3))#lo de arriba pero con sample 2\ntexto='sample_'#texto que me da pereza escribir dos veces\nfor i in range(3):#recorrido para cada n\n\tmedias_1[:,i]=get_mean(sample_1,n[i],m)\n\tmedias_2[:,i]=get_mean(sample_2,n[i],m)\n\tnp.savetxt(texto+'1_'+str(n[i])+'.txt',medias_1[:,i])#archivo con las m medias para cada n\n\tnp.savetxt(texto+'2_'+str(n[i])+'.txt',medias_2[:,i])\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import server.wsgi as flask
import server.grunner as gunicorn
from utils.cfgreader import EnvReader, BoolVar
def use_flask() -> bool:
    """Return True when the USE_FLASK environment flag is set (default False)."""
    return EnvReader().safe_read(BoolVar('USE_FLASK', False))
if __name__ == '__main__':
    # Dispatch on the USE_FLASK environment flag read by use_flask().
    if use_flask():  # dev mode, run the WSGI app in Flask dev server
        flask.run()
    else:  # prod mode, run the WSGI app in Gunicorn
        gunicorn.run()
|
normal
|
{
"blob_id": "ffe10ee8b2ebaad565e9aef5047440a067d4e239",
"index": 7528,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef use_flask() ->bool:\n env_var = BoolVar('USE_FLASK', False)\n return EnvReader().safe_read(env_var)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef use_flask() ->bool:\n env_var = BoolVar('USE_FLASK', False)\n return EnvReader().safe_read(env_var)\n\n\nif __name__ == '__main__':\n if use_flask():\n flask.run()\n else:\n gunicorn.run()\n",
"step-4": "import server.wsgi as flask\nimport server.grunner as gunicorn\nfrom utils.cfgreader import EnvReader, BoolVar\n\n\ndef use_flask() ->bool:\n env_var = BoolVar('USE_FLASK', False)\n return EnvReader().safe_read(env_var)\n\n\nif __name__ == '__main__':\n if use_flask():\n flask.run()\n else:\n gunicorn.run()\n",
"step-5": "import server.wsgi as flask\nimport server.grunner as gunicorn\nfrom utils.cfgreader import EnvReader, BoolVar\n\n\ndef use_flask() -> bool:\n env_var = BoolVar('USE_FLASK', False)\n return EnvReader().safe_read(env_var)\n\n\nif __name__ == '__main__':\n if use_flask(): # dev mode, run the WSGI app in Flask dev server\n flask.run()\n else: # prod mode, run the WSGI app in Gunicorn\n gunicorn.run()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Script for converting the new csv files to the desirable json format
'''
import codecs
import json
import re
def creeper():
    '''
    Settings for creeper file: convert creeper.csv to Creeper.json using the
    caption mapping file and no country-code prefix.
    '''
    run(u'creeper.csv', u'Creeper.json', False,
        mappingFile=u'creeper-mappings.json')
def mediaCreeper():
    '''
    Settings for mediaCreeper file: convert mediacreeper.csv to
    MediaCreeper.json with a country-code prefix on every caption.
    '''
    run(u'mediacreeper.csv', u'MediaCreeper.json', True)
def run(inFilename, outFilename, ccPrefix,
        mappingFile=None, source=u'http://b19.se/data/'):
    '''
    Convert a semicolon-separated csv of ip ranges into a json file.

    inFilename: csv whose data lines are "start;end;cc;caption;updated".
    outFilename: path of the json file to write.
    ccPrefix: when True, prefix each caption with its country code "[cc] ".
    mappingFile: optional json file mapping captions to canonical captions.
    source: recorded in the output's "@metadata" entry.
    '''
    # load mappings (caption -> canonical caption), if provided
    mappings = {}
    if mappingFile:
        f = codecs.open(mappingFile, 'r', 'utf-8')
        mappings = json.load(f)
        f.close()
    # load csv
    f = codecs.open(inFilename, 'r', 'utf-8')
    lines = f.read().split('\n')
    f.close()
    data = {}
    dates = []
    for l in lines:
        # skip blank lines and '#' comment lines
        if len(l) == 0 or l.startswith(u'#'):
            continue
        start, end, cc, caption, updated = l.split(';')
        if ccPrefix:
            caption = u'[%s] %s' % (cc, caption)
        if caption in mappings.keys():
            caption = mappings[caption]
        # group the [start, end] ranges by (possibly remapped) caption
        if caption in data.keys():
            data[caption].append([start, end])
        else:
            data[caption] = [[start, end], ]
        dates.append(updated)
    # create metadata entry
    # NOTE(review): dates[0] raises IndexError if the csv had no data lines
    dates = sorted(list(set(dates)))
    metadata = {
        'source': source,
        'oldest data': dates[0],
        'newest data': dates[-1]}
    data[u'@metadata'] = metadata
    # output
    f = codecs.open(outFilename, 'w', 'utf-8')
    # f.write(json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False))
    # compactify it without minimizing: fold each two-element
    # ["start", "end"] list back onto a single line
    # NOTE(review): the patterns below assume a trailing space after commas,
    # as emitted by json.dumps(indent=4) on Python 2 — confirm on Python 3
    txt = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)
    txt = re.sub(
        r'\[\n            "([^"]*)", \n            "([^"]*)"\n        \]',
        r'["\1", "\2"]',
        txt)
    txt = txt.replace(u', \n        [', u',\n        [')
    f.write(txt)
    f.close()
if __name__ == '__main__':
    # Convert both csv files when invoked as a script.
    creeper()
    mediaCreeper()
|
normal
|
{
"blob_id": "5a5b2d0ade5b66981218b4ecf15a2253b7d665f9",
"index": 3273,
"step-1": "<mask token>\n\n\ndef mediaCreeper():\n \"\"\"\n Settings for mediaCreeper file\n \"\"\"\n ccPrefix = True\n inFilename = u'mediacreeper.csv'\n outFilename = u'MediaCreeper.json'\n run(inFilename, outFilename, ccPrefix)\n\n\ndef run(inFilename, outFilename, ccPrefix, mappingFile=None, source=\n u'http://b19.se/data/'):\n \"\"\"\n Run either file depending on settings\n \"\"\"\n mappings = {}\n if mappingFile:\n f = codecs.open(mappingFile, 'r', 'utf-8')\n mappings = json.load(f)\n f.close()\n f = codecs.open(inFilename, 'r', 'utf-8')\n lines = f.read().split('\\n')\n f.close()\n data = {}\n dates = []\n for l in lines:\n if len(l) == 0 or l.startswith(u'#'):\n continue\n start, end, cc, caption, updated = l.split(';')\n if ccPrefix:\n caption = u'[%s] %s' % (cc, caption)\n if caption in mappings.keys():\n caption = mappings[caption]\n if caption in data.keys():\n data[caption].append([start, end])\n else:\n data[caption] = [[start, end]]\n dates.append(updated)\n dates = sorted(list(set(dates)))\n metadata = {'source': source, 'oldest data': dates[0], 'newest data':\n dates[-1]}\n data[u'@metadata'] = metadata\n f = codecs.open(outFilename, 'w', 'utf-8')\n txt = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)\n txt = re.sub(\n '\\\\[\\\\n \"([^\"]*)\", \\\\n \"([^\"]*)\"\\\\n \\\\]',\n '[\"\\\\1\", \"\\\\2\"]', txt)\n txt = txt.replace(u', \\n [', u',\\n [')\n f.write(txt)\n f.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef creeper():\n \"\"\"\n Settings for creeper file\n \"\"\"\n ccPrefix = False\n inFilename = u'creeper.csv'\n outFilename = u'Creeper.json'\n mappingFile = u'creeper-mappings.json'\n run(inFilename, outFilename, ccPrefix, mappingFile=mappingFile)\n\n\ndef mediaCreeper():\n \"\"\"\n Settings for mediaCreeper file\n \"\"\"\n ccPrefix = True\n inFilename = u'mediacreeper.csv'\n outFilename = u'MediaCreeper.json'\n run(inFilename, outFilename, ccPrefix)\n\n\ndef run(inFilename, outFilename, ccPrefix, mappingFile=None, source=\n u'http://b19.se/data/'):\n \"\"\"\n Run either file depending on settings\n \"\"\"\n mappings = {}\n if mappingFile:\n f = codecs.open(mappingFile, 'r', 'utf-8')\n mappings = json.load(f)\n f.close()\n f = codecs.open(inFilename, 'r', 'utf-8')\n lines = f.read().split('\\n')\n f.close()\n data = {}\n dates = []\n for l in lines:\n if len(l) == 0 or l.startswith(u'#'):\n continue\n start, end, cc, caption, updated = l.split(';')\n if ccPrefix:\n caption = u'[%s] %s' % (cc, caption)\n if caption in mappings.keys():\n caption = mappings[caption]\n if caption in data.keys():\n data[caption].append([start, end])\n else:\n data[caption] = [[start, end]]\n dates.append(updated)\n dates = sorted(list(set(dates)))\n metadata = {'source': source, 'oldest data': dates[0], 'newest data':\n dates[-1]}\n data[u'@metadata'] = metadata\n f = codecs.open(outFilename, 'w', 'utf-8')\n txt = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)\n txt = re.sub(\n '\\\\[\\\\n \"([^\"]*)\", \\\\n \"([^\"]*)\"\\\\n \\\\]',\n '[\"\\\\1\", \"\\\\2\"]', txt)\n txt = txt.replace(u', \\n [', u',\\n [')\n f.write(txt)\n f.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef creeper():\n \"\"\"\n Settings for creeper file\n \"\"\"\n ccPrefix = False\n inFilename = u'creeper.csv'\n outFilename = u'Creeper.json'\n mappingFile = u'creeper-mappings.json'\n run(inFilename, outFilename, ccPrefix, mappingFile=mappingFile)\n\n\ndef mediaCreeper():\n \"\"\"\n Settings for mediaCreeper file\n \"\"\"\n ccPrefix = True\n inFilename = u'mediacreeper.csv'\n outFilename = u'MediaCreeper.json'\n run(inFilename, outFilename, ccPrefix)\n\n\ndef run(inFilename, outFilename, ccPrefix, mappingFile=None, source=\n u'http://b19.se/data/'):\n \"\"\"\n Run either file depending on settings\n \"\"\"\n mappings = {}\n if mappingFile:\n f = codecs.open(mappingFile, 'r', 'utf-8')\n mappings = json.load(f)\n f.close()\n f = codecs.open(inFilename, 'r', 'utf-8')\n lines = f.read().split('\\n')\n f.close()\n data = {}\n dates = []\n for l in lines:\n if len(l) == 0 or l.startswith(u'#'):\n continue\n start, end, cc, caption, updated = l.split(';')\n if ccPrefix:\n caption = u'[%s] %s' % (cc, caption)\n if caption in mappings.keys():\n caption = mappings[caption]\n if caption in data.keys():\n data[caption].append([start, end])\n else:\n data[caption] = [[start, end]]\n dates.append(updated)\n dates = sorted(list(set(dates)))\n metadata = {'source': source, 'oldest data': dates[0], 'newest data':\n dates[-1]}\n data[u'@metadata'] = metadata\n f = codecs.open(outFilename, 'w', 'utf-8')\n txt = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)\n txt = re.sub(\n '\\\\[\\\\n \"([^\"]*)\", \\\\n \"([^\"]*)\"\\\\n \\\\]',\n '[\"\\\\1\", \"\\\\2\"]', txt)\n txt = txt.replace(u', \\n [', u',\\n [')\n f.write(txt)\n f.close()\n\n\nif __name__ == '__main__':\n creeper()\n mediaCreeper()\n",
"step-4": "<mask token>\nimport codecs\nimport json\nimport re\n\n\ndef creeper():\n \"\"\"\n Settings for creeper file\n \"\"\"\n ccPrefix = False\n inFilename = u'creeper.csv'\n outFilename = u'Creeper.json'\n mappingFile = u'creeper-mappings.json'\n run(inFilename, outFilename, ccPrefix, mappingFile=mappingFile)\n\n\ndef mediaCreeper():\n \"\"\"\n Settings for mediaCreeper file\n \"\"\"\n ccPrefix = True\n inFilename = u'mediacreeper.csv'\n outFilename = u'MediaCreeper.json'\n run(inFilename, outFilename, ccPrefix)\n\n\ndef run(inFilename, outFilename, ccPrefix, mappingFile=None, source=\n u'http://b19.se/data/'):\n \"\"\"\n Run either file depending on settings\n \"\"\"\n mappings = {}\n if mappingFile:\n f = codecs.open(mappingFile, 'r', 'utf-8')\n mappings = json.load(f)\n f.close()\n f = codecs.open(inFilename, 'r', 'utf-8')\n lines = f.read().split('\\n')\n f.close()\n data = {}\n dates = []\n for l in lines:\n if len(l) == 0 or l.startswith(u'#'):\n continue\n start, end, cc, caption, updated = l.split(';')\n if ccPrefix:\n caption = u'[%s] %s' % (cc, caption)\n if caption in mappings.keys():\n caption = mappings[caption]\n if caption in data.keys():\n data[caption].append([start, end])\n else:\n data[caption] = [[start, end]]\n dates.append(updated)\n dates = sorted(list(set(dates)))\n metadata = {'source': source, 'oldest data': dates[0], 'newest data':\n dates[-1]}\n data[u'@metadata'] = metadata\n f = codecs.open(outFilename, 'w', 'utf-8')\n txt = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)\n txt = re.sub(\n '\\\\[\\\\n \"([^\"]*)\", \\\\n \"([^\"]*)\"\\\\n \\\\]',\n '[\"\\\\1\", \"\\\\2\"]', txt)\n txt = txt.replace(u', \\n [', u',\\n [')\n f.write(txt)\n f.close()\n\n\nif __name__ == '__main__':\n creeper()\n mediaCreeper()\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\nScript for converting the new csv files to the desirable json format\n'''\nimport codecs\nimport json\nimport re\n\n\ndef creeper():\n '''\n Settings for creeper file\n '''\n ccPrefix = False\n inFilename = u'creeper.csv'\n outFilename = u'Creeper.json'\n mappingFile = u'creeper-mappings.json'\n run(inFilename, outFilename, ccPrefix, mappingFile=mappingFile)\n\n\ndef mediaCreeper():\n '''\n Settings for mediaCreeper file\n '''\n ccPrefix = True\n inFilename = u'mediacreeper.csv'\n outFilename = u'MediaCreeper.json'\n run(inFilename, outFilename, ccPrefix)\n\n\ndef run(inFilename, outFilename, ccPrefix,\n mappingFile=None, source=u'http://b19.se/data/'):\n '''\n Run either file depending on settings\n '''\n # load mappings\n mappings = {}\n if mappingFile:\n f = codecs.open(mappingFile, 'r', 'utf-8')\n mappings = json.load(f)\n f.close()\n\n # load csv\n f = codecs.open(inFilename, 'r', 'utf-8')\n lines = f.read().split('\\n')\n f.close()\n data = {}\n dates = []\n for l in lines:\n if len(l) == 0 or l.startswith(u'#'):\n continue\n start, end, cc, caption, updated = l.split(';')\n if ccPrefix:\n caption = u'[%s] %s' % (cc, caption)\n if caption in mappings.keys():\n caption = mappings[caption]\n if caption in data.keys():\n data[caption].append([start, end])\n else:\n data[caption] = [[start, end], ]\n dates.append(updated)\n\n # create metadata entry\n dates = sorted(list(set(dates)))\n metadata = {\n 'source': source,\n 'oldest data': dates[0],\n 'newest data': dates[-1]}\n data[u'@metadata'] = metadata\n\n # output\n f = codecs.open(outFilename, 'w', 'utf-8')\n # f.write(json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False))\n\n # compactify it without minimizing\n txt = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)\n txt = re.sub(\n r'\\[\\n \"([^\"]*)\", \\n \"([^\"]*)\"\\n \\]',\n r'[\"\\1\", \"\\2\"]',\n txt)\n txt = txt.replace(u', \\n [', u',\\n [')\n f.write(txt)\n 
f.close()\n\n\nif __name__ == '__main__':\n creeper()\n mediaCreeper()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class BaseModel(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def resolve_all(cls):
return cls.query.all()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseModel(object):
def get_id(self):
return unicode(self.id)
<|reserved_special_token_0|>
@classmethod
def resolve_all(cls):
return cls.query.all()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseModel(object):
def get_id(self):
return unicode(self.id)
@classmethod
def resolve(cls, id_):
return cls.query.filter_by(id=id_).first()
@classmethod
def resolve_all(cls):
return cls.query.all()
<|reserved_special_token_1|>
from __future__ import unicode_literals
import json
class BaseModel(object):
    """Mixin providing id-based lookup helpers for ORM models.

    NOTE(review): `unicode` is Python 2 only — this module predates Python 3.
    Assumes subclasses expose `id` and a class-level `query` attribute
    (Flask-SQLAlchemy style) — confirm against the concrete models.
    """

    def get_id(self):
        # Flask-Login-style identifier: the primary key as a unicode string.
        return unicode(self.id)

    @classmethod
    def resolve(cls, id_):
        # First row with the given primary key, or None when absent.
        return cls.query.filter_by(id=id_).first()

    @classmethod
    def resolve_all(cls):
        # Every row of the model's table.
        return cls.query.all()
|
flexible
|
{
"blob_id": "c9079f27e3c0aca09f99fa381af5f35576b4be75",
"index": 4717,
"step-1": "<mask token>\n\n\nclass BaseModel(object):\n <mask token>\n <mask token>\n\n @classmethod\n def resolve_all(cls):\n return cls.query.all()\n",
"step-2": "<mask token>\n\n\nclass BaseModel(object):\n\n def get_id(self):\n return unicode(self.id)\n <mask token>\n\n @classmethod\n def resolve_all(cls):\n return cls.query.all()\n",
"step-3": "<mask token>\n\n\nclass BaseModel(object):\n\n def get_id(self):\n return unicode(self.id)\n\n @classmethod\n def resolve(cls, id_):\n return cls.query.filter_by(id=id_).first()\n\n @classmethod\n def resolve_all(cls):\n return cls.query.all()\n",
"step-4": "from __future__ import unicode_literals\nimport json\n\n\nclass BaseModel(object):\n\n def get_id(self):\n return unicode(self.id)\n\n @classmethod\n def resolve(cls, id_):\n return cls.query.filter_by(id=id_).first()\n\n @classmethod\n def resolve_all(cls):\n return cls.query.all()\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def line(body):
url = 'https://notify-api.line.me/api/notify'
access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'
headers = {'Authorization': 'Bearer ' + access_token}
message = body
payload = {'message': message}
r = requests.post(url, headers=headers, params=payload)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def line(body):
url = 'https://notify-api.line.me/api/notify'
access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'
headers = {'Authorization': 'Bearer ' + access_token}
message = body
payload = {'message': message}
r = requests.post(url, headers=headers, params=payload)
def send_image():
url = 'https://notify-api.line.me/api/notify'
access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'
FILENAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'screen.png')
headers = {'Authorization': 'Bearer ' + access_token}
message = 'この画面のエラーで落ちました'
image = FILENAME
payload = {'message': message}
files = {'imageFile': open(image, 'rb')}
r = requests.post(url, headers=headers, params=payload, files=files)
<|reserved_special_token_1|>
import requests
import os
def line(body):
url = 'https://notify-api.line.me/api/notify'
access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'
headers = {'Authorization': 'Bearer ' + access_token}
message = body
payload = {'message': message}
r = requests.post(url, headers=headers, params=payload)
def send_image():
url = 'https://notify-api.line.me/api/notify'
access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'
FILENAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'screen.png')
headers = {'Authorization': 'Bearer ' + access_token}
message = 'この画面のエラーで落ちました'
image = FILENAME
payload = {'message': message}
files = {'imageFile': open(image, 'rb')}
r = requests.post(url, headers=headers, params=payload, files=files)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import requests
import os
def line(body):
    """Send *body* as a LINE Notify message.

    The token may be overridden via the LINE_NOTIFY_TOKEN environment
    variable; the hard-coded value remains as a backward-compatible
    fallback.
    """
    url = "https://notify-api.line.me/api/notify"
    # SECURITY: a credential is hard-coded here — rotate it and supply it
    # exclusively via the environment.
    access_token = os.environ.get(
        'LINE_NOTIFY_TOKEN',
        'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8')
    headers = {'Authorization': 'Bearer ' + access_token}
    payload = {'message': body}
    requests.post(url, headers=headers, params=payload)
def send_image():
    """Send a fixed error notification with screen.png attached via LINE Notify."""
    url = "https://notify-api.line.me/api/notify"
    # SECURITY: hard-coded credential — prefer the LINE_NOTIFY_TOKEN env var.
    access_token = os.environ.get(
        'LINE_NOTIFY_TOKEN',
        'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8')
    # screen.png next to this script, independent of the working directory.
    filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), "screen.png")
    headers = {'Authorization': 'Bearer ' + access_token}
    payload = {'message': 'この画面のエラーで落ちました'}
    # Close the image handle deterministically instead of leaking it.
    with open(filename, 'rb') as image_file:
        requests.post(url, headers=headers, params=payload,
                      files={'imageFile': image_file})
|
flexible
|
{
"blob_id": "8b598703df67fb8287fe6cdccda5b73bf2892da8",
"index": 4878,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef line(body):\n url = 'https://notify-api.line.me/api/notify'\n access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'\n headers = {'Authorization': 'Bearer ' + access_token}\n message = body\n payload = {'message': message}\n r = requests.post(url, headers=headers, params=payload)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef line(body):\n url = 'https://notify-api.line.me/api/notify'\n access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'\n headers = {'Authorization': 'Bearer ' + access_token}\n message = body\n payload = {'message': message}\n r = requests.post(url, headers=headers, params=payload)\n\n\ndef send_image():\n url = 'https://notify-api.line.me/api/notify'\n access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'\n FILENAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'screen.png')\n headers = {'Authorization': 'Bearer ' + access_token}\n message = 'この画面のエラーで落ちました'\n image = FILENAME\n payload = {'message': message}\n files = {'imageFile': open(image, 'rb')}\n r = requests.post(url, headers=headers, params=payload, files=files)\n",
"step-4": "import requests\nimport os\n\n\ndef line(body):\n url = 'https://notify-api.line.me/api/notify'\n access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'\n headers = {'Authorization': 'Bearer ' + access_token}\n message = body\n payload = {'message': message}\n r = requests.post(url, headers=headers, params=payload)\n\n\ndef send_image():\n url = 'https://notify-api.line.me/api/notify'\n access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'\n FILENAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'screen.png')\n headers = {'Authorization': 'Bearer ' + access_token}\n message = 'この画面のエラーで落ちました'\n image = FILENAME\n payload = {'message': message}\n files = {'imageFile': open(image, 'rb')}\n r = requests.post(url, headers=headers, params=payload, files=files)\n",
"step-5": "# -*- coding: utf-8 -*-\nimport requests\nimport os\n\n\ndef line(body):\n url = \"https://notify-api.line.me/api/notify\"\n access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'\n headers = {'Authorization': 'Bearer ' + access_token}\n message = body\n payload = {'message': message}\n r = requests.post(url, headers=headers, params=payload)\n\n\ndef send_image():\n url = \"https://notify-api.line.me/api/notify\"\n access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'\n # File Name\n FILENAME = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"screen.png\")\n headers = {'Authorization': 'Bearer ' + access_token}\n message = 'この画面のエラーで落ちました'\n image = FILENAME\n payload = {'message': message}\n files = {'imageFile': open(image, 'rb')}\n r = requests.post(url, headers=headers, params=payload, files=files,)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def getmin(a, b, c):
if a <= b and a <= c:
print(a)
elif b <= a and b <= c:
print(b)
else:
print(c)
def filtername(name):
if len(name) > 3:
return name[:3]
elif len(name) < 3:
return name + " " * (3 - len(name))
return name
def filternames(names):
re = []
for n in names:
if len(n) != 3:
re += [filtername(n)]
return re
def printsort2(x):
for i in range(len(x) - 1):
for j in range(1 + i, len(x)):
if x[i] > x[j]:
x[i], x[j] = x[j], x[i]
for a in x:
print(a, end=" ")
def print_hell(inp):
if "안녕" in inp:
print("Hello")
|
normal
|
{
"blob_id": "917241482dc1f234d5fae9c107a5f21b018fe6d4",
"index": 9843,
"step-1": "<mask token>\n\n\ndef filtername(name):\n if len(name) > 3:\n return name[:3]\n elif len(name) < 3:\n return name + ' ' * (3 - len(name))\n return name\n\n\ndef filternames(names):\n re = []\n for n in names:\n if len(n) != 3:\n re += [filtername(n)]\n return re\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef filtername(name):\n if len(name) > 3:\n return name[:3]\n elif len(name) < 3:\n return name + ' ' * (3 - len(name))\n return name\n\n\ndef filternames(names):\n re = []\n for n in names:\n if len(n) != 3:\n re += [filtername(n)]\n return re\n\n\ndef printsort2(x):\n for i in range(len(x) - 1):\n for j in range(1 + i, len(x)):\n if x[i] > x[j]:\n x[i], x[j] = x[j], x[i]\n for a in x:\n print(a, end=' ')\n\n\n<mask token>\n",
"step-3": "def getmin(a, b, c):\n if a <= b and a <= c:\n print(a)\n elif b <= a and b <= c:\n print(b)\n else:\n print(c)\n\n\ndef filtername(name):\n if len(name) > 3:\n return name[:3]\n elif len(name) < 3:\n return name + ' ' * (3 - len(name))\n return name\n\n\ndef filternames(names):\n re = []\n for n in names:\n if len(n) != 3:\n re += [filtername(n)]\n return re\n\n\ndef printsort2(x):\n for i in range(len(x) - 1):\n for j in range(1 + i, len(x)):\n if x[i] > x[j]:\n x[i], x[j] = x[j], x[i]\n for a in x:\n print(a, end=' ')\n\n\n<mask token>\n",
"step-4": "def getmin(a, b, c):\n if a <= b and a <= c:\n print(a)\n elif b <= a and b <= c:\n print(b)\n else:\n print(c)\n\n\ndef filtername(name):\n if len(name) > 3:\n return name[:3]\n elif len(name) < 3:\n return name + ' ' * (3 - len(name))\n return name\n\n\ndef filternames(names):\n re = []\n for n in names:\n if len(n) != 3:\n re += [filtername(n)]\n return re\n\n\ndef printsort2(x):\n for i in range(len(x) - 1):\n for j in range(1 + i, len(x)):\n if x[i] > x[j]:\n x[i], x[j] = x[j], x[i]\n for a in x:\n print(a, end=' ')\n\n\ndef print_hell(inp):\n if '안녕' in inp:\n print('Hello')\n",
"step-5": "def getmin(a, b, c):\n if a <= b and a <= c:\n print(a)\n elif b <= a and b <= c:\n print(b)\n else:\n print(c)\n\n\ndef filtername(name):\n if len(name) > 3:\n return name[:3]\n elif len(name) < 3:\n return name + \" \" * (3 - len(name))\n return name\n\n\ndef filternames(names):\n re = []\n for n in names:\n if len(n) != 3:\n re += [filtername(n)]\n return re\n\n\ndef printsort2(x):\n for i in range(len(x) - 1):\n for j in range(1 + i, len(x)):\n if x[i] > x[j]:\n x[i], x[j] = x[j], x[i]\n for a in x:\n print(a, end=\" \")\n\n\ndef print_hell(inp):\n if \"안녕\" in inp:\n print(\"Hello\")\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from .candles import CandleCallback
from .firestore import FirestoreTradeCallback
from .gcppubsub import GCPPubSubTradeCallback
from .thresh import ThreshCallback
from .trades import (
NonSequentialIntegerTradeCallback,
SequentialIntegerTradeCallback,
TradeCallback,
)
__all__ = [
"FirestoreTradeCallback",
"GCPPubSubTradeCallback",
"CandleCallback",
"TradeCallback",
"ThreshCallback",
"SequentialIntegerTradeCallback",
"NonSequentialIntegerTradeCallback",
]
|
normal
|
{
"blob_id": "b6dc29ae5661f84273ff91a124420bc10c7b6f6e",
"index": 3704,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['FirestoreTradeCallback', 'GCPPubSubTradeCallback',\n 'CandleCallback', 'TradeCallback', 'ThreshCallback',\n 'SequentialIntegerTradeCallback', 'NonSequentialIntegerTradeCallback']\n",
"step-3": "from .candles import CandleCallback\nfrom .firestore import FirestoreTradeCallback\nfrom .gcppubsub import GCPPubSubTradeCallback\nfrom .thresh import ThreshCallback\nfrom .trades import NonSequentialIntegerTradeCallback, SequentialIntegerTradeCallback, TradeCallback\n__all__ = ['FirestoreTradeCallback', 'GCPPubSubTradeCallback',\n 'CandleCallback', 'TradeCallback', 'ThreshCallback',\n 'SequentialIntegerTradeCallback', 'NonSequentialIntegerTradeCallback']\n",
"step-4": "from .candles import CandleCallback\nfrom .firestore import FirestoreTradeCallback\nfrom .gcppubsub import GCPPubSubTradeCallback\nfrom .thresh import ThreshCallback\nfrom .trades import (\n NonSequentialIntegerTradeCallback,\n SequentialIntegerTradeCallback,\n TradeCallback,\n)\n\n__all__ = [\n \"FirestoreTradeCallback\",\n \"GCPPubSubTradeCallback\",\n \"CandleCallback\",\n \"TradeCallback\",\n \"ThreshCallback\",\n \"SequentialIntegerTradeCallback\",\n \"NonSequentialIntegerTradeCallback\",\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 2.2 on 2019-05-13 06:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base_data_app', '0008_key_keyslider'),
]
operations = [
migrations.AddField(
model_name='key',
name='image',
field=models.ImageField(null=True, upload_to='key', verbose_name='Картинка'),
),
]
|
normal
|
{
"blob_id": "ad53b100a1774f5429278379302b85f3a675adea",
"index": 8986,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('base_data_app', '0008_key_keyslider')]\n operations = [migrations.AddField(model_name='key', name='image', field\n =models.ImageField(null=True, upload_to='key', verbose_name=\n 'Картинка'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('base_data_app', '0008_key_keyslider')]\n operations = [migrations.AddField(model_name='key', name='image', field\n =models.ImageField(null=True, upload_to='key', verbose_name=\n 'Картинка'))]\n",
"step-5": "# Generated by Django 2.2 on 2019-05-13 06:57\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('base_data_app', '0008_key_keyslider'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='key',\n name='image',\n field=models.ImageField(null=True, upload_to='key', verbose_name='Картинка'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Model(pl.LightningModule):
<|reserved_special_token_0|>
def init_training_parameters(self, criterion, optimizer):
self.criterion = criterion
self.optimizer = optimizer
def set_criterion(self, criterion):
self.criterion = criterion
<|reserved_special_token_0|>
def forward(self, x):
return self.net(x)
<|reserved_special_token_0|>
def on_train_epoch_start(self) ->None:
self.batch_loss_collector = []
def training_step(self, batch, batch_idx):
images, targets = batch
outputs = self.net(images)
loss = self.criterion(outputs, targets)
self.log('train_loss', loss, prog_bar=True)
self.batch_loss_collector.append(loss.item())
return loss
<|reserved_special_token_0|>
def on_validation_epoch_start(self) ->None:
self.batch_loss_collector = []
def validation_step(self, batch, batch_idx):
images, targets = batch
outputs = self.net(images)
loss = self.criterion(outputs, targets)
self.log('val_loss', loss, prog_bar=True)
self.batch_loss_collector.append(loss.item())
return loss
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Model(pl.LightningModule):
<|reserved_special_token_0|>
def init_training_parameters(self, criterion, optimizer):
self.criterion = criterion
self.optimizer = optimizer
def set_criterion(self, criterion):
self.criterion = criterion
<|reserved_special_token_0|>
def forward(self, x):
return self.net(x)
def configure_optimizers(self):
return self.optimizer
def on_train_epoch_start(self) ->None:
self.batch_loss_collector = []
def training_step(self, batch, batch_idx):
images, targets = batch
outputs = self.net(images)
loss = self.criterion(outputs, targets)
self.log('train_loss', loss, prog_bar=True)
self.batch_loss_collector.append(loss.item())
return loss
def on_train_epoch_end(self, outputs) ->None:
self.train_losses.append(sum(self.batch_loss_collector) / len(self.
batch_loss_collector))
def on_validation_epoch_start(self) ->None:
self.batch_loss_collector = []
def validation_step(self, batch, batch_idx):
images, targets = batch
outputs = self.net(images)
loss = self.criterion(outputs, targets)
self.log('val_loss', loss, prog_bar=True)
self.batch_loss_collector.append(loss.item())
return loss
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Model(pl.LightningModule):
<|reserved_special_token_0|>
def init_training_parameters(self, criterion, optimizer):
self.criterion = criterion
self.optimizer = optimizer
def set_criterion(self, criterion):
self.criterion = criterion
def set_optimizer(self, optimizer):
self.optimizer = optimizer
def forward(self, x):
return self.net(x)
def configure_optimizers(self):
return self.optimizer
def on_train_epoch_start(self) ->None:
self.batch_loss_collector = []
def training_step(self, batch, batch_idx):
images, targets = batch
outputs = self.net(images)
loss = self.criterion(outputs, targets)
self.log('train_loss', loss, prog_bar=True)
self.batch_loss_collector.append(loss.item())
return loss
def on_train_epoch_end(self, outputs) ->None:
self.train_losses.append(sum(self.batch_loss_collector) / len(self.
batch_loss_collector))
def on_validation_epoch_start(self) ->None:
self.batch_loss_collector = []
def validation_step(self, batch, batch_idx):
images, targets = batch
outputs = self.net(images)
loss = self.criterion(outputs, targets)
self.log('val_loss', loss, prog_bar=True)
self.batch_loss_collector.append(loss.item())
return loss
def validation_epoch_end(self, outputs) ->None:
self.valid_losses.append(sum(self.batch_loss_collector) / len(self.
batch_loss_collector))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import pytorch_lightning as pl
from matplotlib import pyplot as plt
class Model(pl.LightningModule):
def __init__(self, net):
super(Model, self).__init__()
self.net = net
self.save_hyperparameters()
self.criterion = None
self.optimizer = None
self.batch_loss_collector = []
self.train_losses = []
self.valid_losses = []
def init_training_parameters(self, criterion, optimizer):
self.criterion = criterion
self.optimizer = optimizer
def set_criterion(self, criterion):
self.criterion = criterion
def set_optimizer(self, optimizer):
self.optimizer = optimizer
def forward(self, x):
return self.net(x)
def configure_optimizers(self):
return self.optimizer
def on_train_epoch_start(self) ->None:
self.batch_loss_collector = []
def training_step(self, batch, batch_idx):
images, targets = batch
outputs = self.net(images)
loss = self.criterion(outputs, targets)
self.log('train_loss', loss, prog_bar=True)
self.batch_loss_collector.append(loss.item())
return loss
def on_train_epoch_end(self, outputs) ->None:
self.train_losses.append(sum(self.batch_loss_collector) / len(self.
batch_loss_collector))
def on_validation_epoch_start(self) ->None:
self.batch_loss_collector = []
def validation_step(self, batch, batch_idx):
images, targets = batch
outputs = self.net(images)
loss = self.criterion(outputs, targets)
self.log('val_loss', loss, prog_bar=True)
self.batch_loss_collector.append(loss.item())
return loss
def validation_epoch_end(self, outputs) ->None:
self.valid_losses.append(sum(self.batch_loss_collector) / len(self.
batch_loss_collector))
def plot_losses(self):
plt.figure()
plt.plot(range(len(self.train_losses)), self.train_losses, color=
'red', label='Training error')
plt.plot(range(len(self.valid_losses)), self.valid_losses, color=
'blue', label='Validation error')
plt.xlabel('Epoch')
plt.ylabel('Losses')
plt.ylim(0)
plt.legend()
plt.show()
|
flexible
|
{
"blob_id": "324081eb4e133f6d16e716f3119e4cbc5e045ede",
"index": 8526,
"step-1": "<mask token>\n\n\nclass Model(pl.LightningModule):\n <mask token>\n\n def init_training_parameters(self, criterion, optimizer):\n self.criterion = criterion\n self.optimizer = optimizer\n\n def set_criterion(self, criterion):\n self.criterion = criterion\n <mask token>\n\n def forward(self, x):\n return self.net(x)\n <mask token>\n\n def on_train_epoch_start(self) ->None:\n self.batch_loss_collector = []\n\n def training_step(self, batch, batch_idx):\n images, targets = batch\n outputs = self.net(images)\n loss = self.criterion(outputs, targets)\n self.log('train_loss', loss, prog_bar=True)\n self.batch_loss_collector.append(loss.item())\n return loss\n <mask token>\n\n def on_validation_epoch_start(self) ->None:\n self.batch_loss_collector = []\n\n def validation_step(self, batch, batch_idx):\n images, targets = batch\n outputs = self.net(images)\n loss = self.criterion(outputs, targets)\n self.log('val_loss', loss, prog_bar=True)\n self.batch_loss_collector.append(loss.item())\n return loss\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Model(pl.LightningModule):\n <mask token>\n\n def init_training_parameters(self, criterion, optimizer):\n self.criterion = criterion\n self.optimizer = optimizer\n\n def set_criterion(self, criterion):\n self.criterion = criterion\n <mask token>\n\n def forward(self, x):\n return self.net(x)\n\n def configure_optimizers(self):\n return self.optimizer\n\n def on_train_epoch_start(self) ->None:\n self.batch_loss_collector = []\n\n def training_step(self, batch, batch_idx):\n images, targets = batch\n outputs = self.net(images)\n loss = self.criterion(outputs, targets)\n self.log('train_loss', loss, prog_bar=True)\n self.batch_loss_collector.append(loss.item())\n return loss\n\n def on_train_epoch_end(self, outputs) ->None:\n self.train_losses.append(sum(self.batch_loss_collector) / len(self.\n batch_loss_collector))\n\n def on_validation_epoch_start(self) ->None:\n self.batch_loss_collector = []\n\n def validation_step(self, batch, batch_idx):\n images, targets = batch\n outputs = self.net(images)\n loss = self.criterion(outputs, targets)\n self.log('val_loss', loss, prog_bar=True)\n self.batch_loss_collector.append(loss.item())\n return loss\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Model(pl.LightningModule):\n <mask token>\n\n def init_training_parameters(self, criterion, optimizer):\n self.criterion = criterion\n self.optimizer = optimizer\n\n def set_criterion(self, criterion):\n self.criterion = criterion\n\n def set_optimizer(self, optimizer):\n self.optimizer = optimizer\n\n def forward(self, x):\n return self.net(x)\n\n def configure_optimizers(self):\n return self.optimizer\n\n def on_train_epoch_start(self) ->None:\n self.batch_loss_collector = []\n\n def training_step(self, batch, batch_idx):\n images, targets = batch\n outputs = self.net(images)\n loss = self.criterion(outputs, targets)\n self.log('train_loss', loss, prog_bar=True)\n self.batch_loss_collector.append(loss.item())\n return loss\n\n def on_train_epoch_end(self, outputs) ->None:\n self.train_losses.append(sum(self.batch_loss_collector) / len(self.\n batch_loss_collector))\n\n def on_validation_epoch_start(self) ->None:\n self.batch_loss_collector = []\n\n def validation_step(self, batch, batch_idx):\n images, targets = batch\n outputs = self.net(images)\n loss = self.criterion(outputs, targets)\n self.log('val_loss', loss, prog_bar=True)\n self.batch_loss_collector.append(loss.item())\n return loss\n\n def validation_epoch_end(self, outputs) ->None:\n self.valid_losses.append(sum(self.batch_loss_collector) / len(self.\n batch_loss_collector))\n <mask token>\n",
"step-4": "import pytorch_lightning as pl\nfrom matplotlib import pyplot as plt\n\n\nclass Model(pl.LightningModule):\n\n def __init__(self, net):\n super(Model, self).__init__()\n self.net = net\n self.save_hyperparameters()\n self.criterion = None\n self.optimizer = None\n self.batch_loss_collector = []\n self.train_losses = []\n self.valid_losses = []\n\n def init_training_parameters(self, criterion, optimizer):\n self.criterion = criterion\n self.optimizer = optimizer\n\n def set_criterion(self, criterion):\n self.criterion = criterion\n\n def set_optimizer(self, optimizer):\n self.optimizer = optimizer\n\n def forward(self, x):\n return self.net(x)\n\n def configure_optimizers(self):\n return self.optimizer\n\n def on_train_epoch_start(self) ->None:\n self.batch_loss_collector = []\n\n def training_step(self, batch, batch_idx):\n images, targets = batch\n outputs = self.net(images)\n loss = self.criterion(outputs, targets)\n self.log('train_loss', loss, prog_bar=True)\n self.batch_loss_collector.append(loss.item())\n return loss\n\n def on_train_epoch_end(self, outputs) ->None:\n self.train_losses.append(sum(self.batch_loss_collector) / len(self.\n batch_loss_collector))\n\n def on_validation_epoch_start(self) ->None:\n self.batch_loss_collector = []\n\n def validation_step(self, batch, batch_idx):\n images, targets = batch\n outputs = self.net(images)\n loss = self.criterion(outputs, targets)\n self.log('val_loss', loss, prog_bar=True)\n self.batch_loss_collector.append(loss.item())\n return loss\n\n def validation_epoch_end(self, outputs) ->None:\n self.valid_losses.append(sum(self.batch_loss_collector) / len(self.\n batch_loss_collector))\n\n def plot_losses(self):\n plt.figure()\n plt.plot(range(len(self.train_losses)), self.train_losses, color=\n 'red', label='Training error')\n plt.plot(range(len(self.valid_losses)), self.valid_losses, color=\n 'blue', label='Validation error')\n plt.xlabel('Epoch')\n plt.ylabel('Losses')\n plt.ylim(0)\n plt.legend()\n 
plt.show()\n",
"step-5": null,
"step-ids": [
8,
10,
12,
15
]
}
|
[
8,
10,
12,
15
] |
import pandas as pd
from pandas import DataFrame
myencoding = 'utf-8'
chikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']
# chikenList = ['pelicana']
newframe = DataFrame()
for onestore in chikenList:
filename = onestore + '.csv'
myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)
# print(myframe.head())
# print('-'*30)
newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)
print(newframe.info())
totalfile = 'allstore.csv'
newframe.to_csv(totalfile, encoding=myencoding)
print(totalfile + '파일이 저장됨')
|
normal
|
{
"blob_id": "11a31d3276201105ca7485fa4e4eb711012accd5",
"index": 2190,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor onestore in chikenList:\n filename = onestore + '.csv'\n myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)\n newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)\nprint(newframe.info())\n<mask token>\nnewframe.to_csv(totalfile, encoding=myencoding)\nprint(totalfile + '파일이 저장됨')\n",
"step-3": "<mask token>\nmyencoding = 'utf-8'\nchikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']\nnewframe = DataFrame()\nfor onestore in chikenList:\n filename = onestore + '.csv'\n myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)\n newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)\nprint(newframe.info())\ntotalfile = 'allstore.csv'\nnewframe.to_csv(totalfile, encoding=myencoding)\nprint(totalfile + '파일이 저장됨')\n",
"step-4": "import pandas as pd\nfrom pandas import DataFrame\nmyencoding = 'utf-8'\nchikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']\nnewframe = DataFrame()\nfor onestore in chikenList:\n filename = onestore + '.csv'\n myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)\n newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)\nprint(newframe.info())\ntotalfile = 'allstore.csv'\nnewframe.to_csv(totalfile, encoding=myencoding)\nprint(totalfile + '파일이 저장됨')\n",
"step-5": "import pandas as pd\nfrom pandas import DataFrame\n\nmyencoding = 'utf-8'\nchikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']\n# chikenList = ['pelicana']\n\nnewframe = DataFrame()\n\nfor onestore in chikenList:\n filename = onestore + '.csv'\n myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)\n # print(myframe.head())\n # print('-'*30)\n newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)\n\nprint(newframe.info())\n\ntotalfile = 'allstore.csv'\nnewframe.to_csv(totalfile, encoding=myencoding)\nprint(totalfile + '파일이 저장됨')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def gameOver(board, symbol):
if board[0] == board[3] == board[6] == symbol or board[1] == board[7
] == board[4] == symbol or board[2] == board[5] == board[8
] == symbol or board[0] == board[1] == board[2] == symbol or board[5
] == board[3] == board[4] == symbol or board[6] == board[7] == board[8
] == symbol or board[2] == board[4] == board[6] == symbol or board[0
] == board[4] == board[8] == symbol:
return True
def mark(pos, symbol):
board[pos] = symbol
unmarked.remove(pos)
def displayBoard():
for i in range(len(board)):
if i == 1 or i == 4 or i == 7:
print(f'|{board[i]}|', end=' ')
elif i == 2 or i == 5:
print(f'{board[i]}\n--------')
else:
print(f'{board[i]}', end=' ')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
player_sym = input("Choose 'X' or 'O' : ")
if player_sym != 'X' and player_sym != 'O':
raise Exception('Symbol not found')
except Exception as e:
print(e.args)
else:
choices.remove(player_sym)
comp_sym = choices[0]
player_dict = {player_sym: 'Player', comp_sym: 'Computer'}
<|reserved_special_token_0|>
def gameOver(board, symbol):
if board[0] == board[3] == board[6] == symbol or board[1] == board[7
] == board[4] == symbol or board[2] == board[5] == board[8
] == symbol or board[0] == board[1] == board[2] == symbol or board[5
] == board[3] == board[4] == symbol or board[6] == board[7] == board[8
] == symbol or board[2] == board[4] == board[6] == symbol or board[0
] == board[4] == board[8] == symbol:
return True
def mark(pos, symbol):
board[pos] = symbol
unmarked.remove(pos)
def displayBoard():
for i in range(len(board)):
if i == 1 or i == 4 or i == 7:
print(f'|{board[i]}|', end=' ')
elif i == 2 or i == 5:
print(f'{board[i]}\n--------')
else:
print(f'{board[i]}', end=' ')
if __name__ == '__main__':
while not gameEnd:
try:
player_pos = int(input('\n\nWhere would you mark? '))
if player_pos < 0 or player_pos > 8 or player_pos not in unmarked:
raise Exception('Position out of Board')
break
except Exception as e:
print(e.args)
else:
mark(player_pos, player_sym)
if gameOver(board, player_sym):
displayBoard()
print('\n\nPlayer Won!!!')
break
comp_pos = unmarked[random.randint(0, len(unmarked) - 1)]
mark(comp_pos, comp_sym)
if gameOver(board, comp_sym):
displayBoard()
print('\n\nComputer WON!!!')
break
displayBoard()
print('GAME OVER')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
choices = ['X', 'O']
try:
player_sym = input("Choose 'X' or 'O' : ")
if player_sym != 'X' and player_sym != 'O':
raise Exception('Symbol not found')
except Exception as e:
print(e.args)
else:
choices.remove(player_sym)
comp_sym = choices[0]
player_dict = {player_sym: 'Player', comp_sym: 'Computer'}
board = [' '] * 9
gameEnd = False
unmarked = [i for i in range(9)]
def gameOver(board, symbol):
if board[0] == board[3] == board[6] == symbol or board[1] == board[7
] == board[4] == symbol or board[2] == board[5] == board[8
] == symbol or board[0] == board[1] == board[2] == symbol or board[5
] == board[3] == board[4] == symbol or board[6] == board[7] == board[8
] == symbol or board[2] == board[4] == board[6] == symbol or board[0
] == board[4] == board[8] == symbol:
return True
def mark(pos, symbol):
board[pos] = symbol
unmarked.remove(pos)
def displayBoard():
for i in range(len(board)):
if i == 1 or i == 4 or i == 7:
print(f'|{board[i]}|', end=' ')
elif i == 2 or i == 5:
print(f'{board[i]}\n--------')
else:
print(f'{board[i]}', end=' ')
if __name__ == '__main__':
while not gameEnd:
try:
player_pos = int(input('\n\nWhere would you mark? '))
if player_pos < 0 or player_pos > 8 or player_pos not in unmarked:
raise Exception('Position out of Board')
break
except Exception as e:
print(e.args)
else:
mark(player_pos, player_sym)
if gameOver(board, player_sym):
displayBoard()
print('\n\nPlayer Won!!!')
break
comp_pos = unmarked[random.randint(0, len(unmarked) - 1)]
mark(comp_pos, comp_sym)
if gameOver(board, comp_sym):
displayBoard()
print('\n\nComputer WON!!!')
break
displayBoard()
print('GAME OVER')
<|reserved_special_token_1|>
import random
choices = ['X', 'O']
try:
player_sym = input("Choose 'X' or 'O' : ")
if player_sym != 'X' and player_sym != 'O':
raise Exception('Symbol not found')
except Exception as e:
print(e.args)
else:
choices.remove(player_sym)
comp_sym = choices[0]
player_dict = {player_sym: 'Player', comp_sym: 'Computer'}
board = [' '] * 9
gameEnd = False
unmarked = [i for i in range(9)]
def gameOver(board, symbol):
if board[0] == board[3] == board[6] == symbol or board[1] == board[7
] == board[4] == symbol or board[2] == board[5] == board[8
] == symbol or board[0] == board[1] == board[2] == symbol or board[5
] == board[3] == board[4] == symbol or board[6] == board[7] == board[8
] == symbol or board[2] == board[4] == board[6] == symbol or board[0
] == board[4] == board[8] == symbol:
return True
def mark(pos, symbol):
board[pos] = symbol
unmarked.remove(pos)
def displayBoard():
for i in range(len(board)):
if i == 1 or i == 4 or i == 7:
print(f'|{board[i]}|', end=' ')
elif i == 2 or i == 5:
print(f'{board[i]}\n--------')
else:
print(f'{board[i]}', end=' ')
if __name__ == '__main__':
while not gameEnd:
try:
player_pos = int(input('\n\nWhere would you mark? '))
if player_pos < 0 or player_pos > 8 or player_pos not in unmarked:
raise Exception('Position out of Board')
break
except Exception as e:
print(e.args)
else:
mark(player_pos, player_sym)
if gameOver(board, player_sym):
displayBoard()
print('\n\nPlayer Won!!!')
break
comp_pos = unmarked[random.randint(0, len(unmarked) - 1)]
mark(comp_pos, comp_sym)
if gameOver(board, comp_sym):
displayBoard()
print('\n\nComputer WON!!!')
break
displayBoard()
print('GAME OVER')
<|reserved_special_token_1|>
import random
choices = ['X', 'O']
try:
# Choice of X-O given to the player
player_sym = input("Choose 'X' or 'O' : ")
# raising an exception if the variable is not X or O
if player_sym!='X' and player_sym!='O':
raise Exception("Symbol not found")
except Exception as e:
print(e.args)
else:
# Allotting the other one as the computer symbol
choices.remove(player_sym)
comp_sym = choices[0]
player_dict = {player_sym:'Player', comp_sym:'Computer'}
# creating the board
board = [' ']*9
gameEnd = False # to track when the game ends
unmarked = [i for i in range(9)] # to track all the blank boxes left
# gameOver function check if the game already has a winner
def gameOver(board, symbol):
# below is the sequence of all the possible winning combinations
if board[0]==board[3]==board[6]==symbol or board[1]==board[7]==board[4]==symbol or board[2]==board[5]==board[8]==symbol or board[0]==board[1]==board[2]==symbol or board[5]==board[3]==board[4]==symbol or board[6]==board[7]==board[8]==symbol or board[2]==board[4]==board[6]==symbol or board[0]==board[4]==board[8]==symbol:
# if there is a pattern match the game is over hence return True
return True
# function for marking the box with the symbol
def mark(pos, symbol):
board[pos] = symbol
unmarked.remove(pos)
# Used it for debugging : print(f"Unmarked : {unmarked}")
# function to display the board at a particular time
def displayBoard():
for i in range(len(board)):
# formatting the output for the middle elements
if i==1 or i==4 or i==7:
print(f'|{board[i]}|', end=' ')
elif i==2 or i==5:
print(f'{board[i]}\n--------') # marks the end of a line and hence bifurcates two lines
else:
print(f'{board[i]}', end=' ')
if __name__== "__main__":
# this is where the game starts
while not gameEnd: # loop until game ends
try:
player_pos = int(input("\n\nWhere would you mark? "))
# check if position index is on the board and is available for marking else raise Exception
if player_pos<0 or player_pos>8 or (player_pos not in unmarked):
raise Exception("Position out of Board")
break
except Exception as e:
print(e.args)
else:
mark(player_pos, player_sym)
# check if the game has already ended and if yes, declare the player as winner
if gameOver(board, player_sym):
displayBoard()
print("\n\nPlayer Won!!!")
break
# computer will mark on some random square that is not marked yet
comp_pos = unmarked[random.randint(0, len(unmarked)-1)]
mark(comp_pos, comp_sym)
# check if the game has already ended and if yes, declare the computer as winner
if gameOver(board, comp_sym):
displayBoard()
print("\n\nComputer WON!!!")
break
# display the board after each iteration
displayBoard()
# marks the end of the game
print("GAME OVER")
|
flexible
|
{
"blob_id": "d2f6d7c779d3d6e61d9da7af01a2931fdabec828",
"index": 371,
"step-1": "<mask token>\n\n\ndef gameOver(board, symbol):\n if board[0] == board[3] == board[6] == symbol or board[1] == board[7\n ] == board[4] == symbol or board[2] == board[5] == board[8\n ] == symbol or board[0] == board[1] == board[2] == symbol or board[5\n ] == board[3] == board[4] == symbol or board[6] == board[7] == board[8\n ] == symbol or board[2] == board[4] == board[6] == symbol or board[0\n ] == board[4] == board[8] == symbol:\n return True\n\n\ndef mark(pos, symbol):\n board[pos] = symbol\n unmarked.remove(pos)\n\n\ndef displayBoard():\n for i in range(len(board)):\n if i == 1 or i == 4 or i == 7:\n print(f'|{board[i]}|', end=' ')\n elif i == 2 or i == 5:\n print(f'{board[i]}\\n--------')\n else:\n print(f'{board[i]}', end=' ')\n\n\n<mask token>\n",
"step-2": "<mask token>\ntry:\n player_sym = input(\"Choose 'X' or 'O' : \")\n if player_sym != 'X' and player_sym != 'O':\n raise Exception('Symbol not found')\nexcept Exception as e:\n print(e.args)\nelse:\n choices.remove(player_sym)\n comp_sym = choices[0]\n player_dict = {player_sym: 'Player', comp_sym: 'Computer'}\n<mask token>\n\n\ndef gameOver(board, symbol):\n if board[0] == board[3] == board[6] == symbol or board[1] == board[7\n ] == board[4] == symbol or board[2] == board[5] == board[8\n ] == symbol or board[0] == board[1] == board[2] == symbol or board[5\n ] == board[3] == board[4] == symbol or board[6] == board[7] == board[8\n ] == symbol or board[2] == board[4] == board[6] == symbol or board[0\n ] == board[4] == board[8] == symbol:\n return True\n\n\ndef mark(pos, symbol):\n board[pos] = symbol\n unmarked.remove(pos)\n\n\ndef displayBoard():\n for i in range(len(board)):\n if i == 1 or i == 4 or i == 7:\n print(f'|{board[i]}|', end=' ')\n elif i == 2 or i == 5:\n print(f'{board[i]}\\n--------')\n else:\n print(f'{board[i]}', end=' ')\n\n\nif __name__ == '__main__':\n while not gameEnd:\n try:\n player_pos = int(input('\\n\\nWhere would you mark? '))\n if player_pos < 0 or player_pos > 8 or player_pos not in unmarked:\n raise Exception('Position out of Board')\n break\n except Exception as e:\n print(e.args)\n else:\n mark(player_pos, player_sym)\n if gameOver(board, player_sym):\n displayBoard()\n print('\\n\\nPlayer Won!!!')\n break\n comp_pos = unmarked[random.randint(0, len(unmarked) - 1)]\n mark(comp_pos, comp_sym)\n if gameOver(board, comp_sym):\n displayBoard()\n print('\\n\\nComputer WON!!!')\n break\n displayBoard()\n print('GAME OVER')\n",
"step-3": "<mask token>\nchoices = ['X', 'O']\ntry:\n player_sym = input(\"Choose 'X' or 'O' : \")\n if player_sym != 'X' and player_sym != 'O':\n raise Exception('Symbol not found')\nexcept Exception as e:\n print(e.args)\nelse:\n choices.remove(player_sym)\n comp_sym = choices[0]\n player_dict = {player_sym: 'Player', comp_sym: 'Computer'}\nboard = [' '] * 9\ngameEnd = False\nunmarked = [i for i in range(9)]\n\n\ndef gameOver(board, symbol):\n if board[0] == board[3] == board[6] == symbol or board[1] == board[7\n ] == board[4] == symbol or board[2] == board[5] == board[8\n ] == symbol or board[0] == board[1] == board[2] == symbol or board[5\n ] == board[3] == board[4] == symbol or board[6] == board[7] == board[8\n ] == symbol or board[2] == board[4] == board[6] == symbol or board[0\n ] == board[4] == board[8] == symbol:\n return True\n\n\ndef mark(pos, symbol):\n board[pos] = symbol\n unmarked.remove(pos)\n\n\ndef displayBoard():\n for i in range(len(board)):\n if i == 1 or i == 4 or i == 7:\n print(f'|{board[i]}|', end=' ')\n elif i == 2 or i == 5:\n print(f'{board[i]}\\n--------')\n else:\n print(f'{board[i]}', end=' ')\n\n\nif __name__ == '__main__':\n while not gameEnd:\n try:\n player_pos = int(input('\\n\\nWhere would you mark? '))\n if player_pos < 0 or player_pos > 8 or player_pos not in unmarked:\n raise Exception('Position out of Board')\n break\n except Exception as e:\n print(e.args)\n else:\n mark(player_pos, player_sym)\n if gameOver(board, player_sym):\n displayBoard()\n print('\\n\\nPlayer Won!!!')\n break\n comp_pos = unmarked[random.randint(0, len(unmarked) - 1)]\n mark(comp_pos, comp_sym)\n if gameOver(board, comp_sym):\n displayBoard()\n print('\\n\\nComputer WON!!!')\n break\n displayBoard()\n print('GAME OVER')\n",
"step-4": "import random\nchoices = ['X', 'O']\ntry:\n player_sym = input(\"Choose 'X' or 'O' : \")\n if player_sym != 'X' and player_sym != 'O':\n raise Exception('Symbol not found')\nexcept Exception as e:\n print(e.args)\nelse:\n choices.remove(player_sym)\n comp_sym = choices[0]\n player_dict = {player_sym: 'Player', comp_sym: 'Computer'}\nboard = [' '] * 9\ngameEnd = False\nunmarked = [i for i in range(9)]\n\n\ndef gameOver(board, symbol):\n if board[0] == board[3] == board[6] == symbol or board[1] == board[7\n ] == board[4] == symbol or board[2] == board[5] == board[8\n ] == symbol or board[0] == board[1] == board[2] == symbol or board[5\n ] == board[3] == board[4] == symbol or board[6] == board[7] == board[8\n ] == symbol or board[2] == board[4] == board[6] == symbol or board[0\n ] == board[4] == board[8] == symbol:\n return True\n\n\ndef mark(pos, symbol):\n board[pos] = symbol\n unmarked.remove(pos)\n\n\ndef displayBoard():\n for i in range(len(board)):\n if i == 1 or i == 4 or i == 7:\n print(f'|{board[i]}|', end=' ')\n elif i == 2 or i == 5:\n print(f'{board[i]}\\n--------')\n else:\n print(f'{board[i]}', end=' ')\n\n\nif __name__ == '__main__':\n while not gameEnd:\n try:\n player_pos = int(input('\\n\\nWhere would you mark? '))\n if player_pos < 0 or player_pos > 8 or player_pos not in unmarked:\n raise Exception('Position out of Board')\n break\n except Exception as e:\n print(e.args)\n else:\n mark(player_pos, player_sym)\n if gameOver(board, player_sym):\n displayBoard()\n print('\\n\\nPlayer Won!!!')\n break\n comp_pos = unmarked[random.randint(0, len(unmarked) - 1)]\n mark(comp_pos, comp_sym)\n if gameOver(board, comp_sym):\n displayBoard()\n print('\\n\\nComputer WON!!!')\n break\n displayBoard()\n print('GAME OVER')\n",
"step-5": "import random\n\nchoices = ['X', 'O']\ntry:\n# Choice of X-O given to the player\n player_sym = input(\"Choose 'X' or 'O' : \")\n# raising an exception if the variable is not X or O\n if player_sym!='X' and player_sym!='O':\n raise Exception(\"Symbol not found\")\nexcept Exception as e:\n print(e.args)\nelse:\n# Allotting the other one as the computer symbol\n choices.remove(player_sym)\n comp_sym = choices[0]\n player_dict = {player_sym:'Player', comp_sym:'Computer'}\n \n# creating the board\nboard = [' ']*9\ngameEnd = False # to track when the game ends\nunmarked = [i for i in range(9)] # to track all the blank boxes left\n\n\n\n# gameOver function check if the game already has a winner\ndef gameOver(board, symbol):\n# below is the sequence of all the possible winning combinations \n if board[0]==board[3]==board[6]==symbol or board[1]==board[7]==board[4]==symbol or board[2]==board[5]==board[8]==symbol or board[0]==board[1]==board[2]==symbol or board[5]==board[3]==board[4]==symbol or board[6]==board[7]==board[8]==symbol or board[2]==board[4]==board[6]==symbol or board[0]==board[4]==board[8]==symbol:\n# if there is a pattern match the game is over hence return True\n return True\n\n\n\n# function for marking the box with the symbol\n\ndef mark(pos, symbol):\n board[pos] = symbol\n unmarked.remove(pos)\n# Used it for debugging : print(f\"Unmarked : {unmarked}\")\n\n\n\n# function to display the board at a particular time\ndef displayBoard():\n for i in range(len(board)):\n# formatting the output for the middle elements\n if i==1 or i==4 or i==7:\n print(f'|{board[i]}|', end=' ')\n elif i==2 or i==5:\n print(f'{board[i]}\\n--------') # marks the end of a line and hence bifurcates two lines\n else:\n print(f'{board[i]}', end=' ')\n\n\nif __name__== \"__main__\":\n # this is where the game starts \n while not gameEnd: # loop until game ends\n try:\n player_pos = int(input(\"\\n\\nWhere would you mark? 
\"))\n # check if position index is on the board and is available for marking else raise Exception\n if player_pos<0 or player_pos>8 or (player_pos not in unmarked): \n raise Exception(\"Position out of Board\")\n break\n except Exception as e:\n print(e.args)\n else:\n mark(player_pos, player_sym)\n \n # check if the game has already ended and if yes, declare the player as winner\n if gameOver(board, player_sym): \n displayBoard()\n print(\"\\n\\nPlayer Won!!!\")\n break\n \n # computer will mark on some random square that is not marked yet\n comp_pos = unmarked[random.randint(0, len(unmarked)-1)]\n mark(comp_pos, comp_sym)\n \n # check if the game has already ended and if yes, declare the computer as winner\n if gameOver(board, comp_sym): \n displayBoard()\n print(\"\\n\\nComputer WON!!!\")\n break\n \n # display the board after each iteration\n displayBoard()\n \n # marks the end of the game\n print(\"GAME OVER\")",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
api.main()
<|reserved_special_token_1|>
from xrouter import api
api.main()
<|reserved_special_token_1|>
#!/usr/bin/env python
from xrouter import api
api.main()
|
flexible
|
{
"blob_id": "64368679aa2e387e25a36b2f3d0312a99b819e95",
"index": 2147,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napi.main()\n",
"step-3": "from xrouter import api\napi.main()\n",
"step-4": "#!/usr/bin/env python\nfrom xrouter import api\napi.main()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
while True:
a, b = input().split()
a = float(a)
b = float(b)
if b == 0:
print('error')
else:
c = a / b + 0.5
c = int(c)
print(c)
<|reserved_special_token_1|>
# coding=utf-8
while True:
a,b=input().split()
a=float(a)
b=float(b)
if b==0:
print("error")
else:
c=a/b+0.5
c=int(c)
print(c)
|
flexible
|
{
"blob_id": "dab5e7ee1d14cba485cbaece1354ec8d686ca4ab",
"index": 9080,
"step-1": "<mask token>\n",
"step-2": "while True:\n a, b = input().split()\n a = float(a)\n b = float(b)\n if b == 0:\n print('error')\n else:\n c = a / b + 0.5\n c = int(c)\n print(c)\n",
"step-3": "# coding=utf-8\nwhile True:\n a,b=input().split()\n a=float(a)\n b=float(b)\n if b==0:\n print(\"error\")\n else:\n c=a/b+0.5\n c=int(c)\n print(c)",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
@jd.get('/routerjson')
def apply_jd_waybill(db):
query = bottle.request.query
if query['method'] == 'jingdong.etms.waybillcode.get':
jd_code, resp = jd_get_response_normal()
logging.debug('JD response: {} {}'.format(jd_code, resp))
db.add(JdWaybillApplyResp(jd_code, resp))
else:
jd_param = json.loads(query['360buy_param_json'])
delivery_id = jd_param['deliveryId']
order_id = jd_param['orderId']
resp = jd_send_response_normal(delivery_id, order_id)
db.add(JdWaybillSendResp(delivery_id, order_id, resp))
logging.debug('JD response: {}'.format(resp))
return resp
@jd.get('/jd_waybill')
def jd_waybill(db):
query = bottle.request.query
jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get
('wms_order_code')).first()
if jd_rsp:
return jd_rsp.body
return HTTPError(404, None)
def jd_get_response_normal():
code = str(uuid.uuid4()).split('-')[-1]
return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {
'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [
code]}, 'code': u'0'}})
def jd_send_response_normal(deliver_id, order_id):
return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':
{'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':
order_id}}})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
jd.install(plugin)
@jd.get('/routerjson')
def apply_jd_waybill(db):
query = bottle.request.query
if query['method'] == 'jingdong.etms.waybillcode.get':
jd_code, resp = jd_get_response_normal()
logging.debug('JD response: {} {}'.format(jd_code, resp))
db.add(JdWaybillApplyResp(jd_code, resp))
else:
jd_param = json.loads(query['360buy_param_json'])
delivery_id = jd_param['deliveryId']
order_id = jd_param['orderId']
resp = jd_send_response_normal(delivery_id, order_id)
db.add(JdWaybillSendResp(delivery_id, order_id, resp))
logging.debug('JD response: {}'.format(resp))
return resp
@jd.get('/jd_waybill')
def jd_waybill(db):
query = bottle.request.query
jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get
('wms_order_code')).first()
if jd_rsp:
return jd_rsp.body
return HTTPError(404, None)
def jd_get_response_normal():
code = str(uuid.uuid4()).split('-')[-1]
return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {
'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [
code]}, 'code': u'0'}})
def jd_send_response_normal(deliver_id, order_id):
return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':
{'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':
order_id}}})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
jd = bottle.Bottle(catchall=False)
plugin = sqlalchemy.Plugin(engine, Base.metadata, keyword='db', create=True,
commit=True, use_kwargs=False)
jd.install(plugin)
@jd.get('/routerjson')
def apply_jd_waybill(db):
query = bottle.request.query
if query['method'] == 'jingdong.etms.waybillcode.get':
jd_code, resp = jd_get_response_normal()
logging.debug('JD response: {} {}'.format(jd_code, resp))
db.add(JdWaybillApplyResp(jd_code, resp))
else:
jd_param = json.loads(query['360buy_param_json'])
delivery_id = jd_param['deliveryId']
order_id = jd_param['orderId']
resp = jd_send_response_normal(delivery_id, order_id)
db.add(JdWaybillSendResp(delivery_id, order_id, resp))
logging.debug('JD response: {}'.format(resp))
return resp
@jd.get('/jd_waybill')
def jd_waybill(db):
query = bottle.request.query
jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get
('wms_order_code')).first()
if jd_rsp:
return jd_rsp.body
return HTTPError(404, None)
def jd_get_response_normal():
code = str(uuid.uuid4()).split('-')[-1]
return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {
'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [
code]}, 'code': u'0'}})
def jd_send_response_normal(deliver_id, order_id):
return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':
{'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':
order_id}}})
<|reserved_special_token_1|>
import logging
import uuid
import json
import xmltodict
import bottle
from bottle import HTTPError
from bottle.ext import sqlalchemy
from database import Base, engine
from database import JdWaybillSendResp, JdWaybillApplyResp
jd = bottle.Bottle(catchall=False)
plugin = sqlalchemy.Plugin(engine, Base.metadata, keyword='db', create=True,
commit=True, use_kwargs=False)
jd.install(plugin)
@jd.get('/routerjson')
def apply_jd_waybill(db):
query = bottle.request.query
if query['method'] == 'jingdong.etms.waybillcode.get':
jd_code, resp = jd_get_response_normal()
logging.debug('JD response: {} {}'.format(jd_code, resp))
db.add(JdWaybillApplyResp(jd_code, resp))
else:
jd_param = json.loads(query['360buy_param_json'])
delivery_id = jd_param['deliveryId']
order_id = jd_param['orderId']
resp = jd_send_response_normal(delivery_id, order_id)
db.add(JdWaybillSendResp(delivery_id, order_id, resp))
logging.debug('JD response: {}'.format(resp))
return resp
@jd.get('/jd_waybill')
def jd_waybill(db):
query = bottle.request.query
jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get
('wms_order_code')).first()
if jd_rsp:
return jd_rsp.body
return HTTPError(404, None)
def jd_get_response_normal():
code = str(uuid.uuid4()).split('-')[-1]
return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {
'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [
code]}, 'code': u'0'}})
def jd_send_response_normal(deliver_id, order_id):
return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':
{'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':
order_id}}})
<|reserved_special_token_1|>
# coding: utf-8
import logging
import uuid
import json
import xmltodict
import bottle
from bottle import HTTPError
from bottle.ext import sqlalchemy
from database import Base, engine
from database import JdWaybillSendResp, JdWaybillApplyResp
jd = bottle.Bottle(catchall=False)
plugin = sqlalchemy.Plugin(
engine, # SQLAlchemy engine created with create_engine function.
Base.metadata, # SQLAlchemy metadata, required only if create=True.
keyword='db', # Keyword used to inject session database in a route (default 'db').
create=True, # If it is true, execute `metadata.create_all(engine)` when plugin is applied (default False).
commit=True, # If it is true, plugin commit changes after route is executed (default True).
use_kwargs=False
# If it is true and keyword is not defined,
# plugin uses **kwargs argument to inject session database (default False).
)
jd.install(plugin)
@jd.get('/routerjson')
def apply_jd_waybill(db):
query = bottle.request.query
if query['method'] == 'jingdong.etms.waybillcode.get':
jd_code, resp = jd_get_response_normal()
logging.debug('JD response: {} {}'.format(jd_code, resp))
db.add(JdWaybillApplyResp(jd_code, resp))
else: # '''jingdong.etms.waybillcode.send'''
jd_param = json.loads(query['360buy_param_json'])
delivery_id = jd_param['deliveryId']
order_id = jd_param['orderId']
resp = jd_send_response_normal(delivery_id, order_id)
db.add(JdWaybillSendResp(delivery_id, order_id, resp))
logging.debug('JD response: {}'.format(resp))
return resp
@jd.get('/jd_waybill')
def jd_waybill(db):
query = bottle.request.query
jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get('wms_order_code')).first()
if jd_rsp:
# return entities
return jd_rsp.body
return HTTPError(404, None)
def jd_get_response_normal():
code = str(uuid.uuid4()).split('-')[-1]
return code, json.dumps({
'jingdong_etms_waybillcode_get_responce':
{'resultInfo':
{'message': u'成功',
'code': 100,
'deliveryIdList': [code]
},
'code': u'0'
}
})
def jd_send_response_normal(deliver_id, order_id):
return json.dumps({
"jingdong_etms_waybill_send_responce": {
"resultInfo": {
"message": u"成功",
"deliveryId": deliver_id,
"code": 100,
"orderId": order_id
}
}
})
|
flexible
|
{
"blob_id": "a93884757069393b4d96de5ec9c7d815d58a2ea5",
"index": 935,
"step-1": "<mask token>\n\n\n@jd.get('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\n@jd.get('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n",
"step-2": "<mask token>\njd.install(plugin)\n\n\n@jd.get('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\n@jd.get('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n",
"step-3": "<mask token>\njd = bottle.Bottle(catchall=False)\nplugin = sqlalchemy.Plugin(engine, Base.metadata, keyword='db', create=True,\n commit=True, use_kwargs=False)\njd.install(plugin)\n\n\n@jd.get('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\n@jd.get('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n",
"step-4": "import logging\nimport uuid\nimport json\nimport xmltodict\nimport bottle\nfrom bottle import HTTPError\nfrom bottle.ext import sqlalchemy\nfrom database import Base, engine\nfrom database import JdWaybillSendResp, JdWaybillApplyResp\njd = bottle.Bottle(catchall=False)\nplugin = sqlalchemy.Plugin(engine, Base.metadata, keyword='db', create=True,\n commit=True, use_kwargs=False)\njd.install(plugin)\n\n\n@jd.get('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\n@jd.get('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n",
"step-5": "# coding: utf-8\nimport logging\nimport uuid\nimport json\nimport xmltodict\nimport bottle\nfrom bottle import HTTPError\nfrom bottle.ext import sqlalchemy\nfrom database import Base, engine\nfrom database import JdWaybillSendResp, JdWaybillApplyResp\n\njd = bottle.Bottle(catchall=False)\n\nplugin = sqlalchemy.Plugin(\n engine, # SQLAlchemy engine created with create_engine function.\n Base.metadata, # SQLAlchemy metadata, required only if create=True.\n keyword='db', # Keyword used to inject session database in a route (default 'db').\n create=True, # If it is true, execute `metadata.create_all(engine)` when plugin is applied (default False).\n commit=True, # If it is true, plugin commit changes after route is executed (default True).\n use_kwargs=False\n # If it is true and keyword is not defined,\n # plugin uses **kwargs argument to inject session database (default False).\n)\n\njd.install(plugin)\n\n\n@jd.get('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else: # '''jingdong.etms.waybillcode.send'''\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n\n return resp\n\n\n@jd.get('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get('wms_order_code')).first()\n if jd_rsp:\n # return entities\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({\n 'jingdong_etms_waybillcode_get_responce':\n {'resultInfo':\n 
{'message': u'成功',\n 'code': 100,\n 'deliveryIdList': [code]\n },\n 'code': u'0'\n }\n })\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({\n \"jingdong_etms_waybill_send_responce\": {\n \"resultInfo\": {\n \"message\": u\"成功\",\n \"deliveryId\": deliver_id,\n \"code\": 100,\n \"orderId\": order_id\n }\n }\n })\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import random #import random module
guesses_taken = 0 #assign 0 to guesses_taken variable
print('Hello! What is your name?')# print Hello! What is your name? to console
myName = input()#take an input from user(name)
number = random.randint(1, 20)# make random number between 1 and 19 and save in number variable
print('Well, ' + myName + ', I am thinking of a number between 1 and 20.') #print the explanation
while guesses_taken < 6: #while loop looping until guesses_taken < 6
print('Take a guess.') # print the introduction
guess = input() # user input
guess = int(guess) #convert the input to integer
guesses_taken += 1 #guess opportunity reduce
if guess < number:#if guess less than number print Your guess is too low.
print('Your guess is too low.')
if guess > number:#if guess bigger than number print Your guess is too low.
print('Your guess is too high.')
if guess == number:#if guess equal to number break
break
if guess == number:#if guess equal to number, user guessed the number and print the underline
guesses_taken = str(guesses_taken)
print('Good job, ' + myName + '! You guessed my number in ' + guesses_taken + ' guesses!')
if guess != number:#if guess not equal to number user try till guess_take is 6 and print under
number = str(number)
print('Nope. The number I was thinking of was ' + number)
|
normal
|
{
"blob_id": "3302dc058032d9fe412bde6fd89699203526a72d",
"index": 4695,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Hello! What is your name?')\n<mask token>\nprint('Well, ' + myName + ', I am thinking of a number between 1 and 20.')\nwhile guesses_taken < 6:\n print('Take a guess.')\n guess = input()\n guess = int(guess)\n guesses_taken += 1\n if guess < number:\n print('Your guess is too low.')\n if guess > number:\n print('Your guess is too high.')\n if guess == number:\n break\nif guess == number:\n guesses_taken = str(guesses_taken)\n print('Good job, ' + myName + '! You guessed my number in ' +\n guesses_taken + ' guesses!')\nif guess != number:\n number = str(number)\n print('Nope. The number I was thinking of was ' + number)\n",
"step-3": "<mask token>\nguesses_taken = 0\nprint('Hello! What is your name?')\nmyName = input()\nnumber = random.randint(1, 20)\nprint('Well, ' + myName + ', I am thinking of a number between 1 and 20.')\nwhile guesses_taken < 6:\n print('Take a guess.')\n guess = input()\n guess = int(guess)\n guesses_taken += 1\n if guess < number:\n print('Your guess is too low.')\n if guess > number:\n print('Your guess is too high.')\n if guess == number:\n break\nif guess == number:\n guesses_taken = str(guesses_taken)\n print('Good job, ' + myName + '! You guessed my number in ' +\n guesses_taken + ' guesses!')\nif guess != number:\n number = str(number)\n print('Nope. The number I was thinking of was ' + number)\n",
"step-4": "import random\nguesses_taken = 0\nprint('Hello! What is your name?')\nmyName = input()\nnumber = random.randint(1, 20)\nprint('Well, ' + myName + ', I am thinking of a number between 1 and 20.')\nwhile guesses_taken < 6:\n print('Take a guess.')\n guess = input()\n guess = int(guess)\n guesses_taken += 1\n if guess < number:\n print('Your guess is too low.')\n if guess > number:\n print('Your guess is too high.')\n if guess == number:\n break\nif guess == number:\n guesses_taken = str(guesses_taken)\n print('Good job, ' + myName + '! You guessed my number in ' +\n guesses_taken + ' guesses!')\nif guess != number:\n number = str(number)\n print('Nope. The number I was thinking of was ' + number)\n",
"step-5": "import random #import random module\n\nguesses_taken = 0 #assign 0 to guesses_taken variable\n\nprint('Hello! What is your name?')# print Hello! What is your name? to console\nmyName = input()#take an input from user(name)\n\nnumber = random.randint(1, 20)# make random number between 1 and 19 and save in number variable\nprint('Well, ' + myName + ', I am thinking of a number between 1 and 20.') #print the explanation\n\nwhile guesses_taken < 6: #while loop looping until guesses_taken < 6\n print('Take a guess.') # print the introduction\n guess = input() # user input\n guess = int(guess) #convert the input to integer\n\n\n guesses_taken += 1 #guess opportunity reduce\n\n if guess < number:#if guess less than number print Your guess is too low.\n print('Your guess is too low.')\n\n if guess > number:#if guess bigger than number print Your guess is too low.\n print('Your guess is too high.')\n\n if guess == number:#if guess equal to number break\n break\n\nif guess == number:#if guess equal to number, user guessed the number and print the underline\n guesses_taken = str(guesses_taken)\n print('Good job, ' + myName + '! You guessed my number in ' + guesses_taken + ' guesses!')\n\nif guess != number:#if guess not equal to number user try till guess_take is 6 and print under\n number = str(number)\n print('Nope. The number I was thinking of was ' + number)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from datetime import datetime
from sqlalchemy import Column, Integer, String, ForeignKey, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
Base = declarative_base()
class BusLine(Base):
__tablename__ = "bus_lines"
id = Column(Integer, primary_key=True)
line_id = Column(Integer)
line_description = Column(String)
class BusRoute(Base):
__tablename__ = "bus_routes"
id = Column(Integer, primary_key=True)
bus_line_id = Column(Integer)
route_id = Column(Integer)
route_description = Column(String)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class BusRoutePos(Base):
__tablename__ = "bus_route_pos"
id = Column(Integer, primary_key=True)
route_id = Column(Integer, ForeignKey("bus_routes.route_id"), nullable=False)
lat = Column(String)
lon = Column(String)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class BusPos(Base):
__tablename__ = "bus_pos"
id = Column(Integer, primary_key=True)
bus_line_id = Column(Integer, ForeignKey("bus_lines.line_id"), nullable=False)
bus_internal_id = Column(Integer)
lat = Column(String)
lon = Column(String)
orientation = Column(Integer)
timestamp = Column(Integer)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class BusStop(Base):
__tablename__ = "bus_stops"
id = Column(Integer, primary_key=True)
route_id = Column(Integer, ForeignKey("bus_routes.route_id"), nullable=False)
lat = Column(String)
lon = Column(String)
stop_code = Column(String)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class BusTrip(Base):
    """ORM model tracking a vehicle's current trip on a route (table ``bus_trip``)."""
    __tablename__ = "bus_trip"
    id = Column(Integer, primary_key=True)  # surrogate primary key
    bus_line_id = Column(Integer)  # line the trip belongs to
    bus_internal_id = Column(Integer)  # identifier of the individual vehicle
    route_id = Column(Integer)  # route currently being driven
    last_updated = Column(DateTime, default=datetime.utcnow)  # last time this row was touched
    last_pos_timestamp = Column(Integer, default=0)  # timestamp of the newest position applied
|
normal
|
{
"blob_id": "9e896d935cc57e580ed46cd501b41053bbaab38f",
"index": 6490,
"step-1": "<mask token>\n\n\nclass BusRoute(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BusRoutePos(Base):\n __tablename__ = 'bus_route_pos'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusPos(Base):\n __tablename__ = 'bus_pos'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer, ForeignKey('bus_lines.line_id'), nullable\n =False)\n bus_internal_id = Column(Integer)\n lat = Column(String)\n lon = Column(String)\n orientation = Column(Integer)\n timestamp = Column(Integer)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusStop(Base):\n __tablename__ = 'bus_stops'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n stop_code = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusTrip(Base):\n __tablename__ = 'bus_trip'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n bus_internal_id = Column(Integer)\n route_id = Column(Integer)\n last_updated = Column(DateTime, default=datetime.utcnow)\n last_pos_timestamp = Column(Integer, default=0)\n",
"step-2": "<mask token>\n\n\nclass BusLine(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BusRoute(Base):\n __tablename__ = 'bus_routes'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n route_id = Column(Integer)\n route_description = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusRoutePos(Base):\n __tablename__ = 'bus_route_pos'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusPos(Base):\n __tablename__ = 'bus_pos'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer, ForeignKey('bus_lines.line_id'), nullable\n =False)\n bus_internal_id = Column(Integer)\n lat = Column(String)\n lon = Column(String)\n orientation = Column(Integer)\n timestamp = Column(Integer)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusStop(Base):\n __tablename__ = 'bus_stops'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n stop_code = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusTrip(Base):\n __tablename__ = 'bus_trip'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n bus_internal_id = Column(Integer)\n route_id = Column(Integer)\n last_updated = Column(DateTime, default=datetime.utcnow)\n last_pos_timestamp = Column(Integer, default=0)\n",
"step-3": "<mask token>\n\n\nclass BusLine(Base):\n __tablename__ = 'bus_lines'\n id = Column(Integer, primary_key=True)\n line_id = Column(Integer)\n line_description = Column(String)\n\n\nclass BusRoute(Base):\n __tablename__ = 'bus_routes'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n route_id = Column(Integer)\n route_description = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusRoutePos(Base):\n __tablename__ = 'bus_route_pos'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusPos(Base):\n __tablename__ = 'bus_pos'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer, ForeignKey('bus_lines.line_id'), nullable\n =False)\n bus_internal_id = Column(Integer)\n lat = Column(String)\n lon = Column(String)\n orientation = Column(Integer)\n timestamp = Column(Integer)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusStop(Base):\n __tablename__ = 'bus_stops'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n stop_code = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusTrip(Base):\n __tablename__ = 'bus_trip'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n bus_internal_id = Column(Integer)\n route_id = Column(Integer)\n last_updated = Column(DateTime, default=datetime.utcnow)\n last_pos_timestamp = Column(Integer, default=0)\n",
"step-4": "<mask token>\nBase = declarative_base()\n\n\nclass BusLine(Base):\n __tablename__ = 'bus_lines'\n id = Column(Integer, primary_key=True)\n line_id = Column(Integer)\n line_description = Column(String)\n\n\nclass BusRoute(Base):\n __tablename__ = 'bus_routes'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n route_id = Column(Integer)\n route_description = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusRoutePos(Base):\n __tablename__ = 'bus_route_pos'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusPos(Base):\n __tablename__ = 'bus_pos'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer, ForeignKey('bus_lines.line_id'), nullable\n =False)\n bus_internal_id = Column(Integer)\n lat = Column(String)\n lon = Column(String)\n orientation = Column(Integer)\n timestamp = Column(Integer)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusStop(Base):\n __tablename__ = 'bus_stops'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n stop_code = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusTrip(Base):\n __tablename__ = 'bus_trip'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n bus_internal_id = Column(Integer)\n route_id = Column(Integer)\n last_updated = Column(DateTime, default=datetime.utcnow)\n last_pos_timestamp = Column(Integer, default=0)\n",
"step-5": "from datetime import datetime\n\nfrom sqlalchemy import Column, Integer, String, ForeignKey, DateTime\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\n\nBase = declarative_base()\n\n\nclass BusLine(Base):\n __tablename__ = \"bus_lines\"\n id = Column(Integer, primary_key=True)\n line_id = Column(Integer)\n line_description = Column(String)\n\n\nclass BusRoute(Base):\n __tablename__ = \"bus_routes\"\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n route_id = Column(Integer)\n route_description = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusRoutePos(Base):\n __tablename__ = \"bus_route_pos\"\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey(\"bus_routes.route_id\"), nullable=False)\n lat = Column(String)\n lon = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusPos(Base):\n __tablename__ = \"bus_pos\"\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer, ForeignKey(\"bus_lines.line_id\"), nullable=False)\n bus_internal_id = Column(Integer)\n lat = Column(String)\n lon = Column(String)\n orientation = Column(Integer)\n timestamp = Column(Integer)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusStop(Base):\n __tablename__ = \"bus_stops\"\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey(\"bus_routes.route_id\"), nullable=False)\n lat = Column(String)\n lon = Column(String)\n stop_code = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusTrip(Base):\n __tablename__ = \"bus_trip\"\n id = Column(Integer, primary_key=True)\n\n bus_line_id = Column(Integer)\n bus_internal_id = Column(Integer)\n\n route_id = 
Column(Integer)\n last_updated = Column(DateTime, default=datetime.utcnow)\n last_pos_timestamp = Column(Integer, default=0)\n",
"step-ids": [
12,
15,
16,
17,
19
]
}
|
[
12,
15,
16,
17,
19
] |
# MicroPython firmware manifest (presumably for the STM32F469 Discovery
# board -- TODO confirm): include the base board manifest, then freeze this
# project's 'src' directory into the firmware image.
include('f469-disco/manifest_f469.py')
freeze('src')
|
normal
|
{
"blob_id": "3b29912788fa4cc76f34f52da7728e934ee96637",
"index": 7117,
"step-1": "<mask token>\n",
"step-2": "include('f469-disco/manifest_f469.py')\nfreeze('src')\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from django.urls import path
from .views import *

# URL routing table: one endpoint per view.
urlpatterns = [
    path('country', Country_Data, name='country_data'),
    path('tours', Scrape_Data, name='scrape_data'),
    path('draws', Draw_Data, name='Draw_data'),
]
|
normal
|
{
"blob_id": "b39c783cbaff2915c8864ce0b081b5bf052baee5",
"index": 6731,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('country', Country_Data, name='country_data'), path(\n 'tours', Scrape_Data, name='scrape_data'), path('draws', Draw_Data,\n name='Draw_data')]\n",
"step-3": "from django.urls import path\nfrom .views import *\nurlpatterns = [path('country', Country_Data, name='country_data'), path(\n 'tours', Scrape_Data, name='scrape_data'), path('draws', Draw_Data,\n name='Draw_data')]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class DBException(Exception):
"""
Represents a generic exception thrown by the Database Manager
"""
pass
class DBManager:
def __init__(self, cfg):
self.cfg = cfg
self.__companies = {}
self.__loggedIn = False
self.connection = None
self.cursor = None
def __del__(self):
try:
self.connection.close()
except psycopg2.Error:
pass
def __logInDb(self):
try:
dbSettings = self.cfg.databaseSettings
self.connection = psycopg2.connect(DB_CONNECT_STRING.format(
host=dbSettings[0], dbname=dbSettings[1], user=dbSettings[2
], passwd=dbSettings[3]))
self.cursor = self.connection.cursor()
self.__loggedIn = True
return True
except (psycopg2.OperationalError, ConfigurationException):
return False
def __getDomainName(self, schema):
try:
self.cursor.execute(
"SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'"
.format(schemaname=schema))
return 'http://' + self.cursor.fetchone()[0]
except psycopg2.DatabaseError:
raise DBException('Failed to extract domain name from database')
def __buildCategoryUrl(self, catId, schemaName):
return '{domain}/vendors/?find=category-{categoryId}'.format(domain
=self.__getDomainName(schemaName), categoryId=catId)
def __buildProfileUrl(self, catSlug, profSlug, schemaName):
return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain
=self.__getDomainName(schemaName), categorySlug=catSlug,
profileSlug=profSlug)
def __buildProfileUrlWOCategory(self, profSlug, schemaName):
return '{domain}/vendors/{profileSlug}'.format(domain=self.
__getDomainName(schemaName), profileSlug=profSlug)
def __getCompaniesData(self, schema):
"""
Load Companies list from database
"""
try:
self.cursor.execute(
"""SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE
twitter <> ''"""
.format(schema=schema))
data = self.cursor.fetchall()
companies = []
for entry in data:
self.cursor.execute(
'SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'
.format(schema=schema, vendor=entry[0]))
cities = self.cursor.fetchall()
if cities is None:
continue
city = ''
for cityId in cities:
self.cursor.execute(
'SELECT city FROM {schema}.locations_location WHERE id = {city}'
.format(schema=schema, city=cityId[0]))
cityName = self.cursor.fetchone()
if cityName is not None:
city += cityName[0]
self.cursor.execute(
'SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND "primary" is true'
.format(schema=schema, vendor=entry[0]))
customKind = self.cursor.fetchone()
if customKind is None:
catId = rank = None
else:
catId, rank = customKind
if catId is not None:
self.cursor.execute(
'SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'
.format(schema=schema, cat=catId))
catData = self.cursor.fetchone()
else:
catData = None
companies.append(DBItemCompany(_id=entry[0], tweeter=entry[
1], category=catData[0] if catData is not None else
None, categoryUrl=self.__buildCategoryUrl(catId, schema
) if catId is not None else None, provenScore=entry[2],
ranking=rank, location=city, url=self.__buildProfileUrl
(catData[1], entry[3], schema) if catData is not None else
self.__buildProfileUrlWOCategory(entry[3], schema),
categoryId=catId))
self.__companies[schema] = companies
except psycopg2.DatabaseError as err:
raise DBException(err.args[0])
def domainUrl(self, schema):
return self.__getDomainName(schema)
def refreshData(self, schemas):
if not self.__loggedIn:
if not self.__logInDb():
return False
for schema in schemas:
self.__getCompaniesData(schema)
return True
@property
def companies(self):
return self.__companies
@property
def isConnected(self):
return self.__loggedIn
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DBItemCompany:
def __init__(self, _id, tweeter, category, categoryUrl, provenScore,
ranking, location, url, categoryId):
self.id = _id
self.twitterAccount = tweeter
self.category = category
self.categoryUrl = categoryUrl
self.provenScore = provenScore
self.ranking = ranking
self.location = location
self.url = url
self.categoryId = categoryId
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class DBException(Exception):
"""
Represents a generic exception thrown by the Database Manager
"""
pass
class DBManager:
def __init__(self, cfg):
self.cfg = cfg
self.__companies = {}
self.__loggedIn = False
self.connection = None
self.cursor = None
def __del__(self):
try:
self.connection.close()
except psycopg2.Error:
pass
def __logInDb(self):
try:
dbSettings = self.cfg.databaseSettings
self.connection = psycopg2.connect(DB_CONNECT_STRING.format(
host=dbSettings[0], dbname=dbSettings[1], user=dbSettings[2
], passwd=dbSettings[3]))
self.cursor = self.connection.cursor()
self.__loggedIn = True
return True
except (psycopg2.OperationalError, ConfigurationException):
return False
def __getDomainName(self, schema):
try:
self.cursor.execute(
"SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'"
.format(schemaname=schema))
return 'http://' + self.cursor.fetchone()[0]
except psycopg2.DatabaseError:
raise DBException('Failed to extract domain name from database')
def __buildCategoryUrl(self, catId, schemaName):
return '{domain}/vendors/?find=category-{categoryId}'.format(domain
=self.__getDomainName(schemaName), categoryId=catId)
def __buildProfileUrl(self, catSlug, profSlug, schemaName):
return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain
=self.__getDomainName(schemaName), categorySlug=catSlug,
profileSlug=profSlug)
def __buildProfileUrlWOCategory(self, profSlug, schemaName):
return '{domain}/vendors/{profileSlug}'.format(domain=self.
__getDomainName(schemaName), profileSlug=profSlug)
def __getCompaniesData(self, schema):
"""
Load Companies list from database
"""
try:
self.cursor.execute(
"""SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE
twitter <> ''"""
.format(schema=schema))
data = self.cursor.fetchall()
companies = []
for entry in data:
self.cursor.execute(
'SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'
.format(schema=schema, vendor=entry[0]))
cities = self.cursor.fetchall()
if cities is None:
continue
city = ''
for cityId in cities:
self.cursor.execute(
'SELECT city FROM {schema}.locations_location WHERE id = {city}'
.format(schema=schema, city=cityId[0]))
cityName = self.cursor.fetchone()
if cityName is not None:
city += cityName[0]
self.cursor.execute(
'SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND "primary" is true'
.format(schema=schema, vendor=entry[0]))
customKind = self.cursor.fetchone()
if customKind is None:
catId = rank = None
else:
catId, rank = customKind
if catId is not None:
self.cursor.execute(
'SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'
.format(schema=schema, cat=catId))
catData = self.cursor.fetchone()
else:
catData = None
companies.append(DBItemCompany(_id=entry[0], tweeter=entry[
1], category=catData[0] if catData is not None else
None, categoryUrl=self.__buildCategoryUrl(catId, schema
) if catId is not None else None, provenScore=entry[2],
ranking=rank, location=city, url=self.__buildProfileUrl
(catData[1], entry[3], schema) if catData is not None else
self.__buildProfileUrlWOCategory(entry[3], schema),
categoryId=catId))
self.__companies[schema] = companies
except psycopg2.DatabaseError as err:
raise DBException(err.args[0])
def domainUrl(self, schema):
return self.__getDomainName(schema)
def refreshData(self, schemas):
if not self.__loggedIn:
if not self.__logInDb():
return False
for schema in schemas:
self.__getCompaniesData(schema)
return True
@property
def companies(self):
return self.__companies
@property
def isConnected(self):
return self.__loggedIn
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DBItemCompany:
def __init__(self, _id, tweeter, category, categoryUrl, provenScore,
ranking, location, url, categoryId):
self.id = _id
self.twitterAccount = tweeter
self.category = category
self.categoryUrl = categoryUrl
self.provenScore = provenScore
self.ranking = ranking
self.location = location
self.url = url
self.categoryId = categoryId
<|reserved_special_token_0|>
@property
def twitter(self):
return '@' + self.twitterAccount
class DBException(Exception):
"""
Represents a generic exception thrown by the Database Manager
"""
pass
class DBManager:
def __init__(self, cfg):
self.cfg = cfg
self.__companies = {}
self.__loggedIn = False
self.connection = None
self.cursor = None
def __del__(self):
try:
self.connection.close()
except psycopg2.Error:
pass
def __logInDb(self):
try:
dbSettings = self.cfg.databaseSettings
self.connection = psycopg2.connect(DB_CONNECT_STRING.format(
host=dbSettings[0], dbname=dbSettings[1], user=dbSettings[2
], passwd=dbSettings[3]))
self.cursor = self.connection.cursor()
self.__loggedIn = True
return True
except (psycopg2.OperationalError, ConfigurationException):
return False
def __getDomainName(self, schema):
try:
self.cursor.execute(
"SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'"
.format(schemaname=schema))
return 'http://' + self.cursor.fetchone()[0]
except psycopg2.DatabaseError:
raise DBException('Failed to extract domain name from database')
def __buildCategoryUrl(self, catId, schemaName):
return '{domain}/vendors/?find=category-{categoryId}'.format(domain
=self.__getDomainName(schemaName), categoryId=catId)
def __buildProfileUrl(self, catSlug, profSlug, schemaName):
return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain
=self.__getDomainName(schemaName), categorySlug=catSlug,
profileSlug=profSlug)
def __buildProfileUrlWOCategory(self, profSlug, schemaName):
return '{domain}/vendors/{profileSlug}'.format(domain=self.
__getDomainName(schemaName), profileSlug=profSlug)
def __getCompaniesData(self, schema):
"""
Load Companies list from database
"""
try:
self.cursor.execute(
"""SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE
twitter <> ''"""
.format(schema=schema))
data = self.cursor.fetchall()
companies = []
for entry in data:
self.cursor.execute(
'SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'
.format(schema=schema, vendor=entry[0]))
cities = self.cursor.fetchall()
if cities is None:
continue
city = ''
for cityId in cities:
self.cursor.execute(
'SELECT city FROM {schema}.locations_location WHERE id = {city}'
.format(schema=schema, city=cityId[0]))
cityName = self.cursor.fetchone()
if cityName is not None:
city += cityName[0]
self.cursor.execute(
'SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND "primary" is true'
.format(schema=schema, vendor=entry[0]))
customKind = self.cursor.fetchone()
if customKind is None:
catId = rank = None
else:
catId, rank = customKind
if catId is not None:
self.cursor.execute(
'SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'
.format(schema=schema, cat=catId))
catData = self.cursor.fetchone()
else:
catData = None
companies.append(DBItemCompany(_id=entry[0], tweeter=entry[
1], category=catData[0] if catData is not None else
None, categoryUrl=self.__buildCategoryUrl(catId, schema
) if catId is not None else None, provenScore=entry[2],
ranking=rank, location=city, url=self.__buildProfileUrl
(catData[1], entry[3], schema) if catData is not None else
self.__buildProfileUrlWOCategory(entry[3], schema),
categoryId=catId))
self.__companies[schema] = companies
except psycopg2.DatabaseError as err:
raise DBException(err.args[0])
def domainUrl(self, schema):
return self.__getDomainName(schema)
def refreshData(self, schemas):
if not self.__loggedIn:
if not self.__logInDb():
return False
for schema in schemas:
self.__getCompaniesData(schema)
return True
@property
def companies(self):
return self.__companies
@property
def isConnected(self):
return self.__loggedIn
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DBItemCompany:
def __init__(self, _id, tweeter, category, categoryUrl, provenScore,
ranking, location, url, categoryId):
self.id = _id
self.twitterAccount = tweeter
self.category = category
self.categoryUrl = categoryUrl
self.provenScore = provenScore
self.ranking = ranking
self.location = location
self.url = url
self.categoryId = categoryId
@property
def invalidScore(self):
return self.provenScore is None or self.provenScore < 1
@property
def twitter(self):
return '@' + self.twitterAccount
class DBException(Exception):
"""
Represents a generic exception thrown by the Database Manager
"""
pass
class DBManager:
def __init__(self, cfg):
self.cfg = cfg
self.__companies = {}
self.__loggedIn = False
self.connection = None
self.cursor = None
def __del__(self):
try:
self.connection.close()
except psycopg2.Error:
pass
def __logInDb(self):
try:
dbSettings = self.cfg.databaseSettings
self.connection = psycopg2.connect(DB_CONNECT_STRING.format(
host=dbSettings[0], dbname=dbSettings[1], user=dbSettings[2
], passwd=dbSettings[3]))
self.cursor = self.connection.cursor()
self.__loggedIn = True
return True
except (psycopg2.OperationalError, ConfigurationException):
return False
def __getDomainName(self, schema):
try:
self.cursor.execute(
"SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'"
.format(schemaname=schema))
return 'http://' + self.cursor.fetchone()[0]
except psycopg2.DatabaseError:
raise DBException('Failed to extract domain name from database')
def __buildCategoryUrl(self, catId, schemaName):
return '{domain}/vendors/?find=category-{categoryId}'.format(domain
=self.__getDomainName(schemaName), categoryId=catId)
def __buildProfileUrl(self, catSlug, profSlug, schemaName):
return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain
=self.__getDomainName(schemaName), categorySlug=catSlug,
profileSlug=profSlug)
def __buildProfileUrlWOCategory(self, profSlug, schemaName):
return '{domain}/vendors/{profileSlug}'.format(domain=self.
__getDomainName(schemaName), profileSlug=profSlug)
def __getCompaniesData(self, schema):
"""
Load Companies list from database
"""
try:
self.cursor.execute(
"""SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE
twitter <> ''"""
.format(schema=schema))
data = self.cursor.fetchall()
companies = []
for entry in data:
self.cursor.execute(
'SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'
.format(schema=schema, vendor=entry[0]))
cities = self.cursor.fetchall()
if cities is None:
continue
city = ''
for cityId in cities:
self.cursor.execute(
'SELECT city FROM {schema}.locations_location WHERE id = {city}'
.format(schema=schema, city=cityId[0]))
cityName = self.cursor.fetchone()
if cityName is not None:
city += cityName[0]
self.cursor.execute(
'SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND "primary" is true'
.format(schema=schema, vendor=entry[0]))
customKind = self.cursor.fetchone()
if customKind is None:
catId = rank = None
else:
catId, rank = customKind
if catId is not None:
self.cursor.execute(
'SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'
.format(schema=schema, cat=catId))
catData = self.cursor.fetchone()
else:
catData = None
companies.append(DBItemCompany(_id=entry[0], tweeter=entry[
1], category=catData[0] if catData is not None else
None, categoryUrl=self.__buildCategoryUrl(catId, schema
) if catId is not None else None, provenScore=entry[2],
ranking=rank, location=city, url=self.__buildProfileUrl
(catData[1], entry[3], schema) if catData is not None else
self.__buildProfileUrlWOCategory(entry[3], schema),
categoryId=catId))
self.__companies[schema] = companies
except psycopg2.DatabaseError as err:
raise DBException(err.args[0])
def domainUrl(self, schema):
return self.__getDomainName(schema)
def refreshData(self, schemas):
if not self.__loggedIn:
if not self.__logInDb():
return False
for schema in schemas:
self.__getCompaniesData(schema)
return True
@property
def companies(self):
return self.__companies
@property
def isConnected(self):
return self.__loggedIn
<|reserved_special_token_1|>
import psycopg2
from .configuration import ConfigurationException
DB_CONNECT_STRING = "host='{host}' dbname='{dbname}' user='{user}' password='{passwd}'"
class DBItemCompany:
    """A single vendor record loaded from the database, with derived views."""

    def __init__(self, _id, tweeter, category, categoryUrl, provenScore, ranking, location, url, categoryId):
        # Plain field capture; `tweeter` is stored without the leading '@'.
        self.id = _id
        self.categoryId = categoryId
        self.category = category
        self.categoryUrl = categoryUrl
        self.twitterAccount = tweeter
        self.provenScore = provenScore
        self.ranking = ranking
        self.location = location
        self.url = url

    @property
    def invalidScore(self):
        """True when the proven score is missing or below the minimum of 1."""
        score = self.provenScore
        if score is None:
            return True
        return score < 1

    @property
    def twitter(self):
        """The vendor's Twitter handle, prefixed with '@'."""
        return '@' + self.twitterAccount
class DBException(Exception):
    """
    Generic exception raised by the Database Manager when a database
    operation fails (wraps the underlying psycopg2 error message).
    """
    pass
class DBManager:
    """
    Data-access layer for the multi-tenant vendors database.

    Logs in lazily on the first refreshData() call and caches, per schema,
    the vendors that have a Twitter account as DBItemCompany entries.
    """

    def __init__(self, cfg):
        """
        :param cfg: configuration object exposing ``databaseSettings`` as a
                    (host, dbname, user, password) sequence (see __logInDb).
        """
        self.cfg = cfg
        self.__companies = {}    # schema name -> list of DBItemCompany
        self.__loggedIn = False  # True once a connection has been established
        self.connection = None
        self.cursor = None

    def __del__(self):
        # `connection` stays None until __logInDb() succeeds; calling
        # close() on None would raise AttributeError, which the
        # `except psycopg2.Error` handler below does not catch -- guard
        # explicitly so the destructor never raises.
        if self.connection is None:
            return
        try:
            self.connection.close()
        except psycopg2.Error:
            pass

    def __logInDb(self):
        """Open the database connection; return True on success, False otherwise."""
        try:
            dbSettings = self.cfg.databaseSettings
            self.connection = psycopg2.connect(DB_CONNECT_STRING.format(
                host=dbSettings[0], dbname=dbSettings[1],
                user=dbSettings[2], passwd=dbSettings[3]
            ))
            self.cursor = self.connection.cursor()
            self.__loggedIn = True
            return True
        except (psycopg2.OperationalError, ConfigurationException):
            # Bad host/credentials or malformed configuration: report failure
            # instead of raising so callers can retry later.
            return False

    def __getDomainName(self, schema):
        """Return the customer's site URL ('http://<domain>') for `schema`.

        :raises DBException: if the lookup fails at the database level.
        """
        # NOTE(review): the schema name is interpolated into the SQL string;
        # it originates from internal configuration here, but this should be
        # parameterized if it can ever be user-supplied.
        try:
            self.cursor.execute("SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'".format(schemaname=schema))
            return 'http://' + self.cursor.fetchone()[0]
        except psycopg2.DatabaseError:
            raise DBException('Failed to extract domain name from database')

    def __buildCategoryUrl(self, catId, schemaName):
        """URL of the vendor listing filtered to one category."""
        return '{domain}/vendors/?find=category-{categoryId}'.format(domain=self.__getDomainName(schemaName), categoryId=catId)

    def __buildProfileUrl(self, catSlug, profSlug, schemaName):
        """URL of a vendor profile nested under its category slug."""
        return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain=self.__getDomainName(schemaName),
                                                                      categorySlug=catSlug,
                                                                      profileSlug=profSlug)

    def __buildProfileUrlWOCategory(self, profSlug, schemaName):
        """URL of a vendor profile when the vendor has no primary category."""
        return '{domain}/vendors/{profileSlug}'.format(domain=self.__getDomainName(schemaName), profileSlug=profSlug)

    def __getCompaniesData(self, schema):
        """
        Load Companies list from database.

        Populates self.__companies[schema] with a DBItemCompany for every
        vendor in `schema` whose twitter column is non-empty.

        :raises DBException: wrapping any psycopg2.DatabaseError.
        """
        # NOTE(review): schema names and ids are interpolated into the SQL
        # strings; the values come from our own configuration and query
        # results here, but parameterized queries would be safer.
        try:
            self.cursor.execute("""SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE
                                twitter <> ''""".format(schema=schema))
            data = self.cursor.fetchall()
            companies = []
            for entry in data:
                # Concatenate the names of every city the vendor is located in.
                self.cursor.execute('SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'.format(schema=schema, vendor=entry[0]))
                cities = self.cursor.fetchall()
                if cities is None:
                    continue
                city = ''
                for cityId in cities:
                    self.cursor.execute('SELECT city FROM {schema}.locations_location WHERE id = {city}'.format(schema=schema, city=cityId[0]))
                    cityName = self.cursor.fetchone()
                    if cityName is not None:
                        city += cityName[0]
                # The vendor's primary custom kind supplies category and rank.
                self.cursor.execute('SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND "primary" is true'.format(schema=schema, vendor=entry[0]))
                customKind = self.cursor.fetchone()
                if customKind is None:
                    catId = rank = None
                else:
                    catId, rank = customKind
                if catId is not None:
                    self.cursor.execute('SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'.format(schema=schema, cat=catId))
                    catData = self.cursor.fetchone()
                else:
                    catData = None
                companies.append(DBItemCompany(
                    _id = entry[0],
                    tweeter = entry[1],
                    category = catData[0] if catData is not None else None,
                    categoryUrl = self.__buildCategoryUrl(catId, schema) if catId is not None else None,
                    provenScore = entry[2],
                    ranking = rank,
                    location = city,
                    url = self.__buildProfileUrl(catData[1], entry[3], schema) if catData is not None else self.__buildProfileUrlWOCategory(entry[3], schema),
                    categoryId = catId
                ))
            self.__companies[schema] = companies
        except psycopg2.DatabaseError as err:
            raise DBException(err.args[0])

    def domainUrl(self, schema):
        """Public wrapper: the customer site URL for `schema`."""
        return self.__getDomainName(schema)

    def refreshData(self, schemas):
        """
        (Re)load company data for every schema in `schemas`.

        Connects lazily on first use; returns False when the connection
        cannot be established, True once all schemas are refreshed.
        """
        if not self.__loggedIn:
            if not self.__logInDb():
                return False
        for schema in schemas:
            self.__getCompaniesData(schema)
        return True

    @property
    def companies(self):
        """Mapping of schema name -> list of DBItemCompany from the last refresh."""
        return self.__companies

    @property
    def isConnected(self):
        """True once a database connection has been established."""
        return self.__loggedIn
|
flexible
|
{
"blob_id": "31b87a3ceca1f48665ecc9754d5f87bb9b7bbf13",
"index": 7579,
"step-1": "<mask token>\n\n\nclass DBException(Exception):\n \"\"\"\n Represents a generic exception thrown by the Database Manager\n \"\"\"\n pass\n\n\nclass DBManager:\n\n def __init__(self, cfg):\n self.cfg = cfg\n self.__companies = {}\n self.__loggedIn = False\n self.connection = None\n self.cursor = None\n\n def __del__(self):\n try:\n self.connection.close()\n except psycopg2.Error:\n pass\n\n def __logInDb(self):\n try:\n dbSettings = self.cfg.databaseSettings\n self.connection = psycopg2.connect(DB_CONNECT_STRING.format(\n host=dbSettings[0], dbname=dbSettings[1], user=dbSettings[2\n ], passwd=dbSettings[3]))\n self.cursor = self.connection.cursor()\n self.__loggedIn = True\n return True\n except (psycopg2.OperationalError, ConfigurationException):\n return False\n\n def __getDomainName(self, schema):\n try:\n self.cursor.execute(\n \"SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'\"\n .format(schemaname=schema))\n return 'http://' + self.cursor.fetchone()[0]\n except psycopg2.DatabaseError:\n raise DBException('Failed to extract domain name from database')\n\n def __buildCategoryUrl(self, catId, schemaName):\n return '{domain}/vendors/?find=category-{categoryId}'.format(domain\n =self.__getDomainName(schemaName), categoryId=catId)\n\n def __buildProfileUrl(self, catSlug, profSlug, schemaName):\n return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain\n =self.__getDomainName(schemaName), categorySlug=catSlug,\n profileSlug=profSlug)\n\n def __buildProfileUrlWOCategory(self, profSlug, schemaName):\n return '{domain}/vendors/{profileSlug}'.format(domain=self.\n __getDomainName(schemaName), profileSlug=profSlug)\n\n def __getCompaniesData(self, schema):\n \"\"\"\n Load Companies list from database\n \"\"\"\n try:\n self.cursor.execute(\n \"\"\"SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE\n twitter <> ''\"\"\"\n .format(schema=schema))\n data = self.cursor.fetchall()\n companies = []\n for 
entry in data:\n self.cursor.execute(\n 'SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'\n .format(schema=schema, vendor=entry[0]))\n cities = self.cursor.fetchall()\n if cities is None:\n continue\n city = ''\n for cityId in cities:\n self.cursor.execute(\n 'SELECT city FROM {schema}.locations_location WHERE id = {city}'\n .format(schema=schema, city=cityId[0]))\n cityName = self.cursor.fetchone()\n if cityName is not None:\n city += cityName[0]\n self.cursor.execute(\n 'SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND \"primary\" is true'\n .format(schema=schema, vendor=entry[0]))\n customKind = self.cursor.fetchone()\n if customKind is None:\n catId = rank = None\n else:\n catId, rank = customKind\n if catId is not None:\n self.cursor.execute(\n 'SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'\n .format(schema=schema, cat=catId))\n catData = self.cursor.fetchone()\n else:\n catData = None\n companies.append(DBItemCompany(_id=entry[0], tweeter=entry[\n 1], category=catData[0] if catData is not None else\n None, categoryUrl=self.__buildCategoryUrl(catId, schema\n ) if catId is not None else None, provenScore=entry[2],\n ranking=rank, location=city, url=self.__buildProfileUrl\n (catData[1], entry[3], schema) if catData is not None else\n self.__buildProfileUrlWOCategory(entry[3], schema),\n categoryId=catId))\n self.__companies[schema] = companies\n except psycopg2.DatabaseError as err:\n raise DBException(err.args[0])\n\n def domainUrl(self, schema):\n return self.__getDomainName(schema)\n\n def refreshData(self, schemas):\n if not self.__loggedIn:\n if not self.__logInDb():\n return False\n for schema in schemas:\n self.__getCompaniesData(schema)\n return True\n\n @property\n def companies(self):\n return self.__companies\n\n @property\n def isConnected(self):\n return self.__loggedIn\n",
"step-2": "<mask token>\n\n\nclass DBItemCompany:\n\n def __init__(self, _id, tweeter, category, categoryUrl, provenScore,\n ranking, location, url, categoryId):\n self.id = _id\n self.twitterAccount = tweeter\n self.category = category\n self.categoryUrl = categoryUrl\n self.provenScore = provenScore\n self.ranking = ranking\n self.location = location\n self.url = url\n self.categoryId = categoryId\n <mask token>\n <mask token>\n\n\nclass DBException(Exception):\n \"\"\"\n Represents a generic exception thrown by the Database Manager\n \"\"\"\n pass\n\n\nclass DBManager:\n\n def __init__(self, cfg):\n self.cfg = cfg\n self.__companies = {}\n self.__loggedIn = False\n self.connection = None\n self.cursor = None\n\n def __del__(self):\n try:\n self.connection.close()\n except psycopg2.Error:\n pass\n\n def __logInDb(self):\n try:\n dbSettings = self.cfg.databaseSettings\n self.connection = psycopg2.connect(DB_CONNECT_STRING.format(\n host=dbSettings[0], dbname=dbSettings[1], user=dbSettings[2\n ], passwd=dbSettings[3]))\n self.cursor = self.connection.cursor()\n self.__loggedIn = True\n return True\n except (psycopg2.OperationalError, ConfigurationException):\n return False\n\n def __getDomainName(self, schema):\n try:\n self.cursor.execute(\n \"SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'\"\n .format(schemaname=schema))\n return 'http://' + self.cursor.fetchone()[0]\n except psycopg2.DatabaseError:\n raise DBException('Failed to extract domain name from database')\n\n def __buildCategoryUrl(self, catId, schemaName):\n return '{domain}/vendors/?find=category-{categoryId}'.format(domain\n =self.__getDomainName(schemaName), categoryId=catId)\n\n def __buildProfileUrl(self, catSlug, profSlug, schemaName):\n return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain\n =self.__getDomainName(schemaName), categorySlug=catSlug,\n profileSlug=profSlug)\n\n def __buildProfileUrlWOCategory(self, profSlug, schemaName):\n return 
'{domain}/vendors/{profileSlug}'.format(domain=self.\n __getDomainName(schemaName), profileSlug=profSlug)\n\n def __getCompaniesData(self, schema):\n \"\"\"\n Load Companies list from database\n \"\"\"\n try:\n self.cursor.execute(\n \"\"\"SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE\n twitter <> ''\"\"\"\n .format(schema=schema))\n data = self.cursor.fetchall()\n companies = []\n for entry in data:\n self.cursor.execute(\n 'SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'\n .format(schema=schema, vendor=entry[0]))\n cities = self.cursor.fetchall()\n if cities is None:\n continue\n city = ''\n for cityId in cities:\n self.cursor.execute(\n 'SELECT city FROM {schema}.locations_location WHERE id = {city}'\n .format(schema=schema, city=cityId[0]))\n cityName = self.cursor.fetchone()\n if cityName is not None:\n city += cityName[0]\n self.cursor.execute(\n 'SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND \"primary\" is true'\n .format(schema=schema, vendor=entry[0]))\n customKind = self.cursor.fetchone()\n if customKind is None:\n catId = rank = None\n else:\n catId, rank = customKind\n if catId is not None:\n self.cursor.execute(\n 'SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'\n .format(schema=schema, cat=catId))\n catData = self.cursor.fetchone()\n else:\n catData = None\n companies.append(DBItemCompany(_id=entry[0], tweeter=entry[\n 1], category=catData[0] if catData is not None else\n None, categoryUrl=self.__buildCategoryUrl(catId, schema\n ) if catId is not None else None, provenScore=entry[2],\n ranking=rank, location=city, url=self.__buildProfileUrl\n (catData[1], entry[3], schema) if catData is not None else\n self.__buildProfileUrlWOCategory(entry[3], schema),\n categoryId=catId))\n self.__companies[schema] = companies\n except psycopg2.DatabaseError as err:\n raise DBException(err.args[0])\n\n def domainUrl(self, schema):\n 
return self.__getDomainName(schema)\n\n def refreshData(self, schemas):\n if not self.__loggedIn:\n if not self.__logInDb():\n return False\n for schema in schemas:\n self.__getCompaniesData(schema)\n return True\n\n @property\n def companies(self):\n return self.__companies\n\n @property\n def isConnected(self):\n return self.__loggedIn\n",
"step-3": "<mask token>\n\n\nclass DBItemCompany:\n\n def __init__(self, _id, tweeter, category, categoryUrl, provenScore,\n ranking, location, url, categoryId):\n self.id = _id\n self.twitterAccount = tweeter\n self.category = category\n self.categoryUrl = categoryUrl\n self.provenScore = provenScore\n self.ranking = ranking\n self.location = location\n self.url = url\n self.categoryId = categoryId\n <mask token>\n\n @property\n def twitter(self):\n return '@' + self.twitterAccount\n\n\nclass DBException(Exception):\n \"\"\"\n Represents a generic exception thrown by the Database Manager\n \"\"\"\n pass\n\n\nclass DBManager:\n\n def __init__(self, cfg):\n self.cfg = cfg\n self.__companies = {}\n self.__loggedIn = False\n self.connection = None\n self.cursor = None\n\n def __del__(self):\n try:\n self.connection.close()\n except psycopg2.Error:\n pass\n\n def __logInDb(self):\n try:\n dbSettings = self.cfg.databaseSettings\n self.connection = psycopg2.connect(DB_CONNECT_STRING.format(\n host=dbSettings[0], dbname=dbSettings[1], user=dbSettings[2\n ], passwd=dbSettings[3]))\n self.cursor = self.connection.cursor()\n self.__loggedIn = True\n return True\n except (psycopg2.OperationalError, ConfigurationException):\n return False\n\n def __getDomainName(self, schema):\n try:\n self.cursor.execute(\n \"SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'\"\n .format(schemaname=schema))\n return 'http://' + self.cursor.fetchone()[0]\n except psycopg2.DatabaseError:\n raise DBException('Failed to extract domain name from database')\n\n def __buildCategoryUrl(self, catId, schemaName):\n return '{domain}/vendors/?find=category-{categoryId}'.format(domain\n =self.__getDomainName(schemaName), categoryId=catId)\n\n def __buildProfileUrl(self, catSlug, profSlug, schemaName):\n return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain\n =self.__getDomainName(schemaName), categorySlug=catSlug,\n profileSlug=profSlug)\n\n def 
__buildProfileUrlWOCategory(self, profSlug, schemaName):\n return '{domain}/vendors/{profileSlug}'.format(domain=self.\n __getDomainName(schemaName), profileSlug=profSlug)\n\n def __getCompaniesData(self, schema):\n \"\"\"\n Load Companies list from database\n \"\"\"\n try:\n self.cursor.execute(\n \"\"\"SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE\n twitter <> ''\"\"\"\n .format(schema=schema))\n data = self.cursor.fetchall()\n companies = []\n for entry in data:\n self.cursor.execute(\n 'SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'\n .format(schema=schema, vendor=entry[0]))\n cities = self.cursor.fetchall()\n if cities is None:\n continue\n city = ''\n for cityId in cities:\n self.cursor.execute(\n 'SELECT city FROM {schema}.locations_location WHERE id = {city}'\n .format(schema=schema, city=cityId[0]))\n cityName = self.cursor.fetchone()\n if cityName is not None:\n city += cityName[0]\n self.cursor.execute(\n 'SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND \"primary\" is true'\n .format(schema=schema, vendor=entry[0]))\n customKind = self.cursor.fetchone()\n if customKind is None:\n catId = rank = None\n else:\n catId, rank = customKind\n if catId is not None:\n self.cursor.execute(\n 'SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'\n .format(schema=schema, cat=catId))\n catData = self.cursor.fetchone()\n else:\n catData = None\n companies.append(DBItemCompany(_id=entry[0], tweeter=entry[\n 1], category=catData[0] if catData is not None else\n None, categoryUrl=self.__buildCategoryUrl(catId, schema\n ) if catId is not None else None, provenScore=entry[2],\n ranking=rank, location=city, url=self.__buildProfileUrl\n (catData[1], entry[3], schema) if catData is not None else\n self.__buildProfileUrlWOCategory(entry[3], schema),\n categoryId=catId))\n self.__companies[schema] = companies\n except psycopg2.DatabaseError as err:\n 
raise DBException(err.args[0])\n\n def domainUrl(self, schema):\n return self.__getDomainName(schema)\n\n def refreshData(self, schemas):\n if not self.__loggedIn:\n if not self.__logInDb():\n return False\n for schema in schemas:\n self.__getCompaniesData(schema)\n return True\n\n @property\n def companies(self):\n return self.__companies\n\n @property\n def isConnected(self):\n return self.__loggedIn\n",
"step-4": "<mask token>\n\n\nclass DBItemCompany:\n\n def __init__(self, _id, tweeter, category, categoryUrl, provenScore,\n ranking, location, url, categoryId):\n self.id = _id\n self.twitterAccount = tweeter\n self.category = category\n self.categoryUrl = categoryUrl\n self.provenScore = provenScore\n self.ranking = ranking\n self.location = location\n self.url = url\n self.categoryId = categoryId\n\n @property\n def invalidScore(self):\n return self.provenScore is None or self.provenScore < 1\n\n @property\n def twitter(self):\n return '@' + self.twitterAccount\n\n\nclass DBException(Exception):\n \"\"\"\n Represents a generic exception thrown by the Database Manager\n \"\"\"\n pass\n\n\nclass DBManager:\n\n def __init__(self, cfg):\n self.cfg = cfg\n self.__companies = {}\n self.__loggedIn = False\n self.connection = None\n self.cursor = None\n\n def __del__(self):\n try:\n self.connection.close()\n except psycopg2.Error:\n pass\n\n def __logInDb(self):\n try:\n dbSettings = self.cfg.databaseSettings\n self.connection = psycopg2.connect(DB_CONNECT_STRING.format(\n host=dbSettings[0], dbname=dbSettings[1], user=dbSettings[2\n ], passwd=dbSettings[3]))\n self.cursor = self.connection.cursor()\n self.__loggedIn = True\n return True\n except (psycopg2.OperationalError, ConfigurationException):\n return False\n\n def __getDomainName(self, schema):\n try:\n self.cursor.execute(\n \"SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'\"\n .format(schemaname=schema))\n return 'http://' + self.cursor.fetchone()[0]\n except psycopg2.DatabaseError:\n raise DBException('Failed to extract domain name from database')\n\n def __buildCategoryUrl(self, catId, schemaName):\n return '{domain}/vendors/?find=category-{categoryId}'.format(domain\n =self.__getDomainName(schemaName), categoryId=catId)\n\n def __buildProfileUrl(self, catSlug, profSlug, schemaName):\n return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain\n 
=self.__getDomainName(schemaName), categorySlug=catSlug,\n profileSlug=profSlug)\n\n def __buildProfileUrlWOCategory(self, profSlug, schemaName):\n return '{domain}/vendors/{profileSlug}'.format(domain=self.\n __getDomainName(schemaName), profileSlug=profSlug)\n\n def __getCompaniesData(self, schema):\n \"\"\"\n Load Companies list from database\n \"\"\"\n try:\n self.cursor.execute(\n \"\"\"SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE\n twitter <> ''\"\"\"\n .format(schema=schema))\n data = self.cursor.fetchall()\n companies = []\n for entry in data:\n self.cursor.execute(\n 'SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'\n .format(schema=schema, vendor=entry[0]))\n cities = self.cursor.fetchall()\n if cities is None:\n continue\n city = ''\n for cityId in cities:\n self.cursor.execute(\n 'SELECT city FROM {schema}.locations_location WHERE id = {city}'\n .format(schema=schema, city=cityId[0]))\n cityName = self.cursor.fetchone()\n if cityName is not None:\n city += cityName[0]\n self.cursor.execute(\n 'SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND \"primary\" is true'\n .format(schema=schema, vendor=entry[0]))\n customKind = self.cursor.fetchone()\n if customKind is None:\n catId = rank = None\n else:\n catId, rank = customKind\n if catId is not None:\n self.cursor.execute(\n 'SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'\n .format(schema=schema, cat=catId))\n catData = self.cursor.fetchone()\n else:\n catData = None\n companies.append(DBItemCompany(_id=entry[0], tweeter=entry[\n 1], category=catData[0] if catData is not None else\n None, categoryUrl=self.__buildCategoryUrl(catId, schema\n ) if catId is not None else None, provenScore=entry[2],\n ranking=rank, location=city, url=self.__buildProfileUrl\n (catData[1], entry[3], schema) if catData is not None else\n self.__buildProfileUrlWOCategory(entry[3], schema),\n 
categoryId=catId))\n self.__companies[schema] = companies\n except psycopg2.DatabaseError as err:\n raise DBException(err.args[0])\n\n def domainUrl(self, schema):\n return self.__getDomainName(schema)\n\n def refreshData(self, schemas):\n if not self.__loggedIn:\n if not self.__logInDb():\n return False\n for schema in schemas:\n self.__getCompaniesData(schema)\n return True\n\n @property\n def companies(self):\n return self.__companies\n\n @property\n def isConnected(self):\n return self.__loggedIn\n",
"step-5": "import psycopg2\n\nfrom .configuration import ConfigurationException\n\nDB_CONNECT_STRING = \"host='{host}' dbname='{dbname}' user='{user}' password='{passwd}'\"\n\nclass DBItemCompany:\n def __init__(self, _id, tweeter, category, categoryUrl, provenScore, ranking, location, url, categoryId):\n self.id = _id\n self.twitterAccount = tweeter\n self.category = category\n self.categoryUrl = categoryUrl\n self.provenScore = provenScore\n self.ranking = ranking\n self.location = location\n self.url = url\n self.categoryId = categoryId\n\n @property\n def invalidScore(self):\n return self.provenScore is None or self.provenScore < 1\n\n @property\n def twitter(self):\n return '@' + self.twitterAccount\n\nclass DBException(Exception):\n \"\"\"\n Represents a generic exception thrown by the Database Manager\n \"\"\"\n pass\n\nclass DBManager:\n def __init__(self, cfg):\n self.cfg = cfg\n\n self.__companies = {}\n self.__loggedIn = False\n self.connection = None\n self.cursor = None\n\n def __del__(self):\n try:\n self.connection.close()\n except psycopg2.Error:\n pass\n\n def __logInDb(self):\n try:\n dbSettings = self.cfg.databaseSettings\n\n self.connection = psycopg2.connect(DB_CONNECT_STRING.format(\n host=dbSettings[0], dbname=dbSettings[1],\n user=dbSettings[2], passwd=dbSettings[3]\n ))\n self.cursor = self.connection.cursor()\n\n self.__loggedIn = True\n\n return True\n except (psycopg2.OperationalError, ConfigurationException):\n return False\n\n def __getDomainName(self, schema):\n try:\n self.cursor.execute(\"SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'\".format(schemaname=schema))\n return 'http://' + self.cursor.fetchone()[0]\n except psycopg2.DatabaseError:\n raise DBException('Failed to extract domain name from database')\n\n def __buildCategoryUrl(self, catId, schemaName):\n return '{domain}/vendors/?find=category-{categoryId}'.format(domain=self.__getDomainName(schemaName), categoryId=catId)\n\n def 
__buildProfileUrl(self, catSlug, profSlug, schemaName):\n return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain=self.__getDomainName(schemaName),\n categorySlug=catSlug,\n profileSlug=profSlug)\n\n def __buildProfileUrlWOCategory(self, profSlug, schemaName):\n return '{domain}/vendors/{profileSlug}'.format(domain=self.__getDomainName(schemaName), profileSlug=profSlug)\n\n def __getCompaniesData(self, schema):\n \"\"\"\n Load Companies list from database\n \"\"\"\n try:\n self.cursor.execute(\"\"\"SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE\n twitter <> ''\"\"\".format(schema=schema))\n data = self.cursor.fetchall()\n\n companies = []\n for entry in data:\n self.cursor.execute('SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'.format(schema=schema, vendor=entry[0]))\n cities = self.cursor.fetchall()\n\n if cities is None:\n continue\n\n city = ''\n\n for cityId in cities:\n self.cursor.execute('SELECT city FROM {schema}.locations_location WHERE id = {city}'.format(schema=schema, city=cityId[0]))\n cityName = self.cursor.fetchone()\n\n if cityName is not None:\n city += cityName[0]\n\n self.cursor.execute('SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND \"primary\" is true'.format(schema=schema, vendor=entry[0]))\n customKind = self.cursor.fetchone()\n\n if customKind is None:\n catId = rank = None\n else:\n catId, rank = customKind\n\n if catId is not None:\n self.cursor.execute('SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'.format(schema=schema, cat=catId))\n catData = self.cursor.fetchone()\n else:\n catData = None\n\n companies.append(DBItemCompany(\n _id = entry[0],\n tweeter = entry[1],\n category = catData[0] if catData is not None else None,\n categoryUrl = self.__buildCategoryUrl(catId, schema) if catId is not None else None,\n provenScore = entry[2],\n ranking = rank,\n location = city,\n url = 
self.__buildProfileUrl(catData[1], entry[3], schema) if catData is not None else self.__buildProfileUrlWOCategory(entry[3], schema),\n categoryId = catId\n ))\n\n self.__companies[schema] = companies\n\n except psycopg2.DatabaseError as err:\n raise DBException(err.args[0])\n\n def domainUrl(self, schema):\n return self.__getDomainName(schema)\n\n def refreshData(self, schemas):\n if not self.__loggedIn:\n if not self.__logInDb():\n return False\n\n for schema in schemas:\n self.__getCompaniesData(schema)\n\n return True\n\n @property\n def companies(self):\n return self.__companies\n\n @property\n def isConnected(self):\n return self.__loggedIn\n",
"step-ids": [
15,
17,
18,
19,
22
]
}
|
[
15,
17,
18,
19,
22
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Login(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Login(models.Model):
trinity_id = models.CharField('', max_length=200)
trinity_password = models.CharField('', max_length=500)
objects = models.Manager()
<|reserved_special_token_1|>
from django.db import models
class Login(models.Model):
trinity_id = models.CharField('', max_length=200)
trinity_password = models.CharField('', max_length=500)
objects = models.Manager()
<|reserved_special_token_1|>
from django.db import models
# Create your models here.
class Login(models.Model):
trinity_id = models.CharField('',max_length=200)
trinity_password = models.CharField('',max_length=500)
objects = models.Manager()
|
flexible
|
{
"blob_id": "1c5cb9363c2903905f1026ede77615e8373c250b",
"index": 7321,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Login(models.Model):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Login(models.Model):\n trinity_id = models.CharField('', max_length=200)\n trinity_password = models.CharField('', max_length=500)\n objects = models.Manager()\n",
"step-4": "from django.db import models\n\n\nclass Login(models.Model):\n trinity_id = models.CharField('', max_length=200)\n trinity_password = models.CharField('', max_length=500)\n objects = models.Manager()\n",
"step-5": "from django.db import models\n\n# Create your models here.\n\nclass Login(models.Model):\n trinity_id = models.CharField('',max_length=200)\n trinity_password = models.CharField('',max_length=500)\n\n objects = models.Manager()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import tensorflow as tf
import numpy as np
import OpenAi.Pendulum.ActorCritic.Models as Models
"""
The `Buffer` class implements Experience Replay.
---

---
**Critic loss** - Mean Squared Error of `y - Q(s, a)`
where `y` is the expected return as seen by the Target network,
and `Q(s, a)` is action value predicted by the Critic network. `y` is a moving target
that the critic model tries to achieve; we make this target
stable by updating the Target model slowly.
**Actor loss** - This is computed using the mean of the value given by the Critic network
for the actions taken by the Actor network. We seek to maximize this quantity.
Hence we update the Actor network so that it produces actions that get
the maximum predicted value as seen by the Critic, for a given state.
"""
class Agent:
    """
    DDPG-style actor-critic agent with experience replay.

    Transitions (s, a, r, s') are stored in fixed-size numpy ring buffers;
    learn() samples a random mini-batch and performs one gradient step on the
    critic (MSE towards the target networks' bootstrapped value) and on the
    actor (maximising the critic's value of its actions), then softly updates
    the target networks with rate tau.
    """

    def __init__(self, env, buffer_capacity=100000, batch_size=64, gamma=0.99, tau=0.005, critic_lr=0.002, actor_lr=0.001):
        """
        env             -- gym-style environment with Box observation/action spaces
        buffer_capacity -- max number of transitions kept in the replay buffer
        batch_size      -- mini-batch size sampled per learn() call
        gamma           -- discount factor. Bug fix: the default used to be
                           -0.99; a negative discount inverts the value of
                           future rewards and breaks learning, 0.99 is the
                           conventional DDPG value.
        tau             -- soft target-network update rate
        critic_lr, actor_lr -- optimizer learning rates, forwarded to Models
        """
        num_states = env.observation_space.shape[0]
        num_actions = env.action_space.shape[0]

        # Number of "experiences" to store at max
        self.buffer_capacity = buffer_capacity
        # Num of tuples to train on.
        self.batch_size = batch_size
        # Tells us the number of times record() was called.
        self.buffer_counter = 0

        self.gamma = gamma
        self.tau = tau
        self.critic_lr = critic_lr
        self.actor_lr = actor_lr  # bug fix: was mistyped as self.actor_l

        # Instead of a list of tuples (as the exp.-replay concept goes) we use
        # a separate np.array per tuple element, which allows vectorised reads.
        self.state_buffer = np.zeros((self.buffer_capacity, num_states))
        self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
        self.reward_buffer = np.zeros((self.buffer_capacity, 1))
        self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))

        self.models = Models.Models(env=env, critic_lr=critic_lr, actor_lr=actor_lr)

    def record(self, obs_tuple):
        """Store one (state, action, reward, next_state) transition,
        overwriting the oldest entry once the buffer is full."""
        # Wrap the index so old records are replaced when capacity is exceeded.
        index = self.buffer_counter % self.buffer_capacity

        self.state_buffer[index] = obs_tuple[0]
        self.action_buffer[index] = obs_tuple[1]
        self.reward_buffer[index] = obs_tuple[2]
        self.next_state_buffer[index] = obs_tuple[3]

        self.buffer_counter += 1

    # Eager execution is on by default in TF2; tf.function compiles this body
    # into a static graph, a large speed-up for code consisting of many small
    # TensorFlow operations such as this.
    @tf.function
    def update(self, state_batch, action_batch, reward_batch, next_state_batch):
        """One gradient step on critic and actor for the given batch."""
        # Critic: minimise MSE between Q(s, a) and the target networks'
        # bootstrapped return y = r + gamma * Q'(s', mu'(s')).
        with tf.GradientTape() as tape:
            target_actions = self.models.target_actor(next_state_batch, training=True)
            y = reward_batch + self.gamma * self.models.target_critic(
                [next_state_batch, target_actions], training=True
            )
            critic_value = self.models.critic_model([state_batch, action_batch], training=True)
            critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))

        self.critic_grad = tape.gradient(critic_loss, self.models.critic_model.trainable_variables)
        self.models.critic_optimizer.apply_gradients(
            zip(self.critic_grad, self.models.critic_model.trainable_variables)
        )

        # Actor: `-value` because we want to maximise the value given by the
        # critic for the actor's actions.
        with tf.GradientTape() as tape:
            actions = self.models.actor_model(state_batch, training=True)
            critic_value = self.models.critic_model([state_batch, actions], training=True)
            actor_loss = -tf.math.reduce_mean(critic_value)

        actor_grad = tape.gradient(actor_loss, self.models.actor_model.trainable_variables)
        self.models.actor_optimizer.apply_gradients(
            zip(actor_grad, self.models.actor_model.trainable_variables)
        )

    def learn(self):
        """Sample a random mini-batch, update the networks, then soft-update
        the targets."""
        # Only sample from the part of the buffer that has been filled.
        record_range = min(self.buffer_counter, self.buffer_capacity)
        batch_indices = np.random.choice(record_range, self.batch_size)

        # Convert to tensors once, outside the tf.function.
        state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])
        action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])
        reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])
        reward_batch = tf.cast(reward_batch, dtype=tf.float32)
        next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])

        self.update(state_batch, action_batch, reward_batch, next_state_batch)
        self.update_target(self.models.target_actor.variables, self.models.actor_model.variables, self.tau)
        self.update_target(self.models.target_critic.variables, self.models.critic_model.variables, self.tau)

    def action(self, state, noise_object):
        """Delegate action selection (policy + exploration noise) to Models."""
        return self.models.policy(state, noise_object)

    @tf.function
    def update_target(self, target_weights, weights, tau):
        """Polyak-average the online weights into the target weights."""
        for (a, b) in zip(target_weights, weights):
            a.assign(b * tau + a * (1 - tau))
"""
To implement better exploration by the Actor network, we use noisy perturbations,
specifically
an **Ornstein-Uhlenbeck process** for generating noise, as described in the paper.
It samples noise from a correlated normal distribution.
"""
class OUActionNoise:
    """Ornstein-Uhlenbeck process for temporally correlated exploration noise.

    Each call performs one discretised step
        x_next = x + theta * (mean - x) * dt + std_dev * sqrt(dt) * N(0, 1)
    so successive samples are correlated, as described in the DDPG paper.
    """

    def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None):
        self.mean = mean
        self.std_dev = std_deviation
        self.theta = theta
        self.dt = dt
        self.x_initial = x_initial
        self.reset()

    def __call__(self):
        # Formula taken from https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.
        drift = self.theta * (self.mean - self.x_prev) * self.dt
        diffusion = self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape)
        sample = self.x_prev + drift + diffusion
        # Remember the sample so the next draw is correlated with this one.
        self.x_prev = sample
        return sample

    def reset(self):
        """Restart the process from x_initial (or zeros if none was given)."""
        self.x_prev = self.x_initial if self.x_initial is not None else np.zeros_like(self.mean)
|
normal
|
{
"blob_id": "8a9ed10bf25f3aa13fde43079303194fc6db26c0",
"index": 4248,
"step-1": "<mask token>\n\n\nclass Agent:\n <mask token>\n\n def record(self, obs_tuple):\n index = self.buffer_counter % self.buffer_capacity\n self.state_buffer[index] = obs_tuple[0]\n self.action_buffer[index] = obs_tuple[1]\n self.reward_buffer[index] = obs_tuple[2]\n self.next_state_buffer[index] = obs_tuple[3]\n self.buffer_counter += 1\n <mask token>\n <mask token>\n\n def action(self, state, noise_object):\n return self.models.policy(state, noise_object)\n\n @tf.function\n def update_target(self, target_weights, weights, tau):\n for a, b in zip(target_weights, weights):\n a.assign(b * tau + a * (1 - tau))\n\n\n<mask token>\n\n\nclass OUActionNoise:\n\n def __init__(self, mean, std_deviation, theta=0.15, dt=0.01, x_initial=None\n ):\n self.theta = theta\n self.mean = mean\n self.std_dev = std_deviation\n self.dt = dt\n self.x_initial = x_initial\n self.reset()\n\n def __call__(self):\n x = self.x_prev + self.theta * (self.mean - self.x_prev\n ) * self.dt + self.std_dev * np.sqrt(self.dt) * np.random.normal(\n size=self.mean.shape)\n self.x_prev = x\n return x\n\n def reset(self):\n if self.x_initial is not None:\n self.x_prev = self.x_initial\n else:\n self.x_prev = np.zeros_like(self.mean)\n",
"step-2": "<mask token>\n\n\nclass Agent:\n <mask token>\n\n def record(self, obs_tuple):\n index = self.buffer_counter % self.buffer_capacity\n self.state_buffer[index] = obs_tuple[0]\n self.action_buffer[index] = obs_tuple[1]\n self.reward_buffer[index] = obs_tuple[2]\n self.next_state_buffer[index] = obs_tuple[3]\n self.buffer_counter += 1\n\n @tf.function\n def update(self, state_batch, action_batch, reward_batch, next_state_batch\n ):\n with tf.GradientTape() as tape:\n target_actions = self.models.target_actor(next_state_batch,\n training=True)\n y = reward_batch + self.gamma * self.models.target_critic([\n next_state_batch, target_actions], training=True)\n critic_value = self.models.critic_model([state_batch,\n action_batch], training=True)\n critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))\n self.critic_grad = tape.gradient(critic_loss, self.models.\n critic_model.trainable_variables)\n self.models.critic_optimizer.apply_gradients(zip(self.critic_grad,\n self.models.critic_model.trainable_variables))\n with tf.GradientTape() as tape:\n actions = self.models.actor_model(state_batch, training=True)\n critic_value = self.models.critic_model([state_batch, actions],\n training=True)\n actor_loss = -tf.math.reduce_mean(critic_value)\n actor_grad = tape.gradient(actor_loss, self.models.actor_model.\n trainable_variables)\n self.models.actor_optimizer.apply_gradients(zip(actor_grad, self.\n models.actor_model.trainable_variables))\n <mask token>\n\n def action(self, state, noise_object):\n return self.models.policy(state, noise_object)\n\n @tf.function\n def update_target(self, target_weights, weights, tau):\n for a, b in zip(target_weights, weights):\n a.assign(b * tau + a * (1 - tau))\n\n\n<mask token>\n\n\nclass OUActionNoise:\n\n def __init__(self, mean, std_deviation, theta=0.15, dt=0.01, x_initial=None\n ):\n self.theta = theta\n self.mean = mean\n self.std_dev = std_deviation\n self.dt = dt\n self.x_initial = x_initial\n self.reset()\n\n 
def __call__(self):\n x = self.x_prev + self.theta * (self.mean - self.x_prev\n ) * self.dt + self.std_dev * np.sqrt(self.dt) * np.random.normal(\n size=self.mean.shape)\n self.x_prev = x\n return x\n\n def reset(self):\n if self.x_initial is not None:\n self.x_prev = self.x_initial\n else:\n self.x_prev = np.zeros_like(self.mean)\n",
"step-3": "<mask token>\n\n\nclass Agent:\n <mask token>\n\n def record(self, obs_tuple):\n index = self.buffer_counter % self.buffer_capacity\n self.state_buffer[index] = obs_tuple[0]\n self.action_buffer[index] = obs_tuple[1]\n self.reward_buffer[index] = obs_tuple[2]\n self.next_state_buffer[index] = obs_tuple[3]\n self.buffer_counter += 1\n\n @tf.function\n def update(self, state_batch, action_batch, reward_batch, next_state_batch\n ):\n with tf.GradientTape() as tape:\n target_actions = self.models.target_actor(next_state_batch,\n training=True)\n y = reward_batch + self.gamma * self.models.target_critic([\n next_state_batch, target_actions], training=True)\n critic_value = self.models.critic_model([state_batch,\n action_batch], training=True)\n critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))\n self.critic_grad = tape.gradient(critic_loss, self.models.\n critic_model.trainable_variables)\n self.models.critic_optimizer.apply_gradients(zip(self.critic_grad,\n self.models.critic_model.trainable_variables))\n with tf.GradientTape() as tape:\n actions = self.models.actor_model(state_batch, training=True)\n critic_value = self.models.critic_model([state_batch, actions],\n training=True)\n actor_loss = -tf.math.reduce_mean(critic_value)\n actor_grad = tape.gradient(actor_loss, self.models.actor_model.\n trainable_variables)\n self.models.actor_optimizer.apply_gradients(zip(actor_grad, self.\n models.actor_model.trainable_variables))\n\n def learn(self):\n record_range = min(self.buffer_counter, self.buffer_capacity)\n batch_indices = np.random.choice(record_range, self.batch_size)\n state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])\n action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])\n reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])\n reward_batch = tf.cast(reward_batch, dtype=tf.float32)\n next_state_batch = tf.convert_to_tensor(self.next_state_buffer[\n batch_indices])\n 
self.update(state_batch, action_batch, reward_batch, next_state_batch)\n self.update_target(self.models.target_actor.variables, self.models.\n actor_model.variables, self.tau)\n self.update_target(self.models.target_critic.variables, self.models\n .critic_model.variables, self.tau)\n\n def action(self, state, noise_object):\n return self.models.policy(state, noise_object)\n\n @tf.function\n def update_target(self, target_weights, weights, tau):\n for a, b in zip(target_weights, weights):\n a.assign(b * tau + a * (1 - tau))\n\n\n<mask token>\n\n\nclass OUActionNoise:\n\n def __init__(self, mean, std_deviation, theta=0.15, dt=0.01, x_initial=None\n ):\n self.theta = theta\n self.mean = mean\n self.std_dev = std_deviation\n self.dt = dt\n self.x_initial = x_initial\n self.reset()\n\n def __call__(self):\n x = self.x_prev + self.theta * (self.mean - self.x_prev\n ) * self.dt + self.std_dev * np.sqrt(self.dt) * np.random.normal(\n size=self.mean.shape)\n self.x_prev = x\n return x\n\n def reset(self):\n if self.x_initial is not None:\n self.x_prev = self.x_initial\n else:\n self.x_prev = np.zeros_like(self.mean)\n",
"step-4": "import tensorflow as tf\nimport numpy as np\nimport OpenAi.Pendulum.ActorCritic.Models as Models\n<mask token>\n\n\nclass Agent:\n\n def __init__(self, env, buffer_capacity=100000, batch_size=64, gamma=-\n 0.99, tau=0.005, critic_lr=0.002, actor_lr=0.001):\n num_states = env.observation_space.shape[0]\n num_actions = env.action_space.shape[0]\n self.buffer_capacity = buffer_capacity\n self.batch_size = batch_size\n self.buffer_counter = 0\n self.gamma = gamma\n self.tau = tau\n self.critic_lr = critic_lr\n self.actor_l = actor_lr\n self.state_buffer = np.zeros((self.buffer_capacity, num_states))\n self.action_buffer = np.zeros((self.buffer_capacity, num_actions))\n self.reward_buffer = np.zeros((self.buffer_capacity, 1))\n self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))\n self.models = Models.Models(env=env, critic_lr=critic_lr, actor_lr=\n actor_lr)\n\n def record(self, obs_tuple):\n index = self.buffer_counter % self.buffer_capacity\n self.state_buffer[index] = obs_tuple[0]\n self.action_buffer[index] = obs_tuple[1]\n self.reward_buffer[index] = obs_tuple[2]\n self.next_state_buffer[index] = obs_tuple[3]\n self.buffer_counter += 1\n\n @tf.function\n def update(self, state_batch, action_batch, reward_batch, next_state_batch\n ):\n with tf.GradientTape() as tape:\n target_actions = self.models.target_actor(next_state_batch,\n training=True)\n y = reward_batch + self.gamma * self.models.target_critic([\n next_state_batch, target_actions], training=True)\n critic_value = self.models.critic_model([state_batch,\n action_batch], training=True)\n critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))\n self.critic_grad = tape.gradient(critic_loss, self.models.\n critic_model.trainable_variables)\n self.models.critic_optimizer.apply_gradients(zip(self.critic_grad,\n self.models.critic_model.trainable_variables))\n with tf.GradientTape() as tape:\n actions = self.models.actor_model(state_batch, training=True)\n critic_value = 
self.models.critic_model([state_batch, actions],\n training=True)\n actor_loss = -tf.math.reduce_mean(critic_value)\n actor_grad = tape.gradient(actor_loss, self.models.actor_model.\n trainable_variables)\n self.models.actor_optimizer.apply_gradients(zip(actor_grad, self.\n models.actor_model.trainable_variables))\n\n def learn(self):\n record_range = min(self.buffer_counter, self.buffer_capacity)\n batch_indices = np.random.choice(record_range, self.batch_size)\n state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])\n action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])\n reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])\n reward_batch = tf.cast(reward_batch, dtype=tf.float32)\n next_state_batch = tf.convert_to_tensor(self.next_state_buffer[\n batch_indices])\n self.update(state_batch, action_batch, reward_batch, next_state_batch)\n self.update_target(self.models.target_actor.variables, self.models.\n actor_model.variables, self.tau)\n self.update_target(self.models.target_critic.variables, self.models\n .critic_model.variables, self.tau)\n\n def action(self, state, noise_object):\n return self.models.policy(state, noise_object)\n\n @tf.function\n def update_target(self, target_weights, weights, tau):\n for a, b in zip(target_weights, weights):\n a.assign(b * tau + a * (1 - tau))\n\n\n<mask token>\n\n\nclass OUActionNoise:\n\n def __init__(self, mean, std_deviation, theta=0.15, dt=0.01, x_initial=None\n ):\n self.theta = theta\n self.mean = mean\n self.std_dev = std_deviation\n self.dt = dt\n self.x_initial = x_initial\n self.reset()\n\n def __call__(self):\n x = self.x_prev + self.theta * (self.mean - self.x_prev\n ) * self.dt + self.std_dev * np.sqrt(self.dt) * np.random.normal(\n size=self.mean.shape)\n self.x_prev = x\n return x\n\n def reset(self):\n if self.x_initial is not None:\n self.x_prev = self.x_initial\n else:\n self.x_prev = np.zeros_like(self.mean)\n",
"step-5": "\n\nimport tensorflow as tf\nimport numpy as np\n\nimport OpenAi.Pendulum.ActorCritic.Models as Models\n\n\n\"\"\"\nThe `Buffer` class implements Experience Replay.\n---\n\n---\n**Critic loss** - Mean Squared Error of `y - Q(s, a)`\nwhere `y` is the expected return as seen by the Target network,\nand `Q(s, a)` is action value predicted by the Critic network. `y` is a moving target\nthat the critic model tries to achieve; we make this target\nstable by updating the Target model slowly.\n**Actor loss** - This is computed using the mean of the value given by the Critic network\nfor the actions taken by the Actor network. We seek to maximize this quantity.\nHence we update the Actor network so that it produces actions that get\nthe maximum predicted value as seen by the Critic, for a given state.\n\"\"\"\nclass Agent:\n def __init__(self, env, buffer_capacity=100000, batch_size=64, gamma = -0.99, tau = 0.005, critic_lr = 0.002, actor_lr = 0.001):\n num_states = env.observation_space.shape[0]\n num_actions = env.action_space.shape[0]\n # Number of \"experiences\" to store at max\n self.buffer_capacity = buffer_capacity\n # Num of tuples to train on.\n self.batch_size = batch_size\n\n # Its tells us num of times record() was called.\n self.buffer_counter = 0\n\n self.gamma = gamma\n self.tau = tau\n self.critic_lr = critic_lr\n self.actor_l = actor_lr\n\n # Instead of list of tuples as the exp.replay concept go\n # We use different np.arrays for each tuple element\n self.state_buffer = np.zeros((self.buffer_capacity, num_states))\n self.action_buffer = np.zeros((self.buffer_capacity, num_actions))\n self.reward_buffer = np.zeros((self.buffer_capacity, 1))\n self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))\n\n\n self.models = Models.Models(env=env, critic_lr = critic_lr, actor_lr = actor_lr)\n\n\n\n # Takes (s,a,r,s') obervation tuple as input\n def record(self, obs_tuple):\n # Set index to zero if buffer_capacity is exceeded,\n # replacing 
old records\n index = self.buffer_counter % self.buffer_capacity\n\n self.state_buffer[index] = obs_tuple[0]\n self.action_buffer[index] = obs_tuple[1]\n self.reward_buffer[index] = obs_tuple[2]\n self.next_state_buffer[index] = obs_tuple[3]\n\n self.buffer_counter += 1\n\n # Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows\n # TensorFlow to build a static graph out of the logic and computations in our function.\n # This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.\n @tf.function\n def update(self, state_batch, action_batch, reward_batch, next_state_batch,):\n # Training and updating Actor & Critic networks.\n # See Pseudo Code.\n with tf.GradientTape() as tape:\n target_actions = self.models.target_actor(next_state_batch, training=True)\n y = reward_batch + self.gamma * self.models.target_critic(\n [next_state_batch, target_actions], training=True\n )\n critic_value = self.models.critic_model([state_batch, action_batch], training=True)\n critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))\n\n self.critic_grad = tape.gradient(critic_loss, self.models.critic_model.trainable_variables)\n self.models.critic_optimizer.apply_gradients(\n zip(self.critic_grad, self.models.critic_model.trainable_variables)\n )\n\n with tf.GradientTape() as tape:\n actions = self.models.actor_model(state_batch, training=True)\n critic_value = self.models.critic_model([state_batch, actions], training=True)\n # Used `-value` as we want to maximize the value given\n # by the critic for our actions\n actor_loss = -tf.math.reduce_mean(critic_value)\n\n actor_grad = tape.gradient(actor_loss, self.models.actor_model.trainable_variables)\n self.models.actor_optimizer.apply_gradients(\n zip(actor_grad, self.models.actor_model.trainable_variables)\n )\n\n # We compute the loss and update parameters\n def learn(self):\n # Get sampling range\n record_range = 
min(self.buffer_counter, self.buffer_capacity)\n # Randomly sample indices\n batch_indices = np.random.choice(record_range, self.batch_size)\n # Convert to tensors\n state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])\n action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])\n reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])\n reward_batch = tf.cast(reward_batch, dtype=tf.float32)\n next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])\n\n self.update(state_batch, action_batch, reward_batch, next_state_batch)\n\n self.update_target(self.models.target_actor.variables, self.models.actor_model.variables, self.tau)\n self.update_target(self.models.target_critic.variables, self.models.critic_model.variables, self.tau)\n\n\n def action(self, state, noise_object):\n return self.models.policy(state, noise_object)\n\n @tf.function\n def update_target(self, target_weights, weights, tau):\n for (a, b) in zip(target_weights, weights):\n a.assign(b * tau + a * (1 - tau))\n\n\n\"\"\"\nTo implement better exploration by the Actor network, we use noisy perturbations,\nspecifically\nan **Ornstein-Uhlenbeck process** for generating noise, as described in the paper.\nIt samples noise from a correlated normal distribution.\n\"\"\"\nclass OUActionNoise:\n def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None):\n self.theta = theta\n self.mean = mean\n self.std_dev = std_deviation\n self.dt = dt\n self.x_initial = x_initial\n self.reset()\n\n def __call__(self):\n # Formula taken from https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.\n x = (\n self.x_prev\n + self.theta * (self.mean - self.x_prev) * self.dt\n + self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape)\n )\n # Store x into x_prev\n # Makes next noise dependent on current one\n self.x_prev = x\n return x\n\n def reset(self):\n if self.x_initial is not None:\n self.x_prev = self.x_initial\n else:\n 
self.x_prev = np.zeros_like(self.mean)\n\n\n\n",
"step-ids": [
8,
9,
10,
12,
13
]
}
|
[
8,
9,
10,
12,
13
] |
<|reserved_special_token_0|>
class _BaseNevergradOptimizer:
<|reserved_special_token_0|>
def __init__(self, method):
self.method = method
self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]
self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']
self.is_sequential = self.method in self.sequential_methods
if self.is_sequential:
seq_msg = '{} is a sequential method. batch size is set to 1'
cprint(seq_msg.format(self.method), 'y')
assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'
self.ng_optimizers = {}
self._sampled = {}
return
@torch.no_grad()
def setup_ng(self, var_manager, budget):
"""
initializes NeverGrad optimizer.
Args
var_manager (VariableManger): instance of the variable manager
budget (int): number of optimization iteration.
"""
for var_name, var_dict in var_manager.variable_info.items():
if var_dict['grad_free'] is False:
continue
if type(var_dict['grad_free']) == tuple:
mu, sigma = var_dict['grad_free']
if mu is None:
mu = np.zeros(var_dict['shape'])
if sigma is None:
sigma = 1.0
cma_opt = CMA(mu, sigma=sigma)
else:
mu = np.zeros(var_dict['shape'])
sigma = 1.0
opt_fn = ng.optimizers.registry[self.method]
p = ng.p.Array(init=mu)
ng_opt = opt_fn(parametrization=p, budget=budget)
self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt
assert len(self.ng_optimizers.keys()
) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(
self.ng_optimizers.keys())
return
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _BaseNevergradOptimizer:
<|reserved_special_token_0|>
def __init__(self, method):
self.method = method
self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]
self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']
self.is_sequential = self.method in self.sequential_methods
if self.is_sequential:
seq_msg = '{} is a sequential method. batch size is set to 1'
cprint(seq_msg.format(self.method), 'y')
assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'
self.ng_optimizers = {}
self._sampled = {}
return
@torch.no_grad()
def setup_ng(self, var_manager, budget):
"""
initializes NeverGrad optimizer.
Args
var_manager (VariableManger): instance of the variable manager
budget (int): number of optimization iteration.
"""
for var_name, var_dict in var_manager.variable_info.items():
if var_dict['grad_free'] is False:
continue
if type(var_dict['grad_free']) == tuple:
mu, sigma = var_dict['grad_free']
if mu is None:
mu = np.zeros(var_dict['shape'])
if sigma is None:
sigma = 1.0
cma_opt = CMA(mu, sigma=sigma)
else:
mu = np.zeros(var_dict['shape'])
sigma = 1.0
opt_fn = ng.optimizers.registry[self.method]
p = ng.p.Array(init=mu)
ng_opt = opt_fn(parametrization=p, budget=budget)
self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt
assert len(self.ng_optimizers.keys()
) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(
self.ng_optimizers.keys())
return
<|reserved_special_token_0|>
@torch.no_grad()
def ng_update(self, variables, loss=None, inverted_loss=False):
"""
Updates NG distribution either with the provided loss or loss that
is recomputed.
Args:
variables (dict): a dictionary instance generated from the
variable manager.
loss (array or list): a 1-dimensional array or list consisting of
losses corresponding to each sample. If the loss is not
provided, uses the variables to recompute the loss.
[Default: None]
inverted_loss (bool): if True, the loss is computed after inverting
the generated images back to the original target. For example
this is used to compute the loss on the original target.
[Default: False]
"""
for (var_type, var_name), ng_opt in self.ng_optimizers.items():
ng_data = self._sampled[var_type, var_name]
if loss is None:
out, loss, _ = self.step(variables, optimize=False)
if inverted_loss and hasattr(variables, 'transform'):
target_type = self.var_manager.variable_info['target'][
'var_type']
weight_type = self.var_manager.variable_info['weight'][
'var_type']
target = self.var_manager.variable_info['target']['default']
weight = self.var_manager.variable_info['weight']['default']
target = target.unsqueeze(0).type_as(out)
weight = weight.unsqueeze(0).type_as(out)
t_fn = self.transform_fns['target']['fn']
t_param = torch.stack(variables.transform.t.data)
out = t_fn(out, t_param, invert=True)
loss = self.loss_fn(out, target, binarize(weight))
loss = loss.cpu().detach().numpy()
for d, l in zip(ng_data, loss):
ng_opt.tell(d, l)
return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _BaseNevergradOptimizer:
<|reserved_special_token_0|>
def __init__(self, method):
self.method = method
self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]
self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']
self.is_sequential = self.method in self.sequential_methods
if self.is_sequential:
seq_msg = '{} is a sequential method. batch size is set to 1'
cprint(seq_msg.format(self.method), 'y')
assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'
self.ng_optimizers = {}
self._sampled = {}
return
@torch.no_grad()
def setup_ng(self, var_manager, budget):
"""
initializes NeverGrad optimizer.
Args
var_manager (VariableManger): instance of the variable manager
budget (int): number of optimization iteration.
"""
for var_name, var_dict in var_manager.variable_info.items():
if var_dict['grad_free'] is False:
continue
if type(var_dict['grad_free']) == tuple:
mu, sigma = var_dict['grad_free']
if mu is None:
mu = np.zeros(var_dict['shape'])
if sigma is None:
sigma = 1.0
cma_opt = CMA(mu, sigma=sigma)
else:
mu = np.zeros(var_dict['shape'])
sigma = 1.0
opt_fn = ng.optimizers.registry[self.method]
p = ng.p.Array(init=mu)
ng_opt = opt_fn(parametrization=p, budget=budget)
self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt
assert len(self.ng_optimizers.keys()
) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(
self.ng_optimizers.keys())
return
@torch.no_grad()
def ng_init(self, var_manager, num_samples):
"""
Args
var_manager (VariableManger): instance of the variable manager
num_samples (int): number of samples for mini-batch optimization
"""
if self.is_sequential:
vars = var_manager.initialize(num_seeds=1)
num_samples = 1
else:
vars = var_manager.initialize(num_samples=num_samples)
for (var_type, var_name), ng_opt in self.ng_optimizers.items():
ng_data = [ng_opt.ask() for _ in range(num_samples)]
_ng_data = np.concatenate([x.args for x in ng_data])
for i, d in enumerate(_ng_data):
vars[var_type][var_name].data[i].data = torch.Tensor(d
).data.type_as(vars[var_type][var_name].data[i].data)
self._sampled[var_type, var_name] = ng_data
return vars
@torch.no_grad()
def ng_update(self, variables, loss=None, inverted_loss=False):
"""
Updates NG distribution either with the provided loss or loss that
is recomputed.
Args:
variables (dict): a dictionary instance generated from the
variable manager.
loss (array or list): a 1-dimensional array or list consisting of
losses corresponding to each sample. If the loss is not
provided, uses the variables to recompute the loss.
[Default: None]
inverted_loss (bool): if True, the loss is computed after inverting
the generated images back to the original target. For example
this is used to compute the loss on the original target.
[Default: False]
"""
for (var_type, var_name), ng_opt in self.ng_optimizers.items():
ng_data = self._sampled[var_type, var_name]
if loss is None:
out, loss, _ = self.step(variables, optimize=False)
if inverted_loss and hasattr(variables, 'transform'):
target_type = self.var_manager.variable_info['target'][
'var_type']
weight_type = self.var_manager.variable_info['weight'][
'var_type']
target = self.var_manager.variable_info['target']['default']
weight = self.var_manager.variable_info['weight']['default']
target = target.unsqueeze(0).type_as(out)
weight = weight.unsqueeze(0).type_as(out)
t_fn = self.transform_fns['target']['fn']
t_param = torch.stack(variables.transform.t.data)
out = t_fn(out, t_param, invert=True)
loss = self.loss_fn(out, target, binarize(weight))
loss = loss.cpu().detach().numpy()
for d, l in zip(ng_data, loss):
ng_opt.tell(d, l)
return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _BaseNevergradOptimizer:
"""
Base template for NeverGrad optimization. Should be used jointly with
BaseOptimizer.
For full list of available optimizers
> https://github.com/facebookresearch/nevergrad
or ...
> print(self.valid_methods)
Args:
method: nevergrad optimization method
NOTE:
nevergrad CMA have been observed to perform wrose than the original
codebase. use with warning. nevergrad has a perk of being optimized
in parallel, hence batch-size can be arbitrarily chosen.
"""
def __init__(self, method):
self.method = method
self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]
self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']
self.is_sequential = self.method in self.sequential_methods
if self.is_sequential:
seq_msg = '{} is a sequential method. batch size is set to 1'
cprint(seq_msg.format(self.method), 'y')
assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'
self.ng_optimizers = {}
self._sampled = {}
return
@torch.no_grad()
def setup_ng(self, var_manager, budget):
"""
initializes NeverGrad optimizer.
Args
var_manager (VariableManger): instance of the variable manager
budget (int): number of optimization iteration.
"""
for var_name, var_dict in var_manager.variable_info.items():
if var_dict['grad_free'] is False:
continue
if type(var_dict['grad_free']) == tuple:
mu, sigma = var_dict['grad_free']
if mu is None:
mu = np.zeros(var_dict['shape'])
if sigma is None:
sigma = 1.0
cma_opt = CMA(mu, sigma=sigma)
else:
mu = np.zeros(var_dict['shape'])
sigma = 1.0
opt_fn = ng.optimizers.registry[self.method]
p = ng.p.Array(init=mu)
ng_opt = opt_fn(parametrization=p, budget=budget)
self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt
assert len(self.ng_optimizers.keys()
) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(
self.ng_optimizers.keys())
return
@torch.no_grad()
def ng_init(self, var_manager, num_samples):
"""
Args
var_manager (VariableManger): instance of the variable manager
num_samples (int): number of samples for mini-batch optimization
"""
if self.is_sequential:
vars = var_manager.initialize(num_seeds=1)
num_samples = 1
else:
vars = var_manager.initialize(num_samples=num_samples)
for (var_type, var_name), ng_opt in self.ng_optimizers.items():
ng_data = [ng_opt.ask() for _ in range(num_samples)]
_ng_data = np.concatenate([x.args for x in ng_data])
for i, d in enumerate(_ng_data):
vars[var_type][var_name].data[i].data = torch.Tensor(d
).data.type_as(vars[var_type][var_name].data[i].data)
self._sampled[var_type, var_name] = ng_data
return vars
@torch.no_grad()
def ng_update(self, variables, loss=None, inverted_loss=False):
"""
Updates NG distribution either with the provided loss or loss that
is recomputed.
Args:
variables (dict): a dictionary instance generated from the
variable manager.
loss (array or list): a 1-dimensional array or list consisting of
losses corresponding to each sample. If the loss is not
provided, uses the variables to recompute the loss.
[Default: None]
inverted_loss (bool): if True, the loss is computed after inverting
the generated images back to the original target. For example
this is used to compute the loss on the original target.
[Default: False]
"""
for (var_type, var_name), ng_opt in self.ng_optimizers.items():
ng_data = self._sampled[var_type, var_name]
if loss is None:
out, loss, _ = self.step(variables, optimize=False)
if inverted_loss and hasattr(variables, 'transform'):
target_type = self.var_manager.variable_info['target'][
'var_type']
weight_type = self.var_manager.variable_info['weight'][
'var_type']
target = self.var_manager.variable_info['target']['default']
weight = self.var_manager.variable_info['weight']['default']
target = target.unsqueeze(0).type_as(out)
weight = weight.unsqueeze(0).type_as(out)
t_fn = self.transform_fns['target']['fn']
t_param = torch.stack(variables.transform.t.data)
out = t_fn(out, t_param, invert=True)
loss = self.loss_fn(out, target, binarize(weight))
loss = loss.cpu().detach().numpy()
for d, l in zip(ng_data, loss):
ng_opt.tell(d, l)
return
<|reserved_special_token_1|>
import nevergrad as ng
import numpy as np
import torch
from pix2latent.utils.image import binarize
class _BaseNevergradOptimizer():
    """
    Base template for NeverGrad optimization. Should be used jointly with
    BaseOptimizer.

    For full list of available optimizers
    > https://github.com/facebookresearch/nevergrad
    or ...
    > print(self.valid_methods)

    Args:
        method (str): nevergrad optimization method (a key of
            ng.optimizers.registry).

    NOTE:
        nevergrad CMA have been observed to perform worse than the original
        codebase. use with warning. nevergrad has a perk of being optimized
        in parallel, hence batch-size can be arbitrarily chosen.
    """

    def __init__(self, method):
        self.method = method
        self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]

        # this is not an exhaustive list
        self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']
        self.is_sequential = self.method in self.sequential_methods

        if self.is_sequential:
            # NOTE(fix): the original called `cprint`, which is not imported
            # anywhere in this module and raised a NameError the moment a
            # sequential method was selected. Plain print keeps the warning
            # without adding a dependency.
            print('{} is a sequential method. batch size is set to 1'.format(
                self.method))

        assert self.method in self.valid_methods, \
            f'unknown nevergrad method: {self.method}'

        self.ng_optimizers = {}
        self._sampled = {}
        return

    @torch.no_grad()
    def setup_ng(self, var_manager, budget):
        """
        initializes NeverGrad optimizer.

        Args
            var_manager (VariableManger): instance of the variable manager
            budget (int): number of optimization iteration.
        """
        for var_name, var_dict in var_manager.variable_info.items():

            if var_dict['grad_free'] is False:
                continue

            if type(var_dict['grad_free']) == tuple:
                # user-supplied (mu, sigma); either entry may be None, in
                # which case the default below is used.
                mu, sigma = var_dict['grad_free']
                if mu is None:
                    mu = np.zeros(var_dict['shape'])
                if sigma is None:
                    sigma = 1.
            else:
                mu = np.zeros(var_dict['shape'])
                sigma = 1.0

            opt_fn = ng.optimizers.registry[self.method]
            # NOTE(fix): the original instantiated an undefined `CMA` class
            # in the tuple branch (NameError) and left `sigma` unused
            # (set_mutation was commented out). The mutation scale is now
            # applied through the parametrization instead.
            p = ng.p.Array(init=mu).set_mutation(sigma=sigma)
            ng_opt = opt_fn(parametrization=p, budget=budget)
            self.ng_optimizers[(var_dict['var_type'], var_name)] = ng_opt

        assert len(self.ng_optimizers.keys()) == 1, \
            'currently only a single input variable can be optimized via '+\
            'Nevergrad but got: {}'.format(self.ng_optimizers.keys())
        return

    @torch.no_grad()
    def ng_init(self, var_manager, num_samples):
        """
        Draws `num_samples` candidates from each NeverGrad optimizer and
        writes them into freshly initialized variables.

        Args
            var_manager (VariableManger): instance of the variable manager
            num_samples (int): number of samples for mini-batch optimization.
                Forced to 1 for sequential methods.

        Returns:
            the initialized variable collection with NG-sampled data.
        """
        if self.is_sequential:
            vars = var_manager.initialize(num_seeds=1)
            num_samples = 1
        else:
            vars = var_manager.initialize(num_samples=num_samples)

        for (var_type, var_name), ng_opt in self.ng_optimizers.items():
            ng_data = [ng_opt.ask() for _ in range(num_samples)]
            _ng_data = np.concatenate([x.args for x in ng_data])

            for i, d in enumerate(_ng_data):
                vars[var_type][var_name].data[i].data = \
                        torch.Tensor(d).data.type_as(
                                vars[var_type][var_name].data[i].data)

            # keep the candidate handles so ng_update can `tell` losses back
            self._sampled[(var_type, var_name)] = ng_data
        return vars

    @torch.no_grad()
    def ng_update(self, variables, loss=None, inverted_loss=False):
        """
        Updates NG distribution either with the provided loss or loss that
        is recomputed.

        Args:
            variables (dict): a dictionary instance generated from the
                variable manager.
            loss (array or list): a 1-dimensional array or list consisting of
                losses corresponding to each sample. If the loss is not
                provided, uses the variables to recompute the loss.
                [Default: None]
            inverted_loss (bool): if True, the loss is computed after inverting
                the generated images back to the original target. For example
                this is used to compute the loss on the original target.
                [Default: False]
        """
        for (var_type, var_name), ng_opt in self.ng_optimizers.items():
            ng_data = self._sampled[(var_type, var_name)]

            if loss is None:
                out, loss, _ = self.step(variables, optimize=False)

                if inverted_loss and hasattr(variables, 'transform'):
                    # undo the spatial transform on the generated output and
                    # score it against the untransformed target.
                    target = self.var_manager.variable_info['target']['default']
                    weight = self.var_manager.variable_info['weight']['default']
                    target = target.unsqueeze(0).type_as(out)
                    weight = weight.unsqueeze(0).type_as(out)

                    t_fn = self.transform_fns['target']['fn']
                    t_param = torch.stack(variables.transform.t.data)

                    out = t_fn(out, t_param, invert=True)
                    loss = self.loss_fn(out, target, binarize(weight))
                    loss = loss.cpu().detach().numpy()

            for d, l in zip(ng_data, loss):
                ng_opt.tell(d, l)
        return
|
flexible
|
{
"blob_id": "4a136a6284add3bcbd7f9546e18e79151cea685f",
"index": 623,
"step-1": "<mask token>\n\n\nclass _BaseNevergradOptimizer:\n <mask token>\n\n def __init__(self, method):\n self.method = method\n self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]\n self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']\n self.is_sequential = self.method in self.sequential_methods\n if self.is_sequential:\n seq_msg = '{} is a sequential method. batch size is set to 1'\n cprint(seq_msg.format(self.method), 'y')\n assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'\n self.ng_optimizers = {}\n self._sampled = {}\n return\n\n @torch.no_grad()\n def setup_ng(self, var_manager, budget):\n \"\"\"\n initializes NeverGrad optimizer.\n\n Args\n var_manager (VariableManger): instance of the variable manager\n budget (int): number of optimization iteration.\n \"\"\"\n for var_name, var_dict in var_manager.variable_info.items():\n if var_dict['grad_free'] is False:\n continue\n if type(var_dict['grad_free']) == tuple:\n mu, sigma = var_dict['grad_free']\n if mu is None:\n mu = np.zeros(var_dict['shape'])\n if sigma is None:\n sigma = 1.0\n cma_opt = CMA(mu, sigma=sigma)\n else:\n mu = np.zeros(var_dict['shape'])\n sigma = 1.0\n opt_fn = ng.optimizers.registry[self.method]\n p = ng.p.Array(init=mu)\n ng_opt = opt_fn(parametrization=p, budget=budget)\n self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt\n assert len(self.ng_optimizers.keys()\n ) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(\n self.ng_optimizers.keys())\n return\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass _BaseNevergradOptimizer:\n <mask token>\n\n def __init__(self, method):\n self.method = method\n self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]\n self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']\n self.is_sequential = self.method in self.sequential_methods\n if self.is_sequential:\n seq_msg = '{} is a sequential method. batch size is set to 1'\n cprint(seq_msg.format(self.method), 'y')\n assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'\n self.ng_optimizers = {}\n self._sampled = {}\n return\n\n @torch.no_grad()\n def setup_ng(self, var_manager, budget):\n \"\"\"\n initializes NeverGrad optimizer.\n\n Args\n var_manager (VariableManger): instance of the variable manager\n budget (int): number of optimization iteration.\n \"\"\"\n for var_name, var_dict in var_manager.variable_info.items():\n if var_dict['grad_free'] is False:\n continue\n if type(var_dict['grad_free']) == tuple:\n mu, sigma = var_dict['grad_free']\n if mu is None:\n mu = np.zeros(var_dict['shape'])\n if sigma is None:\n sigma = 1.0\n cma_opt = CMA(mu, sigma=sigma)\n else:\n mu = np.zeros(var_dict['shape'])\n sigma = 1.0\n opt_fn = ng.optimizers.registry[self.method]\n p = ng.p.Array(init=mu)\n ng_opt = opt_fn(parametrization=p, budget=budget)\n self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt\n assert len(self.ng_optimizers.keys()\n ) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(\n self.ng_optimizers.keys())\n return\n <mask token>\n\n @torch.no_grad()\n def ng_update(self, variables, loss=None, inverted_loss=False):\n \"\"\"\n Updates NG distribution either with the provided loss or loss that\n is recomputed.\n\n Args:\n variables (dict): a dictionary instance generated from the\n variable manager.\n loss (array or list): a 1-dimensional array or list consisting of\n losses corresponding to each sample. 
If the loss is not\n provided, uses the variables to recompute the loss.\n [Default: None]\n inverted_loss (bool): if True, the loss is computed after inverting\n the generated images back to the original target. For example\n this is used to compute the loss on the original target.\n [Default: False]\n \"\"\"\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\n ng_data = self._sampled[var_type, var_name]\n if loss is None:\n out, loss, _ = self.step(variables, optimize=False)\n if inverted_loss and hasattr(variables, 'transform'):\n target_type = self.var_manager.variable_info['target'][\n 'var_type']\n weight_type = self.var_manager.variable_info['weight'][\n 'var_type']\n target = self.var_manager.variable_info['target']['default']\n weight = self.var_manager.variable_info['weight']['default']\n target = target.unsqueeze(0).type_as(out)\n weight = weight.unsqueeze(0).type_as(out)\n t_fn = self.transform_fns['target']['fn']\n t_param = torch.stack(variables.transform.t.data)\n out = t_fn(out, t_param, invert=True)\n loss = self.loss_fn(out, target, binarize(weight))\n loss = loss.cpu().detach().numpy()\n for d, l in zip(ng_data, loss):\n ng_opt.tell(d, l)\n return\n",
"step-3": "<mask token>\n\n\nclass _BaseNevergradOptimizer:\n <mask token>\n\n def __init__(self, method):\n self.method = method\n self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]\n self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']\n self.is_sequential = self.method in self.sequential_methods\n if self.is_sequential:\n seq_msg = '{} is a sequential method. batch size is set to 1'\n cprint(seq_msg.format(self.method), 'y')\n assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'\n self.ng_optimizers = {}\n self._sampled = {}\n return\n\n @torch.no_grad()\n def setup_ng(self, var_manager, budget):\n \"\"\"\n initializes NeverGrad optimizer.\n\n Args\n var_manager (VariableManger): instance of the variable manager\n budget (int): number of optimization iteration.\n \"\"\"\n for var_name, var_dict in var_manager.variable_info.items():\n if var_dict['grad_free'] is False:\n continue\n if type(var_dict['grad_free']) == tuple:\n mu, sigma = var_dict['grad_free']\n if mu is None:\n mu = np.zeros(var_dict['shape'])\n if sigma is None:\n sigma = 1.0\n cma_opt = CMA(mu, sigma=sigma)\n else:\n mu = np.zeros(var_dict['shape'])\n sigma = 1.0\n opt_fn = ng.optimizers.registry[self.method]\n p = ng.p.Array(init=mu)\n ng_opt = opt_fn(parametrization=p, budget=budget)\n self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt\n assert len(self.ng_optimizers.keys()\n ) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(\n self.ng_optimizers.keys())\n return\n\n @torch.no_grad()\n def ng_init(self, var_manager, num_samples):\n \"\"\"\n Args\n var_manager (VariableManger): instance of the variable manager\n num_samples (int): number of samples for mini-batch optimization\n \"\"\"\n if self.is_sequential:\n vars = var_manager.initialize(num_seeds=1)\n num_samples = 1\n else:\n vars = var_manager.initialize(num_samples=num_samples)\n for (var_type, var_name), ng_opt in 
self.ng_optimizers.items():\n ng_data = [ng_opt.ask() for _ in range(num_samples)]\n _ng_data = np.concatenate([x.args for x in ng_data])\n for i, d in enumerate(_ng_data):\n vars[var_type][var_name].data[i].data = torch.Tensor(d\n ).data.type_as(vars[var_type][var_name].data[i].data)\n self._sampled[var_type, var_name] = ng_data\n return vars\n\n @torch.no_grad()\n def ng_update(self, variables, loss=None, inverted_loss=False):\n \"\"\"\n Updates NG distribution either with the provided loss or loss that\n is recomputed.\n\n Args:\n variables (dict): a dictionary instance generated from the\n variable manager.\n loss (array or list): a 1-dimensional array or list consisting of\n losses corresponding to each sample. If the loss is not\n provided, uses the variables to recompute the loss.\n [Default: None]\n inverted_loss (bool): if True, the loss is computed after inverting\n the generated images back to the original target. For example\n this is used to compute the loss on the original target.\n [Default: False]\n \"\"\"\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\n ng_data = self._sampled[var_type, var_name]\n if loss is None:\n out, loss, _ = self.step(variables, optimize=False)\n if inverted_loss and hasattr(variables, 'transform'):\n target_type = self.var_manager.variable_info['target'][\n 'var_type']\n weight_type = self.var_manager.variable_info['weight'][\n 'var_type']\n target = self.var_manager.variable_info['target']['default']\n weight = self.var_manager.variable_info['weight']['default']\n target = target.unsqueeze(0).type_as(out)\n weight = weight.unsqueeze(0).type_as(out)\n t_fn = self.transform_fns['target']['fn']\n t_param = torch.stack(variables.transform.t.data)\n out = t_fn(out, t_param, invert=True)\n loss = self.loss_fn(out, target, binarize(weight))\n loss = loss.cpu().detach().numpy()\n for d, l in zip(ng_data, loss):\n ng_opt.tell(d, l)\n return\n",
"step-4": "<mask token>\n\n\nclass _BaseNevergradOptimizer:\n \"\"\"\n Base template for NeverGrad optimization. Should be used jointly with\n BaseOptimizer.\n\n For full list of available optimizers\n > https://github.com/facebookresearch/nevergrad\n\n or ...\n > print(self.valid_methods)\n\n Args:\n method: nevergrad optimization method\n\n NOTE:\n nevergrad CMA have been observed to perform wrose than the original\n codebase. use with warning. nevergrad has a perk of being optimized\n in parallel, hence batch-size can be arbitrarily chosen.\n \"\"\"\n\n def __init__(self, method):\n self.method = method\n self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]\n self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']\n self.is_sequential = self.method in self.sequential_methods\n if self.is_sequential:\n seq_msg = '{} is a sequential method. batch size is set to 1'\n cprint(seq_msg.format(self.method), 'y')\n assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'\n self.ng_optimizers = {}\n self._sampled = {}\n return\n\n @torch.no_grad()\n def setup_ng(self, var_manager, budget):\n \"\"\"\n initializes NeverGrad optimizer.\n\n Args\n var_manager (VariableManger): instance of the variable manager\n budget (int): number of optimization iteration.\n \"\"\"\n for var_name, var_dict in var_manager.variable_info.items():\n if var_dict['grad_free'] is False:\n continue\n if type(var_dict['grad_free']) == tuple:\n mu, sigma = var_dict['grad_free']\n if mu is None:\n mu = np.zeros(var_dict['shape'])\n if sigma is None:\n sigma = 1.0\n cma_opt = CMA(mu, sigma=sigma)\n else:\n mu = np.zeros(var_dict['shape'])\n sigma = 1.0\n opt_fn = ng.optimizers.registry[self.method]\n p = ng.p.Array(init=mu)\n ng_opt = opt_fn(parametrization=p, budget=budget)\n self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt\n assert len(self.ng_optimizers.keys()\n ) == 1, 'currently only a single input variable can be optimized via ' + 
'Nevergrad but got: {}'.format(\n self.ng_optimizers.keys())\n return\n\n @torch.no_grad()\n def ng_init(self, var_manager, num_samples):\n \"\"\"\n Args\n var_manager (VariableManger): instance of the variable manager\n num_samples (int): number of samples for mini-batch optimization\n \"\"\"\n if self.is_sequential:\n vars = var_manager.initialize(num_seeds=1)\n num_samples = 1\n else:\n vars = var_manager.initialize(num_samples=num_samples)\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\n ng_data = [ng_opt.ask() for _ in range(num_samples)]\n _ng_data = np.concatenate([x.args for x in ng_data])\n for i, d in enumerate(_ng_data):\n vars[var_type][var_name].data[i].data = torch.Tensor(d\n ).data.type_as(vars[var_type][var_name].data[i].data)\n self._sampled[var_type, var_name] = ng_data\n return vars\n\n @torch.no_grad()\n def ng_update(self, variables, loss=None, inverted_loss=False):\n \"\"\"\n Updates NG distribution either with the provided loss or loss that\n is recomputed.\n\n Args:\n variables (dict): a dictionary instance generated from the\n variable manager.\n loss (array or list): a 1-dimensional array or list consisting of\n losses corresponding to each sample. If the loss is not\n provided, uses the variables to recompute the loss.\n [Default: None]\n inverted_loss (bool): if True, the loss is computed after inverting\n the generated images back to the original target. 
For example\n this is used to compute the loss on the original target.\n [Default: False]\n \"\"\"\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\n ng_data = self._sampled[var_type, var_name]\n if loss is None:\n out, loss, _ = self.step(variables, optimize=False)\n if inverted_loss and hasattr(variables, 'transform'):\n target_type = self.var_manager.variable_info['target'][\n 'var_type']\n weight_type = self.var_manager.variable_info['weight'][\n 'var_type']\n target = self.var_manager.variable_info['target']['default']\n weight = self.var_manager.variable_info['weight']['default']\n target = target.unsqueeze(0).type_as(out)\n weight = weight.unsqueeze(0).type_as(out)\n t_fn = self.transform_fns['target']['fn']\n t_param = torch.stack(variables.transform.t.data)\n out = t_fn(out, t_param, invert=True)\n loss = self.loss_fn(out, target, binarize(weight))\n loss = loss.cpu().detach().numpy()\n for d, l in zip(ng_data, loss):\n ng_opt.tell(d, l)\n return\n",
"step-5": "import nevergrad as ng\r\n\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom pix2latent.utils.image import binarize\r\n\r\n\r\n\r\nclass _BaseNevergradOptimizer():\r\n \"\"\"\r\n Base template for NeverGrad optimization. Should be used jointly with\r\n BaseOptimizer.\r\n\r\n For full list of available optimizers\r\n > https://github.com/facebookresearch/nevergrad\r\n\r\n or ...\r\n > print(self.valid_methods)\r\n\r\n Args:\r\n method: nevergrad optimization method\r\n\r\n NOTE:\r\n nevergrad CMA have been observed to perform wrose than the original\r\n codebase. use with warning. nevergrad has a perk of being optimized\r\n in parallel, hence batch-size can be arbitrarily chosen.\r\n \"\"\"\r\n\r\n def __init__(self, method):\r\n\r\n self.method = method\r\n self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]\r\n\r\n # this is not an exhaustive list\r\n self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']\r\n self.is_sequential = self.method in self.sequential_methods\r\n\r\n if self.is_sequential:\r\n seq_msg = '{} is a sequential method. 
batch size is set to 1'\r\n cprint(seq_msg.format(self.method), 'y')\r\n\r\n assert self.method in self.valid_methods, \\\r\n f'unknown nevergrad method: {self.method}'\r\n\r\n self.ng_optimizers = {}\r\n self._sampled = {}\r\n return\r\n\r\n\r\n @torch.no_grad()\r\n def setup_ng(self, var_manager, budget):\r\n \"\"\"\r\n initializes NeverGrad optimizer.\r\n\r\n Args\r\n var_manager (VariableManger): instance of the variable manager\r\n budget (int): number of optimization iteration.\r\n \"\"\"\r\n\r\n for var_name, var_dict in var_manager.variable_info.items():\r\n\r\n if var_dict['grad_free'] is False:\r\n continue\r\n\r\n if type(var_dict['grad_free']) == tuple:\r\n mu, sigma = var_dict['grad_free']\r\n\r\n if mu is None:\r\n mu = np.zeros(var_dict['shape'])\r\n\r\n if sigma is None:\r\n sigma = 1.\r\n\r\n cma_opt = CMA(mu, sigma=sigma)\r\n\r\n else:\r\n mu = np.zeros(var_dict['shape'])\r\n sigma = 1.0\r\n\r\n opt_fn = ng.optimizers.registry[self.method]\r\n p = ng.p.Array(init=mu)#.set_mutation(sigma=sigma)\r\n ng_opt = opt_fn(parametrization=p, budget=budget)\r\n\r\n self.ng_optimizers[(var_dict['var_type'], var_name)] = ng_opt\r\n\r\n assert len(self.ng_optimizers.keys()) == 1, \\\r\n 'currently only a single input variable can be optimized via '+\\\r\n 'Nevergrad but got: {}'.format(self.ng_optimizers.keys())\r\n return\r\n\r\n\r\n @torch.no_grad()\r\n def ng_init(self, var_manager, num_samples):\r\n \"\"\"\r\n Args\r\n var_manager (VariableManger): instance of the variable manager\r\n num_samples (int): number of samples for mini-batch optimization\r\n \"\"\"\r\n if self.is_sequential:\r\n vars = var_manager.initialize(num_seeds=1)\r\n num_samples = 1\r\n else:\r\n vars = var_manager.initialize(num_samples=num_samples)\r\n\r\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\r\n ng_data = [ng_opt.ask() for _ in range(num_samples)]\r\n\r\n _ng_data = np.concatenate([x.args for x in ng_data])\r\n\r\n for i, d in enumerate(_ng_data):\r\n 
vars[var_type][var_name].data[i].data = \\\r\n torch.Tensor(d).data.type_as(\r\n vars[var_type][var_name].data[i].data)\r\n\r\n self._sampled[(var_type, var_name)] = ng_data\r\n\r\n return vars\r\n\r\n\r\n @torch.no_grad()\r\n def ng_update(self, variables, loss=None, inverted_loss=False):\r\n\r\n \"\"\"\r\n Updates NG distribution either with the provided loss or loss that\r\n is recomputed.\r\n\r\n Args:\r\n variables (dict): a dictionary instance generated from the\r\n variable manager.\r\n loss (array or list): a 1-dimensional array or list consisting of\r\n losses corresponding to each sample. If the loss is not\r\n provided, uses the variables to recompute the loss.\r\n [Default: None]\r\n inverted_loss (bool): if True, the loss is computed after inverting\r\n the generated images back to the original target. For example\r\n this is used to compute the loss on the original target.\r\n [Default: False]\r\n \"\"\"\r\n\r\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\r\n\r\n ng_data = self._sampled[(var_type, var_name)]\r\n\r\n if loss is None:\r\n out, loss, _ = self.step(variables, optimize=False)\r\n\r\n if inverted_loss and hasattr(variables, 'transform'):\r\n\r\n target_type = \\\r\n self.var_manager.variable_info['target']['var_type']\r\n weight_type = \\\r\n self.var_manager.variable_info['weight']['var_type']\r\n\r\n target = self.var_manager.variable_info['target']['default']\r\n weight = self.var_manager.variable_info['weight']['default']\r\n\r\n target = target.unsqueeze(0).type_as(out)\r\n weight = weight.unsqueeze(0).type_as(out)\r\n\r\n t_fn = self.transform_fns['target']['fn']\r\n t_param = torch.stack(variables.transform.t.data)\r\n out = t_fn(out, t_param, invert=True)\r\n\r\n loss = self.loss_fn(out, target, binarize(weight))\r\n loss = loss.cpu().detach().numpy()\r\n\r\n for d, l in zip(ng_data, loss):\r\n ng_opt.tell(d, l)\r\n\r\n return\r\n",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
from pathlib import Path
import eyed3
import csv
import sys
import filetype
import os
# Export the ID3 tags (title, artist, genre) of one MP3 file to 'loveMusic.csv'.
pathFile = Path(
    'C:\\Users\\JORGE\\Music\\Vicente Garcia - Te Soñé (Lyric Video)(MP3_160K).mp3'
)
audiofile = eyed3.load(pathFile)
with open('loveMusic.csv', 'w', newline='') as csvFile:
    fieldsName = ['nameFile', 'tittle', 'artist', 'gender', 'path']
    writer = csv.DictWriter(csvFile, fieldnames=fieldsName)
    writer.writeheader()
    # BUG FIX: keep the values as str. The original wrote the raw bytes
    # returned by .encode(), so csv emitted literal "b'...'" text.
    # encode/decode with 'ignore' still strips any non-ASCII characters.
    tittle = (audiofile.tag.title.encode('ascii', 'ignore').decode('ascii')
              if audiofile.tag.title is not None else '')
    artist = (audiofile.tag.artist.encode('ascii', 'ignore').decode('ascii')
              if audiofile.tag.artist is not None else '')
    gender = (audiofile.tag.genre.name.encode('ascii', 'ignore').decode('ascii')
              if audiofile.tag.genre is not None else '')
    writer.writerow({
        # BUG FIX: was the literal string 'nameFile'; write the actual file name.
        'nameFile': pathFile.name.encode('ascii', 'ignore').decode('ascii'),
        'tittle': tittle,
        'artist': artist,
        'gender': gender,
        'path': str(pathFile).encode('ascii', 'ignore').decode('ascii'),
    })
|
normal
|
{
"blob_id": "629649abe9d855122a5db6d61a20735ceb89c5cf",
"index": 6426,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('loveMusic.csv', 'w', newline='') as csvFile:\n fieldsName = ['nameFile', 'tittle', 'artist', 'gender', 'path']\n writer = csv.DictWriter(csvFile, fieldnames=fieldsName)\n writer.writeheader()\n tittle = audiofile.tag.title.encode('ascii', 'ignore'\n ) if audiofile.tag.title is not None else ''\n artist = audiofile.tag.artist.encode('ascii', 'ignore'\n ) if audiofile.tag.artist is not None else ''\n gender = audiofile.tag.genre.name.encode('ascii', 'ignore'\n ) if audiofile.tag.genre is not None else ''\n writer.writerow({'nameFile': 'nameFile', 'tittle': tittle, 'artist':\n artist, 'gender': gender, 'path': str(pathFile).encode('ascii',\n 'ignore')})\n",
"step-3": "<mask token>\npathFile = Path(\n 'C:\\\\Users\\\\JORGE\\\\Music\\\\Vicente Garcia - Te Soñé (Lyric Video)(MP3_160K).mp3'\n )\naudiofile = eyed3.load(pathFile)\nwith open('loveMusic.csv', 'w', newline='') as csvFile:\n fieldsName = ['nameFile', 'tittle', 'artist', 'gender', 'path']\n writer = csv.DictWriter(csvFile, fieldnames=fieldsName)\n writer.writeheader()\n tittle = audiofile.tag.title.encode('ascii', 'ignore'\n ) if audiofile.tag.title is not None else ''\n artist = audiofile.tag.artist.encode('ascii', 'ignore'\n ) if audiofile.tag.artist is not None else ''\n gender = audiofile.tag.genre.name.encode('ascii', 'ignore'\n ) if audiofile.tag.genre is not None else ''\n writer.writerow({'nameFile': 'nameFile', 'tittle': tittle, 'artist':\n artist, 'gender': gender, 'path': str(pathFile).encode('ascii',\n 'ignore')})\n",
"step-4": "from pathlib import Path\nimport eyed3\nimport csv\nimport sys\nimport filetype\nimport os\npathFile = Path(\n 'C:\\\\Users\\\\JORGE\\\\Music\\\\Vicente Garcia - Te Soñé (Lyric Video)(MP3_160K).mp3'\n )\naudiofile = eyed3.load(pathFile)\nwith open('loveMusic.csv', 'w', newline='') as csvFile:\n fieldsName = ['nameFile', 'tittle', 'artist', 'gender', 'path']\n writer = csv.DictWriter(csvFile, fieldnames=fieldsName)\n writer.writeheader()\n tittle = audiofile.tag.title.encode('ascii', 'ignore'\n ) if audiofile.tag.title is not None else ''\n artist = audiofile.tag.artist.encode('ascii', 'ignore'\n ) if audiofile.tag.artist is not None else ''\n gender = audiofile.tag.genre.name.encode('ascii', 'ignore'\n ) if audiofile.tag.genre is not None else ''\n writer.writerow({'nameFile': 'nameFile', 'tittle': tittle, 'artist':\n artist, 'gender': gender, 'path': str(pathFile).encode('ascii',\n 'ignore')})\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from tkinter import *
import re
class Molecule:
    """A named molecule with a weight in grams and a DNA sequence."""

    def __init__(self, nom, poids, adn):
        # Store the three descriptive attributes verbatim.
        self.nom, self.poids, self.adn = nom, poids, adn

    def __repr__(self):
        # Human-readable form: "<name> : <weight> g".
        return "{} : {} g".format(self.nom, self.poids)
class Menu:
    """Main window of the molecule manager (Tkinter GUI).

    Molecules are stored in ``self.data`` as
    ``{name: {'poids': float, 'ADN': str}}`` and each button opens a
    helper popup (GUIAdd, GUIMax, GUIMean, GUIGtm).
    NOTE(review): this class name shadows tkinter's Menu widget brought
    in by ``from tkinter import *``; renaming would change the public
    interface, so it is only flagged here.
    """

    def __init__(self):
        self.data = dict()  # name -> {'poids': float, 'ADN': str}
        self.main = Tk()
        self.main.title("Molécules")
        self.main.config(bg="black")
        self.main.minsize(210, 220)
        # BUG FIX: was `self.mean = float` (the type object); use a numeric
        # placeholder so reading it before calc_mean() is at least sane.
        self.mean = 0.0
        Button(self.main, width=14, bg="black", fg="white", text='Ajouter molécule', command=self.add_molecule).grid(
            pady=10)
        Button(self.main, width=14, bg="black", fg="white", text='Poids maximum', command=self.get_max).grid()
        Button(self.main, width=14, bg="black", fg="white", text='Poids moyen', command=self.get_mean).grid(pady=10)
        Button(self.main, bg="black", fg="white", text='Molécules au poids supérieur\nà la moyenne',
               command=self.greater_than_mean).grid(padx=10)
        self.io = Frame(self.main, bg="black")
        Button(self.io, bg="black", fg="white", text='Importer', command=self.import_data).grid(row=1, column=1, padx=5)
        Button(self.io, bg="black", fg="white", text='Exporter', command=self.export_data).grid(row=1, column=2, padx=5)
        self.io.grid(pady=10)
        # Decorative canvas drawing, animated one step per call to grow_penis().
        self.dessein = Canvas(self.main, width=500, height=500)
        self.y = 45   # vertical offset of the two top ovals
        self.y2 = 50  # vertical offset of the body polygons
        self.left = self.dessein.create_oval(275, self.y, 200, self.y + 155, fill="deeppink2", outline="")
        self.right = self.dessein.create_oval(225, self.y, 300, self.y + 155, fill="deeppink2", outline="")
        self.corps = self.dessein.create_polygon(200, self.y2, 300, self.y2, 300, 400, 200, 400, fill="salmon1")
        self.shadow1 = self.dessein.create_polygon(275, self.y2, 300, self.y2, 300, 400, 275, 400, fill="salmon2")
        self.shadow2 = self.dessein.create_polygon(290, self.y2, 300, self.y2, 300, 400, 290, 400, fill="salmon3")
        self.giggle = True  # alternates the two bottom ovals between two positions
        self.ball_left = self.dessein.create_oval(275, 345, 100, 445, fill="salmon1", outline="")
        self.ball_right = self.dessein.create_oval(225, 345, 400, 445, fill="salmon1", outline="")
        self.main.bind("<Down>", self.grow_penis)
        self.dessein.grid(pady=10)
        Button(self.main, width=14, bg="black", fg="white", text='Enlarge your penis !!!',
               command=self.grow_penis).grid()
        self.main.mainloop()

    def grow_penis(self, event=None):
        """Advance the canvas animation one step (bound to <Down> and a button)."""
        if self.y >= 0:
            self.y -= 2
        if self.y2 <= 75:
            self.y2 += 1
        self.dessein.coords(self.left, 275, self.y, 200, self.y + 155)
        self.dessein.coords(self.right, 225, self.y, 300, self.y + 155)
        self.dessein.coords(self.corps, 200, self.y2, 300, self.y2, 300, 400, 200, 400)
        self.dessein.coords(self.shadow1, 275, self.y2, 300, self.y2, 300, 400, 275, 400)
        self.dessein.coords(self.shadow2, 290, self.y2, 300, self.y2, 300, 400, 290, 400)
        # Toggle the bottom ovals between two fixed positions each step.
        if self.giggle:
            self.giggle = False
            self.dessein.coords(self.ball_left, 275, 350, 100, 450)
            self.dessein.coords(self.ball_right, 225, 350, 400, 450)
        else:
            self.giggle = True
            self.dessein.coords(self.ball_left, 275, 345, 100, 445)
            self.dessein.coords(self.ball_right, 225, 345, 400, 445)

    def add_molecule(self):
        """Open the add-molecule popup."""
        GUIAdd(self)

    def get_max(self):
        """Open the popup listing the heaviest molecule(s)."""
        GUIMax(self)

    def get_mean(self):
        """Open the popup showing the average weight."""
        GUIMean(self)

    def greater_than_mean(self):
        """Open the popup listing molecules at or above the average weight."""
        GUIGtm(self)

    def calc_mean(self):
        """Recompute self.mean; 0.0 when there is no data.

        BUG FIX: the original divided by len(self.data.values()) and
        raised ZeroDivisionError when the store was empty.
        """
        if not self.data:
            self.mean = 0.0
            return
        self.mean = sum(x['poids'] for x in self.data.values()) / len(self.data)

    def import_data(self):
        """Load molecules from 'mols.txt' (three lines: names, weights, DNA).

        BUG FIX: a missing or truncated file previously raised inside the
        Tk callback; now the import is silently skipped instead.
        """
        try:
            with open('mols.txt', 'r') as input_file:
                input_txt = input_file.readlines()
        except FileNotFoundError:
            return
        if len(input_txt) < 3:
            return
        liste_name = input_txt[0].split()
        liste_weight = [float(x) for x in input_txt[1].split()]
        liste_adn = input_txt[2].split()
        for i in range(len(liste_name)):
            self.data[liste_name[i]] = {'poids': liste_weight[i], 'ADN': liste_adn[i]}

    def export_data(self):
        """Write the store to 'mols.txt' in the import_data() format (no-op when empty)."""
        if len(self.data) > 0:
            with open('mols.txt', 'w') as output:
                valeurs = self.data.values()
                liste_weight = [x['poids'] for x in valeurs]
                liste_adn = [x['ADN'] for x in valeurs]
                output.write(' '.join(self.data.keys()) + '\n')
                output.write(' '.join([str(x) for x in liste_weight]) + '\n')
                output.write(' '.join(liste_adn))
class GUIAdd:
    """Popup form for adding one molecule to the Menu's data store."""

    def __init__(self, menu: Menu):
        # Keep a reference back to the owning Menu so close_gui() can
        # write into menu.data.
        self.root = menu
        self.gui = Toplevel(menu.main)
        self.gui.title('Ajout de molécule')
        self.gui.minsize(210, 100)
        Label(self.gui, text='Nom de la molécule').pack()
        self.mole_nom = Entry(self.gui)
        self.mole_nom.pack()
        Label(self.gui, text='Poids de la molécule').pack()
        self.mole_poids = Entry(self.gui)
        self.mole_poids.pack()
        Label(self.gui, text='ADN de la molécule').pack()
        self.mole_adn = Entry(self.gui)
        self.mole_adn.pack()
        Button(self.gui, text='Ajouter', command=self.close_gui).pack()
        # Inline error label; its text is set by close_gui() on invalid input.
        self.error = Label(self.gui, text="")
        self.error.pack()
        self.gui.mainloop()

    def close_gui(self):
        """Validate the three entries, store the molecule, and close the popup.

        On any validation failure the error label is set and the window
        stays open (early return); the final destroy() only runs after a
        successful insert.
        """
        try:
            # All three fields must be non-empty.
            if len(self.mole_nom.get()) > 0 and len(self.mole_poids.get()) > 0 and len(self.mole_adn.get()) > 0:
                # Names must be unique within the store.
                if self.mole_nom.get() not in self.root.data.keys():
                    # DNA may only contain the bases A, C, G, T.
                    if not re.search(r'[^ACGT]', self.mole_adn.get()):
                        self.root.data[self.mole_nom.get()] = {'poids': float(self.mole_poids.get()),
                                                               'ADN': self.mole_adn.get()}
                    else:
                        self.error['text'] = "Séquence d'ADN non réglementaire"
                        return
                else:
                    self.error['text'] = "Molecule déjà existante dans les données"
                    return
            else:
                self.error['text'] = "Tous les champs ne sont pas remplis"
                return
        except ValueError:
            # float() failed on the weight entry.
            self.error['text'] = "Poids doit être un float ou un int"
            return
        self.gui.destroy()
class GUIMax:
    """Popup listing the molecule(s) with the maximum weight."""

    def __init__(self, menu: Menu):
        # BUG FIX: the original seeded maxi = 0, so a molecule with
        # weight 0 was appended by the equality branch without ever
        # having been the maximum, and negative weights were never
        # selected at all. Seed with None and treat the first weight
        # seen as the initial maximum instead.
        maxi = None
        max_list = []
        self.gui = Toplevel(menu.main)
        self.gui.title('Molécule au poids maximal')
        self.gui.minsize(210, 100)
        for mol in menu.data:
            poids = menu.data[mol]['poids']
            if maxi is None or poids > maxi:
                maxi = poids
                max_list = [mol]
            elif poids == maxi:
                max_list.append(mol)
        for mol in max_list:
            Label(self.gui, text="{} : {} g".format(mol, menu.data[mol]["poids"])).pack()
        self.gui.mainloop()
class GUIMean:
    """Popup showing the average weight of all stored molecules."""

    def __init__(self, menu: Menu):
        popup = Toplevel(menu.main)
        popup.title('Poids moyen')
        popup.minsize(210, 100)
        # Refresh menu.mean before displaying it.
        menu.calc_mean()
        Label(popup, text="Poids moyen des molécules").pack()
        Label(popup, text=menu.mean).pack()
        self.gui = popup
        popup.mainloop()
class GUIGtm:
    """Popup listing molecules whose weight is at or above the mean."""

    def __init__(self, menu: Menu):
        # Refresh menu.mean first, then filter against it.
        menu.calc_mean()
        window = Toplevel(menu.main)
        window.title('Molécule au poids supérieur à la moyenne')
        window.minsize(210, 100)
        heavy = [m for m in menu.data.keys() if menu.data[m]['poids'] >= menu.mean]
        for mol in heavy:
            Label(window, text="{} : {} g".format(mol, menu.data[mol]["poids"])).pack()
        self.gui = window
        window.mainloop()
def pascal(n: int):
    """Print rows 0 through n of Pascal's triangle.

    Each row is printed on its own line as space-separated integers,
    so n + 1 lines are printed in total.
    """
    row = [1]
    for _ in range(n + 1):
        print(' '.join(map(str, row)))
        # Next row: 1, pairwise sums of adjacent values, 1.
        row = [1] + [a + b for a, b in zip(row, row[1:])] + [1]
# Launch the GUI application; Tk's mainloop blocks here until the window closes.
Menu()
# pascal(50)
|
normal
|
{
"blob_id": "4d05e65dce9f689ae533a57466bc75fa24db7b4d",
"index": 4558,
"step-1": "<mask token>\n\n\nclass Menu:\n\n def __init__(self):\n self.data = dict()\n self.main = Tk()\n self.main.title('Molécules')\n self.main.config(bg='black')\n self.main.minsize(210, 220)\n self.mean = float\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Ajouter molécule', command=self.add_molecule).grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids maximum', command=self.get_max).grid()\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids moyen', command=self.get_mean).grid(pady=10)\n Button(self.main, bg='black', fg='white', text=\n 'Molécules au poids supérieur\\nà la moyenne', command=self.\n greater_than_mean).grid(padx=10)\n self.io = Frame(self.main, bg='black')\n Button(self.io, bg='black', fg='white', text='Importer', command=\n self.import_data).grid(row=1, column=1, padx=5)\n Button(self.io, bg='black', fg='white', text='Exporter', command=\n self.export_data).grid(row=1, column=2, padx=5)\n self.io.grid(pady=10)\n self.dessein = Canvas(self.main, width=500, height=500)\n self.y = 45\n self.y2 = 50\n self.left = self.dessein.create_oval(275, self.y, 200, self.y + 155,\n fill='deeppink2', outline='')\n self.right = self.dessein.create_oval(225, self.y, 300, self.y + \n 155, fill='deeppink2', outline='')\n self.corps = self.dessein.create_polygon(200, self.y2, 300, self.y2,\n 300, 400, 200, 400, fill='salmon1')\n self.shadow1 = self.dessein.create_polygon(275, self.y2, 300, self.\n y2, 300, 400, 275, 400, fill='salmon2')\n self.shadow2 = self.dessein.create_polygon(290, self.y2, 300, self.\n y2, 300, 400, 290, 400, fill='salmon3')\n self.giggle = True\n self.ball_left = self.dessein.create_oval(275, 345, 100, 445, fill=\n 'salmon1', outline='')\n self.ball_right = self.dessein.create_oval(225, 345, 400, 445, fill\n ='salmon1', outline='')\n self.main.bind('<Down>', self.grow_penis)\n self.dessein.grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Enlarge your 
penis !!!', command=self.grow_penis).grid()\n self.main.mainloop()\n\n def grow_penis(self, event=None):\n if self.y >= 0:\n self.y -= 2\n if self.y2 <= 75:\n self.y2 += 1\n self.dessein.coords(self.left, 275, self.y, 200, self.y + 155)\n self.dessein.coords(self.right, 225, self.y, 300, self.y + 155)\n self.dessein.coords(self.corps, 200, self.y2, 300, self.y2, 300, \n 400, 200, 400)\n self.dessein.coords(self.shadow1, 275, self.y2, 300, self.y2, 300, \n 400, 275, 400)\n self.dessein.coords(self.shadow2, 290, self.y2, 300, self.y2, 300, \n 400, 290, 400)\n if self.giggle:\n self.giggle = False\n self.dessein.coords(self.ball_left, 275, 350, 100, 450)\n self.dessein.coords(self.ball_right, 225, 350, 400, 450)\n else:\n self.giggle = True\n self.dessein.coords(self.ball_left, 275, 345, 100, 445)\n self.dessein.coords(self.ball_right, 225, 345, 400, 445)\n\n def add_molecule(self):\n GUIAdd(self)\n\n def get_max(self):\n GUIMax(self)\n\n def get_mean(self):\n GUIMean(self)\n <mask token>\n\n def calc_mean(self):\n self.mean = sum([x['poids'] for x in self.data.values()]) / len(self\n .data.values())\n <mask token>\n\n def export_data(self):\n if len(self.data) > 0:\n with open('mols.txt', 'w') as output:\n valeurs = self.data.values()\n liste_weight = [x['poids'] for x in valeurs]\n liste_adn = [x['ADN'] for x in valeurs]\n output.write(' '.join(self.data.keys()) + '\\n')\n output.write(' '.join([str(x) for x in liste_weight]) + '\\n')\n output.write(' '.join(liste_adn))\n\n\nclass GUIAdd:\n\n def __init__(self, menu: Menu):\n self.root = menu\n self.gui = Toplevel(menu.main)\n self.gui.title('Ajout de molécule')\n self.gui.minsize(210, 100)\n Label(self.gui, text='Nom de la molécule').pack()\n self.mole_nom = Entry(self.gui)\n self.mole_nom.pack()\n Label(self.gui, text='Poids de la molécule').pack()\n self.mole_poids = Entry(self.gui)\n self.mole_poids.pack()\n Label(self.gui, text='ADN de la molécule').pack()\n self.mole_adn = Entry(self.gui)\n 
self.mole_adn.pack()\n Button(self.gui, text='Ajouter', command=self.close_gui).pack()\n self.error = Label(self.gui, text='')\n self.error.pack()\n self.gui.mainloop()\n\n def close_gui(self):\n try:\n if len(self.mole_nom.get()) > 0 and len(self.mole_poids.get()\n ) > 0 and len(self.mole_adn.get()) > 0:\n if self.mole_nom.get() not in self.root.data.keys():\n if not re.search('[^ACGT]', self.mole_adn.get()):\n self.root.data[self.mole_nom.get()] = {'poids':\n float(self.mole_poids.get()), 'ADN': self.\n mole_adn.get()}\n else:\n self.error['text'] = \"Séquence d'ADN non réglementaire\"\n return\n else:\n self.error['text'\n ] = 'Molecule déjà existante dans les données'\n return\n else:\n self.error['text'] = 'Tous les champs ne sont pas remplis'\n return\n except ValueError:\n self.error['text'] = 'Poids doit être un float ou un int'\n return\n self.gui.destroy()\n\n\nclass GUIMax:\n\n def __init__(self, menu: Menu):\n maxi = 0\n max_list = []\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids maximal')\n self.gui.minsize(210, 100)\n for mol in menu.data:\n if menu.data[mol]['poids'] > maxi:\n maxi = menu.data[mol]['poids']\n max_list = [mol]\n elif menu.data[mol]['poids'] == maxi:\n max_list.append(mol)\n for mol in max_list:\n Label(self.gui, text='{} : {} g'.format(mol, menu.data[mol][\n 'poids'])).pack()\n self.gui.mainloop()\n\n\nclass GUIMean:\n\n def __init__(self, menu: Menu):\n self.gui = Toplevel(menu.main)\n self.gui.title('Poids moyen')\n self.gui.minsize(210, 100)\n menu.calc_mean()\n Label(self.gui, text='Poids moyen des molécules').pack()\n Label(self.gui, text=menu.mean).pack()\n self.gui.mainloop()\n\n\nclass GUIGtm:\n\n def __init__(self, menu: Menu):\n menu.calc_mean()\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids supérieur à la moyenne')\n self.gui.minsize(210, 100)\n for mol in menu.data.keys():\n if menu.data[mol]['poids'] >= menu.mean:\n Label(self.gui, text='{} : {} g'.format(mol, menu.data[mol]\n 
['poids'])).pack()\n self.gui.mainloop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Menu:\n\n def __init__(self):\n self.data = dict()\n self.main = Tk()\n self.main.title('Molécules')\n self.main.config(bg='black')\n self.main.minsize(210, 220)\n self.mean = float\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Ajouter molécule', command=self.add_molecule).grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids maximum', command=self.get_max).grid()\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids moyen', command=self.get_mean).grid(pady=10)\n Button(self.main, bg='black', fg='white', text=\n 'Molécules au poids supérieur\\nà la moyenne', command=self.\n greater_than_mean).grid(padx=10)\n self.io = Frame(self.main, bg='black')\n Button(self.io, bg='black', fg='white', text='Importer', command=\n self.import_data).grid(row=1, column=1, padx=5)\n Button(self.io, bg='black', fg='white', text='Exporter', command=\n self.export_data).grid(row=1, column=2, padx=5)\n self.io.grid(pady=10)\n self.dessein = Canvas(self.main, width=500, height=500)\n self.y = 45\n self.y2 = 50\n self.left = self.dessein.create_oval(275, self.y, 200, self.y + 155,\n fill='deeppink2', outline='')\n self.right = self.dessein.create_oval(225, self.y, 300, self.y + \n 155, fill='deeppink2', outline='')\n self.corps = self.dessein.create_polygon(200, self.y2, 300, self.y2,\n 300, 400, 200, 400, fill='salmon1')\n self.shadow1 = self.dessein.create_polygon(275, self.y2, 300, self.\n y2, 300, 400, 275, 400, fill='salmon2')\n self.shadow2 = self.dessein.create_polygon(290, self.y2, 300, self.\n y2, 300, 400, 290, 400, fill='salmon3')\n self.giggle = True\n self.ball_left = self.dessein.create_oval(275, 345, 100, 445, fill=\n 'salmon1', outline='')\n self.ball_right = self.dessein.create_oval(225, 345, 400, 445, fill\n ='salmon1', outline='')\n self.main.bind('<Down>', self.grow_penis)\n self.dessein.grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Enlarge your 
penis !!!', command=self.grow_penis).grid()\n self.main.mainloop()\n\n def grow_penis(self, event=None):\n if self.y >= 0:\n self.y -= 2\n if self.y2 <= 75:\n self.y2 += 1\n self.dessein.coords(self.left, 275, self.y, 200, self.y + 155)\n self.dessein.coords(self.right, 225, self.y, 300, self.y + 155)\n self.dessein.coords(self.corps, 200, self.y2, 300, self.y2, 300, \n 400, 200, 400)\n self.dessein.coords(self.shadow1, 275, self.y2, 300, self.y2, 300, \n 400, 275, 400)\n self.dessein.coords(self.shadow2, 290, self.y2, 300, self.y2, 300, \n 400, 290, 400)\n if self.giggle:\n self.giggle = False\n self.dessein.coords(self.ball_left, 275, 350, 100, 450)\n self.dessein.coords(self.ball_right, 225, 350, 400, 450)\n else:\n self.giggle = True\n self.dessein.coords(self.ball_left, 275, 345, 100, 445)\n self.dessein.coords(self.ball_right, 225, 345, 400, 445)\n\n def add_molecule(self):\n GUIAdd(self)\n\n def get_max(self):\n GUIMax(self)\n\n def get_mean(self):\n GUIMean(self)\n\n def greater_than_mean(self):\n GUIGtm(self)\n\n def calc_mean(self):\n self.mean = sum([x['poids'] for x in self.data.values()]) / len(self\n .data.values())\n <mask token>\n\n def export_data(self):\n if len(self.data) > 0:\n with open('mols.txt', 'w') as output:\n valeurs = self.data.values()\n liste_weight = [x['poids'] for x in valeurs]\n liste_adn = [x['ADN'] for x in valeurs]\n output.write(' '.join(self.data.keys()) + '\\n')\n output.write(' '.join([str(x) for x in liste_weight]) + '\\n')\n output.write(' '.join(liste_adn))\n\n\nclass GUIAdd:\n\n def __init__(self, menu: Menu):\n self.root = menu\n self.gui = Toplevel(menu.main)\n self.gui.title('Ajout de molécule')\n self.gui.minsize(210, 100)\n Label(self.gui, text='Nom de la molécule').pack()\n self.mole_nom = Entry(self.gui)\n self.mole_nom.pack()\n Label(self.gui, text='Poids de la molécule').pack()\n self.mole_poids = Entry(self.gui)\n self.mole_poids.pack()\n Label(self.gui, text='ADN de la molécule').pack()\n self.mole_adn = 
Entry(self.gui)\n self.mole_adn.pack()\n Button(self.gui, text='Ajouter', command=self.close_gui).pack()\n self.error = Label(self.gui, text='')\n self.error.pack()\n self.gui.mainloop()\n\n def close_gui(self):\n try:\n if len(self.mole_nom.get()) > 0 and len(self.mole_poids.get()\n ) > 0 and len(self.mole_adn.get()) > 0:\n if self.mole_nom.get() not in self.root.data.keys():\n if not re.search('[^ACGT]', self.mole_adn.get()):\n self.root.data[self.mole_nom.get()] = {'poids':\n float(self.mole_poids.get()), 'ADN': self.\n mole_adn.get()}\n else:\n self.error['text'] = \"Séquence d'ADN non réglementaire\"\n return\n else:\n self.error['text'\n ] = 'Molecule déjà existante dans les données'\n return\n else:\n self.error['text'] = 'Tous les champs ne sont pas remplis'\n return\n except ValueError:\n self.error['text'] = 'Poids doit être un float ou un int'\n return\n self.gui.destroy()\n\n\nclass GUIMax:\n\n def __init__(self, menu: Menu):\n maxi = 0\n max_list = []\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids maximal')\n self.gui.minsize(210, 100)\n for mol in menu.data:\n if menu.data[mol]['poids'] > maxi:\n maxi = menu.data[mol]['poids']\n max_list = [mol]\n elif menu.data[mol]['poids'] == maxi:\n max_list.append(mol)\n for mol in max_list:\n Label(self.gui, text='{} : {} g'.format(mol, menu.data[mol][\n 'poids'])).pack()\n self.gui.mainloop()\n\n\nclass GUIMean:\n\n def __init__(self, menu: Menu):\n self.gui = Toplevel(menu.main)\n self.gui.title('Poids moyen')\n self.gui.minsize(210, 100)\n menu.calc_mean()\n Label(self.gui, text='Poids moyen des molécules').pack()\n Label(self.gui, text=menu.mean).pack()\n self.gui.mainloop()\n\n\nclass GUIGtm:\n\n def __init__(self, menu: Menu):\n menu.calc_mean()\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids supérieur à la moyenne')\n self.gui.minsize(210, 100)\n for mol in menu.data.keys():\n if menu.data[mol]['poids'] >= menu.mean:\n Label(self.gui, text='{} : {} g'.format(mol, 
menu.data[mol]\n ['poids'])).pack()\n self.gui.mainloop()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Molecule:\n\n def __init__(self, nom, poids, adn):\n self.nom = nom\n self.poids = poids\n self.adn = adn\n\n def __repr__(self):\n return '{} : {} g'.format(self.nom, self.poids)\n\n\nclass Menu:\n\n def __init__(self):\n self.data = dict()\n self.main = Tk()\n self.main.title('Molécules')\n self.main.config(bg='black')\n self.main.minsize(210, 220)\n self.mean = float\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Ajouter molécule', command=self.add_molecule).grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids maximum', command=self.get_max).grid()\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids moyen', command=self.get_mean).grid(pady=10)\n Button(self.main, bg='black', fg='white', text=\n 'Molécules au poids supérieur\\nà la moyenne', command=self.\n greater_than_mean).grid(padx=10)\n self.io = Frame(self.main, bg='black')\n Button(self.io, bg='black', fg='white', text='Importer', command=\n self.import_data).grid(row=1, column=1, padx=5)\n Button(self.io, bg='black', fg='white', text='Exporter', command=\n self.export_data).grid(row=1, column=2, padx=5)\n self.io.grid(pady=10)\n self.dessein = Canvas(self.main, width=500, height=500)\n self.y = 45\n self.y2 = 50\n self.left = self.dessein.create_oval(275, self.y, 200, self.y + 155,\n fill='deeppink2', outline='')\n self.right = self.dessein.create_oval(225, self.y, 300, self.y + \n 155, fill='deeppink2', outline='')\n self.corps = self.dessein.create_polygon(200, self.y2, 300, self.y2,\n 300, 400, 200, 400, fill='salmon1')\n self.shadow1 = self.dessein.create_polygon(275, self.y2, 300, self.\n y2, 300, 400, 275, 400, fill='salmon2')\n self.shadow2 = self.dessein.create_polygon(290, self.y2, 300, self.\n y2, 300, 400, 290, 400, fill='salmon3')\n self.giggle = True\n self.ball_left = self.dessein.create_oval(275, 345, 100, 445, fill=\n 'salmon1', outline='')\n self.ball_right = self.dessein.create_oval(225, 
345, 400, 445, fill\n ='salmon1', outline='')\n self.main.bind('<Down>', self.grow_penis)\n self.dessein.grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Enlarge your penis !!!', command=self.grow_penis).grid()\n self.main.mainloop()\n\n def grow_penis(self, event=None):\n if self.y >= 0:\n self.y -= 2\n if self.y2 <= 75:\n self.y2 += 1\n self.dessein.coords(self.left, 275, self.y, 200, self.y + 155)\n self.dessein.coords(self.right, 225, self.y, 300, self.y + 155)\n self.dessein.coords(self.corps, 200, self.y2, 300, self.y2, 300, \n 400, 200, 400)\n self.dessein.coords(self.shadow1, 275, self.y2, 300, self.y2, 300, \n 400, 275, 400)\n self.dessein.coords(self.shadow2, 290, self.y2, 300, self.y2, 300, \n 400, 290, 400)\n if self.giggle:\n self.giggle = False\n self.dessein.coords(self.ball_left, 275, 350, 100, 450)\n self.dessein.coords(self.ball_right, 225, 350, 400, 450)\n else:\n self.giggle = True\n self.dessein.coords(self.ball_left, 275, 345, 100, 445)\n self.dessein.coords(self.ball_right, 225, 345, 400, 445)\n\n def add_molecule(self):\n GUIAdd(self)\n\n def get_max(self):\n GUIMax(self)\n\n def get_mean(self):\n GUIMean(self)\n\n def greater_than_mean(self):\n GUIGtm(self)\n\n def calc_mean(self):\n self.mean = sum([x['poids'] for x in self.data.values()]) / len(self\n .data.values())\n\n def import_data(self):\n with open('mols.txt', 'r') as input_file:\n input_txt = input_file.readlines()\n liste_name = input_txt[0].split()\n liste_weight = [float(x) for x in input_txt[1].split()]\n liste_adn = input_txt[2].split()\n for i in range(len(liste_name)):\n self.data[liste_name[i]] = {'poids': liste_weight[i], 'ADN':\n liste_adn[i]}\n\n def export_data(self):\n if len(self.data) > 0:\n with open('mols.txt', 'w') as output:\n valeurs = self.data.values()\n liste_weight = [x['poids'] for x in valeurs]\n liste_adn = [x['ADN'] for x in valeurs]\n output.write(' '.join(self.data.keys()) + '\\n')\n output.write(' '.join([str(x) for x in 
liste_weight]) + '\\n')\n output.write(' '.join(liste_adn))\n\n\nclass GUIAdd:\n\n def __init__(self, menu: Menu):\n self.root = menu\n self.gui = Toplevel(menu.main)\n self.gui.title('Ajout de molécule')\n self.gui.minsize(210, 100)\n Label(self.gui, text='Nom de la molécule').pack()\n self.mole_nom = Entry(self.gui)\n self.mole_nom.pack()\n Label(self.gui, text='Poids de la molécule').pack()\n self.mole_poids = Entry(self.gui)\n self.mole_poids.pack()\n Label(self.gui, text='ADN de la molécule').pack()\n self.mole_adn = Entry(self.gui)\n self.mole_adn.pack()\n Button(self.gui, text='Ajouter', command=self.close_gui).pack()\n self.error = Label(self.gui, text='')\n self.error.pack()\n self.gui.mainloop()\n\n def close_gui(self):\n try:\n if len(self.mole_nom.get()) > 0 and len(self.mole_poids.get()\n ) > 0 and len(self.mole_adn.get()) > 0:\n if self.mole_nom.get() not in self.root.data.keys():\n if not re.search('[^ACGT]', self.mole_adn.get()):\n self.root.data[self.mole_nom.get()] = {'poids':\n float(self.mole_poids.get()), 'ADN': self.\n mole_adn.get()}\n else:\n self.error['text'] = \"Séquence d'ADN non réglementaire\"\n return\n else:\n self.error['text'\n ] = 'Molecule déjà existante dans les données'\n return\n else:\n self.error['text'] = 'Tous les champs ne sont pas remplis'\n return\n except ValueError:\n self.error['text'] = 'Poids doit être un float ou un int'\n return\n self.gui.destroy()\n\n\nclass GUIMax:\n\n def __init__(self, menu: Menu):\n maxi = 0\n max_list = []\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids maximal')\n self.gui.minsize(210, 100)\n for mol in menu.data:\n if menu.data[mol]['poids'] > maxi:\n maxi = menu.data[mol]['poids']\n max_list = [mol]\n elif menu.data[mol]['poids'] == maxi:\n max_list.append(mol)\n for mol in max_list:\n Label(self.gui, text='{} : {} g'.format(mol, menu.data[mol][\n 'poids'])).pack()\n self.gui.mainloop()\n\n\nclass GUIMean:\n\n def __init__(self, menu: Menu):\n self.gui = 
Toplevel(menu.main)\n self.gui.title('Poids moyen')\n self.gui.minsize(210, 100)\n menu.calc_mean()\n Label(self.gui, text='Poids moyen des molécules').pack()\n Label(self.gui, text=menu.mean).pack()\n self.gui.mainloop()\n\n\nclass GUIGtm:\n\n def __init__(self, menu: Menu):\n menu.calc_mean()\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids supérieur à la moyenne')\n self.gui.minsize(210, 100)\n for mol in menu.data.keys():\n if menu.data[mol]['poids'] >= menu.mean:\n Label(self.gui, text='{} : {} g'.format(mol, menu.data[mol]\n ['poids'])).pack()\n self.gui.mainloop()\n\n\ndef pascal(n: int):\n prec = [1]\n for i in range(1, n + 2):\n print(' '.join([str(x) for x in prec]))\n new = []\n for j in range(i + 1):\n if j == 0 or j == i:\n new.append(1)\n else:\n new.append(prec[j] + prec[j - 1])\n prec = new\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Molecule:\n\n def __init__(self, nom, poids, adn):\n self.nom = nom\n self.poids = poids\n self.adn = adn\n\n def __repr__(self):\n return '{} : {} g'.format(self.nom, self.poids)\n\n\nclass Menu:\n\n def __init__(self):\n self.data = dict()\n self.main = Tk()\n self.main.title('Molécules')\n self.main.config(bg='black')\n self.main.minsize(210, 220)\n self.mean = float\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Ajouter molécule', command=self.add_molecule).grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids maximum', command=self.get_max).grid()\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids moyen', command=self.get_mean).grid(pady=10)\n Button(self.main, bg='black', fg='white', text=\n 'Molécules au poids supérieur\\nà la moyenne', command=self.\n greater_than_mean).grid(padx=10)\n self.io = Frame(self.main, bg='black')\n Button(self.io, bg='black', fg='white', text='Importer', command=\n self.import_data).grid(row=1, column=1, padx=5)\n Button(self.io, bg='black', fg='white', text='Exporter', command=\n self.export_data).grid(row=1, column=2, padx=5)\n self.io.grid(pady=10)\n self.dessein = Canvas(self.main, width=500, height=500)\n self.y = 45\n self.y2 = 50\n self.left = self.dessein.create_oval(275, self.y, 200, self.y + 155,\n fill='deeppink2', outline='')\n self.right = self.dessein.create_oval(225, self.y, 300, self.y + \n 155, fill='deeppink2', outline='')\n self.corps = self.dessein.create_polygon(200, self.y2, 300, self.y2,\n 300, 400, 200, 400, fill='salmon1')\n self.shadow1 = self.dessein.create_polygon(275, self.y2, 300, self.\n y2, 300, 400, 275, 400, fill='salmon2')\n self.shadow2 = self.dessein.create_polygon(290, self.y2, 300, self.\n y2, 300, 400, 290, 400, fill='salmon3')\n self.giggle = True\n self.ball_left = self.dessein.create_oval(275, 345, 100, 445, fill=\n 'salmon1', outline='')\n self.ball_right = self.dessein.create_oval(225, 
345, 400, 445, fill\n ='salmon1', outline='')\n self.main.bind('<Down>', self.grow_penis)\n self.dessein.grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Enlarge your penis !!!', command=self.grow_penis).grid()\n self.main.mainloop()\n\n def grow_penis(self, event=None):\n if self.y >= 0:\n self.y -= 2\n if self.y2 <= 75:\n self.y2 += 1\n self.dessein.coords(self.left, 275, self.y, 200, self.y + 155)\n self.dessein.coords(self.right, 225, self.y, 300, self.y + 155)\n self.dessein.coords(self.corps, 200, self.y2, 300, self.y2, 300, \n 400, 200, 400)\n self.dessein.coords(self.shadow1, 275, self.y2, 300, self.y2, 300, \n 400, 275, 400)\n self.dessein.coords(self.shadow2, 290, self.y2, 300, self.y2, 300, \n 400, 290, 400)\n if self.giggle:\n self.giggle = False\n self.dessein.coords(self.ball_left, 275, 350, 100, 450)\n self.dessein.coords(self.ball_right, 225, 350, 400, 450)\n else:\n self.giggle = True\n self.dessein.coords(self.ball_left, 275, 345, 100, 445)\n self.dessein.coords(self.ball_right, 225, 345, 400, 445)\n\n def add_molecule(self):\n GUIAdd(self)\n\n def get_max(self):\n GUIMax(self)\n\n def get_mean(self):\n GUIMean(self)\n\n def greater_than_mean(self):\n GUIGtm(self)\n\n def calc_mean(self):\n self.mean = sum([x['poids'] for x in self.data.values()]) / len(self\n .data.values())\n\n def import_data(self):\n with open('mols.txt', 'r') as input_file:\n input_txt = input_file.readlines()\n liste_name = input_txt[0].split()\n liste_weight = [float(x) for x in input_txt[1].split()]\n liste_adn = input_txt[2].split()\n for i in range(len(liste_name)):\n self.data[liste_name[i]] = {'poids': liste_weight[i], 'ADN':\n liste_adn[i]}\n\n def export_data(self):\n if len(self.data) > 0:\n with open('mols.txt', 'w') as output:\n valeurs = self.data.values()\n liste_weight = [x['poids'] for x in valeurs]\n liste_adn = [x['ADN'] for x in valeurs]\n output.write(' '.join(self.data.keys()) + '\\n')\n output.write(' '.join([str(x) for x in 
liste_weight]) + '\\n')\n output.write(' '.join(liste_adn))\n\n\nclass GUIAdd:\n\n def __init__(self, menu: Menu):\n self.root = menu\n self.gui = Toplevel(menu.main)\n self.gui.title('Ajout de molécule')\n self.gui.minsize(210, 100)\n Label(self.gui, text='Nom de la molécule').pack()\n self.mole_nom = Entry(self.gui)\n self.mole_nom.pack()\n Label(self.gui, text='Poids de la molécule').pack()\n self.mole_poids = Entry(self.gui)\n self.mole_poids.pack()\n Label(self.gui, text='ADN de la molécule').pack()\n self.mole_adn = Entry(self.gui)\n self.mole_adn.pack()\n Button(self.gui, text='Ajouter', command=self.close_gui).pack()\n self.error = Label(self.gui, text='')\n self.error.pack()\n self.gui.mainloop()\n\n def close_gui(self):\n try:\n if len(self.mole_nom.get()) > 0 and len(self.mole_poids.get()\n ) > 0 and len(self.mole_adn.get()) > 0:\n if self.mole_nom.get() not in self.root.data.keys():\n if not re.search('[^ACGT]', self.mole_adn.get()):\n self.root.data[self.mole_nom.get()] = {'poids':\n float(self.mole_poids.get()), 'ADN': self.\n mole_adn.get()}\n else:\n self.error['text'] = \"Séquence d'ADN non réglementaire\"\n return\n else:\n self.error['text'\n ] = 'Molecule déjà existante dans les données'\n return\n else:\n self.error['text'] = 'Tous les champs ne sont pas remplis'\n return\n except ValueError:\n self.error['text'] = 'Poids doit être un float ou un int'\n return\n self.gui.destroy()\n\n\nclass GUIMax:\n\n def __init__(self, menu: Menu):\n maxi = 0\n max_list = []\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids maximal')\n self.gui.minsize(210, 100)\n for mol in menu.data:\n if menu.data[mol]['poids'] > maxi:\n maxi = menu.data[mol]['poids']\n max_list = [mol]\n elif menu.data[mol]['poids'] == maxi:\n max_list.append(mol)\n for mol in max_list:\n Label(self.gui, text='{} : {} g'.format(mol, menu.data[mol][\n 'poids'])).pack()\n self.gui.mainloop()\n\n\nclass GUIMean:\n\n def __init__(self, menu: Menu):\n self.gui = 
Toplevel(menu.main)\n self.gui.title('Poids moyen')\n self.gui.minsize(210, 100)\n menu.calc_mean()\n Label(self.gui, text='Poids moyen des molécules').pack()\n Label(self.gui, text=menu.mean).pack()\n self.gui.mainloop()\n\n\nclass GUIGtm:\n\n def __init__(self, menu: Menu):\n menu.calc_mean()\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids supérieur à la moyenne')\n self.gui.minsize(210, 100)\n for mol in menu.data.keys():\n if menu.data[mol]['poids'] >= menu.mean:\n Label(self.gui, text='{} : {} g'.format(mol, menu.data[mol]\n ['poids'])).pack()\n self.gui.mainloop()\n\n\ndef pascal(n: int):\n prec = [1]\n for i in range(1, n + 2):\n print(' '.join([str(x) for x in prec]))\n new = []\n for j in range(i + 1):\n if j == 0 or j == i:\n new.append(1)\n else:\n new.append(prec[j] + prec[j - 1])\n prec = new\n\n\nMenu()\n",
"step-5": "from tkinter import *\nimport re\n\n\nclass Molecule:\n def __init__(self, nom, poids, adn):\n self.nom = nom\n self.poids = poids\n self.adn = adn\n\n def __repr__(self):\n return \"{} : {} g\".format(self.nom, self.poids)\n\n\nclass Menu:\n def __init__(self):\n self.data = dict()\n self.main = Tk()\n self.main.title(\"Molécules\")\n self.main.config(bg=\"black\")\n self.main.minsize(210, 220)\n self.mean = float\n Button(self.main, width=14, bg=\"black\", fg=\"white\", text='Ajouter molécule', command=self.add_molecule).grid(\n pady=10)\n Button(self.main, width=14, bg=\"black\", fg=\"white\", text='Poids maximum', command=self.get_max).grid()\n Button(self.main, width=14, bg=\"black\", fg=\"white\", text='Poids moyen', command=self.get_mean).grid(pady=10)\n Button(self.main, bg=\"black\", fg=\"white\", text='Molécules au poids supérieur\\nà la moyenne',\n command=self.greater_than_mean).grid(padx=10)\n\n self.io = Frame(self.main, bg=\"black\")\n Button(self.io, bg=\"black\", fg=\"white\", text='Importer', command=self.import_data).grid(row=1, column=1, padx=5)\n Button(self.io, bg=\"black\", fg=\"white\", text='Exporter', command=self.export_data).grid(row=1, column=2, padx=5)\n self.io.grid(pady=10)\n self.dessein = Canvas(self.main, width=500, height=500)\n self.y = 45\n self.y2 = 50\n\n self.left = self.dessein.create_oval(275, self.y, 200, self.y + 155, fill=\"deeppink2\", outline=\"\")\n self.right = self.dessein.create_oval(225, self.y, 300, self.y + 155, fill=\"deeppink2\", outline=\"\")\n self.corps = self.dessein.create_polygon(200, self.y2, 300, self.y2, 300, 400, 200, 400, fill=\"salmon1\")\n self.shadow1 = self.dessein.create_polygon(275, self.y2, 300, self.y2, 300, 400, 275, 400, fill=\"salmon2\")\n self.shadow2 = self.dessein.create_polygon(290, self.y2, 300, self.y2, 300, 400, 290, 400, fill=\"salmon3\")\n self.giggle = True\n self.ball_left = self.dessein.create_oval(275, 345, 100, 445, fill=\"salmon1\", outline=\"\")\n 
self.ball_right = self.dessein.create_oval(225, 345, 400, 445, fill=\"salmon1\", outline=\"\")\n self.main.bind(\"<Down>\", self.grow_penis)\n self.dessein.grid(pady=10)\n Button(self.main, width=14, bg=\"black\", fg=\"white\", text='Enlarge your penis !!!',\n command=self.grow_penis).grid()\n self.main.mainloop()\n\n def grow_penis(self, event=None):\n if self.y >= 0:\n self.y -= 2\n if self.y2 <= 75:\n self.y2 += 1\n self.dessein.coords(self.left, 275, self.y, 200, self.y + 155)\n self.dessein.coords(self.right, 225, self.y, 300, self.y + 155)\n self.dessein.coords(self.corps, 200, self.y2, 300, self.y2, 300, 400, 200, 400)\n self.dessein.coords(self.shadow1, 275, self.y2, 300, self.y2, 300, 400, 275, 400)\n self.dessein.coords(self.shadow2, 290, self.y2, 300, self.y2, 300, 400, 290, 400)\n if self.giggle:\n self.giggle = False\n self.dessein.coords(self.ball_left, 275, 350, 100, 450)\n self.dessein.coords(self.ball_right, 225, 350, 400, 450)\n else:\n self.giggle = True\n self.dessein.coords(self.ball_left, 275, 345, 100, 445)\n self.dessein.coords(self.ball_right, 225, 345, 400, 445)\n\n def add_molecule(self):\n GUIAdd(self)\n\n def get_max(self):\n GUIMax(self)\n\n def get_mean(self):\n GUIMean(self)\n\n def greater_than_mean(self):\n GUIGtm(self)\n\n def calc_mean(self):\n self.mean = sum([x['poids'] for x in self.data.values()]) / len(self.data.values())\n\n def import_data(self):\n with open('mols.txt', 'r') as input_file:\n input_txt = input_file.readlines()\n liste_name = input_txt[0].split()\n liste_weight = [float(x) for x in input_txt[1].split()]\n liste_adn = input_txt[2].split()\n for i in range(len(liste_name)):\n self.data[liste_name[i]] = {'poids': liste_weight[i], 'ADN': liste_adn[i]}\n\n def export_data(self):\n if len(self.data) > 0:\n with open('mols.txt', 'w') as output:\n valeurs = self.data.values()\n liste_weight = [x['poids'] for x in valeurs]\n liste_adn = [x['ADN'] for x in valeurs]\n\n output.write(' '.join(self.data.keys()) + 
'\\n')\n output.write(' '.join([str(x) for x in liste_weight]) + '\\n')\n output.write(' '.join(liste_adn))\n\n\nclass GUIAdd:\n def __init__(self, menu: Menu):\n self.root = menu\n self.gui = Toplevel(menu.main)\n self.gui.title('Ajout de molécule')\n self.gui.minsize(210, 100)\n\n Label(self.gui, text='Nom de la molécule').pack()\n self.mole_nom = Entry(self.gui)\n self.mole_nom.pack()\n\n Label(self.gui, text='Poids de la molécule').pack()\n self.mole_poids = Entry(self.gui)\n self.mole_poids.pack()\n\n Label(self.gui, text='ADN de la molécule').pack()\n self.mole_adn = Entry(self.gui)\n self.mole_adn.pack()\n\n Button(self.gui, text='Ajouter', command=self.close_gui).pack()\n self.error = Label(self.gui, text=\"\")\n self.error.pack()\n\n self.gui.mainloop()\n\n def close_gui(self):\n try:\n if len(self.mole_nom.get()) > 0 and len(self.mole_poids.get()) > 0 and len(self.mole_adn.get()) > 0:\n if self.mole_nom.get() not in self.root.data.keys():\n if not re.search(r'[^ACGT]', self.mole_adn.get()):\n self.root.data[self.mole_nom.get()] = {'poids': float(self.mole_poids.get()),\n 'ADN': self.mole_adn.get()}\n else:\n self.error['text'] = \"Séquence d'ADN non réglementaire\"\n return\n else:\n self.error['text'] = \"Molecule déjà existante dans les données\"\n return\n else:\n self.error['text'] = \"Tous les champs ne sont pas remplis\"\n return\n except ValueError:\n self.error['text'] = \"Poids doit être un float ou un int\"\n return\n self.gui.destroy()\n\n\nclass GUIMax:\n def __init__(self, menu: Menu):\n maxi = 0\n max_list = []\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids maximal')\n self.gui.minsize(210, 100)\n for mol in menu.data:\n if menu.data[mol]['poids'] > maxi:\n maxi = menu.data[mol]['poids']\n max_list = [mol]\n elif menu.data[mol]['poids'] == maxi:\n max_list.append(mol)\n for mol in max_list:\n Label(self.gui, text=\"{} : {} g\".format(mol, menu.data[mol][\"poids\"])).pack()\n self.gui.mainloop()\n\n\nclass GUIMean:\n def 
__init__(self, menu: Menu):\n self.gui = Toplevel(menu.main)\n self.gui.title('Poids moyen')\n self.gui.minsize(210, 100)\n menu.calc_mean()\n Label(self.gui, text=\"Poids moyen des molécules\").pack()\n Label(self.gui, text=menu.mean).pack()\n\n self.gui.mainloop()\n\n\nclass GUIGtm:\n def __init__(self, menu: Menu):\n menu.calc_mean()\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids supérieur à la moyenne')\n self.gui.minsize(210, 100)\n\n for mol in menu.data.keys():\n if menu.data[mol]['poids'] >= menu.mean:\n Label(self.gui, text=\"{} : {} g\".format(mol, menu.data[mol][\"poids\"])).pack()\n\n self.gui.mainloop()\n\n\ndef pascal(n: int):\n prec = [1]\n for i in range(1, n + 2):\n print(' '.join([str(x) for x in prec]))\n new = []\n for j in range(i + 1):\n if j == 0 or j == i:\n new.append(1)\n else:\n new.append(prec[j] + prec[j - 1])\n prec = new\n\n\nMenu()\n# pascal(50)\n",
"step-ids": [
17,
18,
23,
24,
26
]
}
|
[
17,
18,
23,
24,
26
] |
<|reserved_special_token_0|>
class REvolution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_pop(self):
ids = ['x: {} => y: {}'.format('%.3f' % i.value[0], '%.3f' % self.
fitness(i.value)) for i in self.population.individuals]
return ids
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class REvolution:
def __init__(self, original_ind, combine_params, mutate_params, fitness,
pop_params, method):
self.population = Population(1, fitness, pop_params)
self.combine_params = combine_params
self.mutate_params = mutate_params
self.fitness = fitness
self.method = method
self.result = []
self.original_ind = original_ind
<|reserved_special_token_0|>
def run_1_1(self, epochs):
for ep in range(epochs):
pop_ind = self.population.individuals[0]
offspring = pop_ind.compare(self.original_ind, self.
combine_params, self.fitness)
offspring.mutate(self.mutate_params)
self.population.arrange_population([offspring])
print('Epoch {}: {}'.format(ep, self.get_pop()))
self.result.append(self.fitness(self.population.individuals[-1]
.value))
def get_pop(self):
ids = ['x: {} => y: {}'.format('%.3f' % i.value[0], '%.3f' % self.
fitness(i.value)) for i in self.population.individuals]
return ids
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class REvolution:
def __init__(self, original_ind, combine_params, mutate_params, fitness,
pop_params, method):
self.population = Population(1, fitness, pop_params)
self.combine_params = combine_params
self.mutate_params = mutate_params
self.fitness = fitness
self.method = method
self.result = []
self.original_ind = original_ind
def run_random(self, epochs):
for ep in range(epochs):
pop_ind = self.population.individuals[0]
offspring = pop_ind.compare(self.original_ind, self.
combine_params, self.fitness)
offspring.mutate_random(self.mutate_params)
self.population.arrange_population([offspring])
print('Epoch {}: {}'.format(ep, self.get_pop()))
self.result.append(self.fitness(self.population.individuals[-1]
.value))
def run_1_1(self, epochs):
for ep in range(epochs):
pop_ind = self.population.individuals[0]
offspring = pop_ind.compare(self.original_ind, self.
combine_params, self.fitness)
offspring.mutate(self.mutate_params)
self.population.arrange_population([offspring])
print('Epoch {}: {}'.format(ep, self.get_pop()))
self.result.append(self.fitness(self.population.individuals[-1]
.value))
def get_pop(self):
ids = ['x: {} => y: {}'.format('%.3f' % i.value[0], '%.3f' % self.
fitness(i.value)) for i in self.population.individuals]
return ids
<|reserved_special_token_1|>
from population import Population
class REvolution:
def __init__(self, original_ind, combine_params, mutate_params, fitness,
pop_params, method):
self.population = Population(1, fitness, pop_params)
self.combine_params = combine_params
self.mutate_params = mutate_params
self.fitness = fitness
self.method = method
self.result = []
self.original_ind = original_ind
def run_random(self, epochs):
for ep in range(epochs):
pop_ind = self.population.individuals[0]
offspring = pop_ind.compare(self.original_ind, self.
combine_params, self.fitness)
offspring.mutate_random(self.mutate_params)
self.population.arrange_population([offspring])
print('Epoch {}: {}'.format(ep, self.get_pop()))
self.result.append(self.fitness(self.population.individuals[-1]
.value))
def run_1_1(self, epochs):
for ep in range(epochs):
pop_ind = self.population.individuals[0]
offspring = pop_ind.compare(self.original_ind, self.
combine_params, self.fitness)
offspring.mutate(self.mutate_params)
self.population.arrange_population([offspring])
print('Epoch {}: {}'.format(ep, self.get_pop()))
self.result.append(self.fitness(self.population.individuals[-1]
.value))
def get_pop(self):
ids = ['x: {} => y: {}'.format('%.3f' % i.value[0], '%.3f' % self.
fitness(i.value)) for i in self.population.individuals]
return ids
<|reserved_special_token_1|>
from population import Population
class REvolution:
    """Driver for a (1+1)-style evolutionary search over a one-individual population."""

    def __init__(self, original_ind, combine_params, mutate_params, fitness, pop_params, method):
        # The population holds a single current champion, ranked by *fitness*.
        self.population = Population(1, fitness, pop_params)
        self.combine_params = combine_params
        self.mutate_params = mutate_params
        self.fitness = fitness
        self.method = method
        # Fitness of the best individual, recorded after every epoch.
        self.result = []
        self.original_ind = original_ind

    def run_random(self, epochs):
        """Run *epochs* generations using the random-mutation operator."""
        for epoch in range(epochs):
            parent = self.population.individuals[0]
            child = parent.compare(self.original_ind, self.combine_params, self.fitness)
            child.mutate_random(self.mutate_params)
            self.population.arrange_population([child])
            print("Epoch {}: {}".format(epoch, self.get_pop()))
            best = self.population.individuals[-1]
            self.result.append(self.fitness(best.value))

    def run_1_1(self, epochs):
        """Run *epochs* generations using the plain (1+1) mutation operator."""
        for epoch in range(epochs):
            parent = self.population.individuals[0]
            child = parent.compare(self.original_ind, self.combine_params, self.fitness)
            child.mutate(self.mutate_params)
            self.population.arrange_population([child])
            print("Epoch {}: {}".format(epoch, self.get_pop()))
            best = self.population.individuals[-1]
            self.result.append(self.fitness(best.value))

    def get_pop(self):
        """Return one 'x: ... => y: ...' summary string per individual."""
        summaries = []
        for ind in self.population.individuals:
            summaries.append('x: %.3f => y: %.3f' % (ind.value[0], self.fitness(ind.value)))
        return summaries
|
flexible
|
{
"blob_id": "fe13b57484e0f0796164fda99c0d759238a67153",
"index": 7215,
"step-1": "<mask token>\n\n\nclass REvolution:\n <mask token>\n <mask token>\n <mask token>\n\n def get_pop(self):\n ids = ['x: {} => y: {}'.format('%.3f' % i.value[0], '%.3f' % self.\n fitness(i.value)) for i in self.population.individuals]\n return ids\n",
"step-2": "<mask token>\n\n\nclass REvolution:\n\n def __init__(self, original_ind, combine_params, mutate_params, fitness,\n pop_params, method):\n self.population = Population(1, fitness, pop_params)\n self.combine_params = combine_params\n self.mutate_params = mutate_params\n self.fitness = fitness\n self.method = method\n self.result = []\n self.original_ind = original_ind\n <mask token>\n\n def run_1_1(self, epochs):\n for ep in range(epochs):\n pop_ind = self.population.individuals[0]\n offspring = pop_ind.compare(self.original_ind, self.\n combine_params, self.fitness)\n offspring.mutate(self.mutate_params)\n self.population.arrange_population([offspring])\n print('Epoch {}: {}'.format(ep, self.get_pop()))\n self.result.append(self.fitness(self.population.individuals[-1]\n .value))\n\n def get_pop(self):\n ids = ['x: {} => y: {}'.format('%.3f' % i.value[0], '%.3f' % self.\n fitness(i.value)) for i in self.population.individuals]\n return ids\n",
"step-3": "<mask token>\n\n\nclass REvolution:\n\n def __init__(self, original_ind, combine_params, mutate_params, fitness,\n pop_params, method):\n self.population = Population(1, fitness, pop_params)\n self.combine_params = combine_params\n self.mutate_params = mutate_params\n self.fitness = fitness\n self.method = method\n self.result = []\n self.original_ind = original_ind\n\n def run_random(self, epochs):\n for ep in range(epochs):\n pop_ind = self.population.individuals[0]\n offspring = pop_ind.compare(self.original_ind, self.\n combine_params, self.fitness)\n offspring.mutate_random(self.mutate_params)\n self.population.arrange_population([offspring])\n print('Epoch {}: {}'.format(ep, self.get_pop()))\n self.result.append(self.fitness(self.population.individuals[-1]\n .value))\n\n def run_1_1(self, epochs):\n for ep in range(epochs):\n pop_ind = self.population.individuals[0]\n offspring = pop_ind.compare(self.original_ind, self.\n combine_params, self.fitness)\n offspring.mutate(self.mutate_params)\n self.population.arrange_population([offspring])\n print('Epoch {}: {}'.format(ep, self.get_pop()))\n self.result.append(self.fitness(self.population.individuals[-1]\n .value))\n\n def get_pop(self):\n ids = ['x: {} => y: {}'.format('%.3f' % i.value[0], '%.3f' % self.\n fitness(i.value)) for i in self.population.individuals]\n return ids\n",
"step-4": "from population import Population\n\n\nclass REvolution:\n\n def __init__(self, original_ind, combine_params, mutate_params, fitness,\n pop_params, method):\n self.population = Population(1, fitness, pop_params)\n self.combine_params = combine_params\n self.mutate_params = mutate_params\n self.fitness = fitness\n self.method = method\n self.result = []\n self.original_ind = original_ind\n\n def run_random(self, epochs):\n for ep in range(epochs):\n pop_ind = self.population.individuals[0]\n offspring = pop_ind.compare(self.original_ind, self.\n combine_params, self.fitness)\n offspring.mutate_random(self.mutate_params)\n self.population.arrange_population([offspring])\n print('Epoch {}: {}'.format(ep, self.get_pop()))\n self.result.append(self.fitness(self.population.individuals[-1]\n .value))\n\n def run_1_1(self, epochs):\n for ep in range(epochs):\n pop_ind = self.population.individuals[0]\n offspring = pop_ind.compare(self.original_ind, self.\n combine_params, self.fitness)\n offspring.mutate(self.mutate_params)\n self.population.arrange_population([offspring])\n print('Epoch {}: {}'.format(ep, self.get_pop()))\n self.result.append(self.fitness(self.population.individuals[-1]\n .value))\n\n def get_pop(self):\n ids = ['x: {} => y: {}'.format('%.3f' % i.value[0], '%.3f' % self.\n fitness(i.value)) for i in self.population.individuals]\n return ids\n",
"step-5": "from population import Population\n\n\nclass REvolution:\n def __init__(self, original_ind, combine_params, mutate_params, fitness, pop_params, method):\n self.population = Population(1, fitness, pop_params)\n self.combine_params = combine_params\n self.mutate_params = mutate_params\n self.fitness = fitness\n self.method = method\n self.result = []\n self.original_ind = original_ind\n\n def run_random(self, epochs):\n for ep in range(epochs):\n pop_ind = self.population.individuals[0]\n offspring = pop_ind.compare(self.original_ind, self.combine_params, self.fitness)\n offspring.mutate_random(self.mutate_params)\n self.population.arrange_population([offspring])\n print(\"Epoch {}: {}\".format(ep, self.get_pop()))\n self.result.append(self.fitness(self.population.individuals[-1].value))\n\n def run_1_1(self, epochs):\n for ep in range(epochs):\n pop_ind = self.population.individuals[0]\n offspring = pop_ind.compare(self.original_ind, self.combine_params, self.fitness)\n offspring.mutate(self.mutate_params)\n self.population.arrange_population([offspring])\n print(\"Epoch {}: {}\".format(ep, self.get_pop()))\n self.result.append(self.fitness(self.population.individuals[-1].value))\n\n def get_pop(self):\n ids = [\"x: {} => y: {}\".format(\"%.3f\" % i.value[0], \"%.3f\" % self.fitness(i.value))\n for i in self.population.individuals]\n return ids\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
# Comic Downloader
#! python3
"""Download explosm.net comics sequentially by following 'next comic' links."""
# Bug fix: `import urllib` alone does not make the `urllib.request` submodule
# available in Python 3 — it must be imported explicitly.
import urllib.request

import bs4
import requests

url = 'http://explosm.net/comics/39/'
base_url = 'http://explosm.net'

for i in range(1, 4000):
    req = requests.get(url)
    req.raise_for_status()
    soup = bs4.BeautifulSoup(req.text, "lxml")
    comic = soup.select('#main-comic')
    # The src attribute is protocol-relative ('//...'); prefix the scheme.
    comicUrl = 'http:' + comic[0].get('src')
    urllib.request.urlretrieve(comicUrl, str(i))
    print(str(i) + ' done')
    next_comic = soup.select('.next-comic')
    if not next_comic:
        # Last comic has no 'next' link; stop instead of raising IndexError.
        break
    url = base_url + next_comic[0].get('href')
|
normal
|
{
"blob_id": "66e77b8237850a29127402310bfab3061f7ebca4",
"index": 2346,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, 4000):\n req = requests.get(url)\n req.raise_for_status()\n soup = bs4.BeautifulSoup(req.text, 'lxml')\n comic = soup.select('#main-comic')\n comicUrl = 'http:' + comic[0].get('src')\n urllib.request.urlretrieve(comicUrl, str(i))\n print(str(i) + ' done')\n next_comic = soup.select('.next-comic')\n url = base_url + next_comic[0].get('href')\n",
"step-3": "<mask token>\nurl = 'http://explosm.net/comics/39/'\nbase_url = 'http://explosm.net'\nfor i in range(1, 4000):\n req = requests.get(url)\n req.raise_for_status()\n soup = bs4.BeautifulSoup(req.text, 'lxml')\n comic = soup.select('#main-comic')\n comicUrl = 'http:' + comic[0].get('src')\n urllib.request.urlretrieve(comicUrl, str(i))\n print(str(i) + ' done')\n next_comic = soup.select('.next-comic')\n url = base_url + next_comic[0].get('href')\n",
"step-4": "import urllib, bs4, requests\nurl = 'http://explosm.net/comics/39/'\nbase_url = 'http://explosm.net'\nfor i in range(1, 4000):\n req = requests.get(url)\n req.raise_for_status()\n soup = bs4.BeautifulSoup(req.text, 'lxml')\n comic = soup.select('#main-comic')\n comicUrl = 'http:' + comic[0].get('src')\n urllib.request.urlretrieve(comicUrl, str(i))\n print(str(i) + ' done')\n next_comic = soup.select('.next-comic')\n url = base_url + next_comic[0].get('href')\n",
"step-5": "# Comic Downloader\n\n#! python3\n\nimport urllib, bs4, requests\nurl = 'http://explosm.net/comics/39/'\nbase_url = 'http://explosm.net'\n\nfor i in range(1,4000):\n\n req = requests.get(url)\n req.raise_for_status()\n soup = bs4.BeautifulSoup(req.text, \"lxml\")\n comic = soup.select('#main-comic')\n comicUrl = 'http:' + comic[0].get('src')\n urllib.request.urlretrieve(comicUrl, str(i))\n print(str(i) + ' done')\n next_comic = soup.select('.next-comic')\n url = base_url + next_comic[0].get('href')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# This script deletes and recreates the NIC BoD intents.
# Use nic-bod-setup.py to set up the physical network and NEMO nodes first
import requests,json
import argparse, sys
from requests.auth import HTTPBasicAuth
# Default OpenDaylight credentials used for HTTP basic auth.
USERNAME='admin'
PASSWORD='admin'

# RESTCONF endpoints on the controller; '%s' is filled with the controller host.
NIC_INTENTS="http://%s:8181/restconf/config/intent:intents"
# Endpoint for the single bandwidth-on-demand intent managed by this script.
NIC_INTENT="http://%s:8181/restconf/config/intent:intents/intent/14ce424a-3e50-4a2a-ad5c-b29845158c8b"
def delete_nic_intents(contHost):
    """Delete every NIC intent configured on the controller *contHost*."""
    delete(NIC_INTENTS % contHost)
def create_nic_intent(contHost):
    """PUT the fixed dmz<->interior bandwidth-on-demand intent onto *contHost*."""
    intent_body = {
        "id": "14ce424a-3e50-4a2a-ad5c-b29845158c8b",
        # Allow traffic between the two endpoint groups.
        "actions": [{"order": 1, "allow": {}}],
        "subjects": [
            {"order": 1, "end-point-group": {"name": "dmz"}},
            {"order": 2, "end-point-group": {"name": "interior"}},
        ],
        # 10G bandwidth guarantee ...
        "constraints": [
            {"order": 1, "bandwidth-constraint": {"bandwidth": "10G"}},
        ],
        # ... during a daily 10-hour window starting at 08:00 UTC.
        "conditions": [
            {"order": 1, "daily": {"start-time": "08:00:00Z", "duration": "10h"}},
        ],
    }
    put(NIC_INTENT % contHost, {"intent": intent_body})
def post(url, data):
    """POST *data* as JSON to *url* with basic auth; raise on HTTP error."""
    headers = {'Content-type': 'application/yang.data+json',
               'Accept': 'application/yang.data+json'}
    # Single-argument print() behaves identically under Python 2 and Python 3,
    # unlike the Python-2-only `print x` statement used before.
    print("POST %s" % url)
    print(json.dumps(data, indent=4, sort_keys=True))
    r = requests.post(url, data=json.dumps(data), headers=headers,
                      auth=HTTPBasicAuth(USERNAME, PASSWORD))
    print(r.text)
    r.raise_for_status()
def put(url, data):
    """PUT *data* as JSON to *url* with basic auth; raise on HTTP error."""
    headers = {'Content-type': 'application/yang.data+json',
               'Accept': 'application/yang.data+json'}
    # Single-argument print() behaves identically under Python 2 and Python 3,
    # unlike the Python-2-only `print x` statement used before.
    print("PUT %s" % url)
    print(json.dumps(data, indent=4, sort_keys=True))
    r = requests.put(url, data=json.dumps(data), headers=headers,
                     auth=HTTPBasicAuth(USERNAME, PASSWORD))
    print(r.text)
    r.raise_for_status()
def delete(url):
    """Send an authenticated DELETE to *url*; raise on HTTP error."""
    headers = {'Content-type': 'application/yang.data+json',
               'Accept': 'application/yang.data+json'}
    # Single-argument print() behaves identically under Python 2 and Python 3,
    # unlike the Python-2-only `print x` statement used before.
    print("DELETE %s" % url)
    r = requests.delete(url, headers=headers,
                        auth=HTTPBasicAuth(USERNAME, PASSWORD))
    print(r.text)
    r.raise_for_status()
if __name__ == '__main__':
    # Command-line entry point: tear down any existing NIC intents, then
    # re-create the bandwidth-on-demand intent on the given controller.
    parser = argparse.ArgumentParser()
    parser.add_argument('--controller', default='127.0.0.1', help='controller IP')
    args=parser.parse_args()
    delete_nic_intents(args.controller)
    create_nic_intent(args.controller)
|
normal
|
{
"blob_id": "955017ad7cc9dde744b8d8a9439f63f4725d50bc",
"index": 1673,
"step-1": "#!/usr/bin/python\n\n# This script deletes and recreates the NIC BoD intents.\n# Use nic-bod-setup.py to set up the physical network and NEMO nodes first\n\nimport requests,json\nimport argparse, sys\nfrom requests.auth import HTTPBasicAuth\n\nUSERNAME='admin'\nPASSWORD='admin'\n\nNIC_INTENTS=\"http://%s:8181/restconf/config/intent:intents\"\nNIC_INTENT=\"http://%s:8181/restconf/config/intent:intents/intent/14ce424a-3e50-4a2a-ad5c-b29845158c8b\"\n\n\ndef delete_nic_intents(contHost):\n delete(NIC_INTENTS % contHost)\n\ndef create_nic_intent(contHost):\n data = {\n \"intent\": {\n \"id\": \"14ce424a-3e50-4a2a-ad5c-b29845158c8b\",\n \"actions\": [\n {\n \"order\": 1,\n \"allow\": {}\n }\n ],\n \"subjects\": [\n {\n \"order\": 1 ,\n \"end-point-group\": { \"name\": \"dmz\" }\n }, {\n \"order\": 2 ,\n \"end-point-group\": { \"name\": \"interior\" }\n }\n ],\n \"constraints\": [\n {\n \"order\": 1,\n \"bandwidth-constraint\": { \"bandwidth\": \"10G\" }\n }\n ],\n \"conditions\": [\n {\n \"order\": 1,\n \"daily\": { \"start-time\": \"08:00:00Z\", \"duration\": \"10h\" }\n }\n ]\n}\n }\n put(NIC_INTENT % contHost, data)\n\n\ndef post(url, data):\n headers = {'Content-type': 'application/yang.data+json',\n 'Accept': 'application/yang.data+json'}\n print \"POST %s\" % url\n print json.dumps(data, indent=4, sort_keys=True)\n r = requests.post(url, data=json.dumps(data), headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))\n print r.text\n r.raise_for_status()\n\ndef put(url, data):\n headers = {'Content-type': 'application/yang.data+json',\n 'Accept': 'application/yang.data+json'}\n print \"PUT %s\" % url\n print json.dumps(data, indent=4, sort_keys=True)\n r = requests.put(url, data=json.dumps(data), headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))\n print r.text\n r.raise_for_status()\n\ndef delete(url):\n headers = {'Content-type': 'application/yang.data+json',\n 'Accept': 'application/yang.data+json'}\n print \"DELETE %s\" % url\n r = 
requests.delete(url, headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))\n print r.text\n r.raise_for_status()\n\n\nif __name__ == '__main__':\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--controller', default='127.0.0.1', help='controller IP')\n\targs=parser.parse_args()\n\n delete_nic_intents(args.controller)\n create_nic_intent(args.controller)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import datetime
import time
from sys import exit
from matplotlib import colors, pyplot as plt
from functools import reduce
import matplotlib.cm as cm
import seaborn as sns
from astropy.io import ascii, fits
from astropy.wcs import wcs
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from scipy.interpolate import interp2d
import matplotlib.mlab as mlab
import scipy, pylab
import rpy2
import cubehelix
import math
from pysextractor import SExtractor
__author__ = 'pnovais'

# Wall-clock start time; used at the end of the script to report total runtime.
ini=time.time()
class bcolors:
    """ANSI escape sequences for coloured / emphasised terminal output."""
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'        # bold text
    UNDERLINE = '\033[4m'   # underlined text
# Helper used to pull pixel data out of every opened FITS file below.
def get_image(f_sdss):
    """Return the primary-HDU pixel array of an opened SDSS FITS file."""
    # HDU 0 carries the image; the sky HDU (index 2) is deliberately unused here.
    primary_hdu = f_sdss[0]
    return primary_hdu.data
# Read the table listing the FITS image file names for the n photometric bands.
df_fit = pd.read_csv('data/arquivo_fits.csv')

'''
================================================================================
Rodando o SExtractor na imagem na banda r, criando uma segmentacao e um catalogo
com os objetos obtidos
ATUALIZAR NOME DA BANDA DE SEGMENTACAO
================================================================================
'''
# r-band frame used as the segmentation band for SExtractor.
fname = 'data/frame-r-002507-4-0226.fits'
sex = SExtractor()
# Extra measurement columns requested in the output catalogue.
sex.config['PARAMETERS_LIST'].append('FLUX_ISO')
sex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')
sex.config['PARAMETERS_LIST'].append('MAG_AUTO')
sex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')
sex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')
sex.config['PARAMETERS_LIST'].append('ALPHA_J2000')
sex.config['PARAMETERS_LIST'].append('DELTA_J2000')
sex.config['PARAMETERS_LIST'].append('FWHM_WORLD')
sex.config['PARAMETERS_LIST'].append('CLASS_STAR')
# Ask SExtractor to write the segmentation map as its check-image.
sex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION'
sex.run(fname)
# Pixel-wise segmentation map produced by the run above ('check.fits').
segmap = fits.open('check.fits')[0].data
# Load the catalogue written by SExtractor (16 header lines skipped).
df_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16)
df_cat.columns = ['num','flux_best','fluxerr_best', 'x','y','flags',
                  'fwhm_image', 'flux_iso','mag_isocor','mag_auto',
                  'petro_radius','ISO_AREA','ra','dec',
                  'fwhm_world','class_star']
# Keep only sources that are plausibly galaxies (extended and bright enough).
df_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)]
df_cat = df_cat.reset_index()
# Drop the stale index column added by reset_index, keep columns 1..14.
df_cat = df_cat.ix[:,1:15]

'''
================================================================================
Lendo as imagens, em todas as bandas, e gerando um dataframe para cada galaxia
utilizando astropy
Calculando o ceu em todas as bandas
ATUALIZAR NOME DA BANDA DE SEGMENTACAO
================================================================================
'''
df = pd.DataFrame()
df_sky = pd.DataFrame()
# NOTE(review): range(13, 14) processes only catalogue object #13 — presumably
# a debugging restriction; widen to range(len(df_cat)) for all galaxies.
for i_object in range(13,14):
    window_size = 250
    filter_seg = 'rSDSS'
    ra = df_cat['ra']
    dec = df_cat['dec']
    # The r-band frame provides the WCS used to convert (RA, Dec) to pixels.
    image_r = fits.open('data/frame-r-002507-4-0226.fits')
    wcsys = wcs.WCS(header=image_r[0].header)
    y, x = wcsys.wcs_world2pix(ra, dec, 1)
    # Square cutout (window_size x window_size pixels) centred on the object.
    interval = (int(round(x[i_object] - window_size / 2)), int(round(x[i_object] + window_size / 2)),
                int(round(y[i_object] - window_size / 2)), int(round(y[i_object] + window_size / 2)))
    df = pd.DataFrame()
    df_sky = pd.DataFrame()
    # Cutout of the SExtractor segmentation map for this object.
    seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]

    # Build per-band (x, y, flux) tables for both the galaxy cutout and a
    # fixed sky patch, concatenated column-wise across bands.
    for i_gal in range(len(df_fit)):
        f_sdss = fits.open('data/frame-%s-%s' %(df_fit['filter'][i_gal],
                                                df_fit['name'][i_gal]))
        img = get_image(f_sdss)
        img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]
        plt.figure(1)
        plt.clf()
        plt.imshow(100*np.log10(img_cut/255), cmap='spectral')
        plt.colorbar()
        band=df_fit['filter'][i_gal]
        # Flatten the cutout into an (x, y, flux) table for this band.
        nrows, ncols = img_cut.shape
        xx, yy = np.meshgrid( *np.ogrid[:ncols, :nrows] )
        table = np.column_stack(( xx.flatten(), yy.flatten(), img_cut.flatten() ))
        temp = pd.DataFrame(table, columns=['x','y',band])
        df = pd.concat([df,temp], axis=1)
        # Sky estimate: fixed 85x85 patch around (RA, Dec) = (351.101, 14.737636).
        sky_r = fits.open('data/frame-%s-%s' %(df_fit['filter'][i_gal],
                                               df_fit['name'][i_gal]))
        sky = get_image(sky_r)
        wcsys = wcs.WCS(header=sky_r[0].header)
        yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)
        delta_x = 85
        delta_y = 85
        interval_sky = (int(round(xc - delta_x / 2)), int(round(xc + delta_x / 2)), int(round(yc - delta_y / 2)),
                        int(round(yc + delta_y / 2)))
        img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:interval_sky[3]]
        sky_nrows, sky_ncols = img_sky.shape
        xxc, yyc = np.meshgrid( *np.ogrid[:sky_ncols, :sky_nrows] )
        table_sky = np.column_stack(( xxc.flatten(), yyc.flatten(), img_sky.flatten() ))
        temp_sky = pd.DataFrame(table_sky, columns=['x','y',band])
        df_sky = pd.concat([df_sky,temp_sky], axis=1)

    # Keep one (x, y) pair plus the flux column from each band cutout.
    df = df.ix[:, [0,1,2,5,8,11,14]]
    df_sky = df_sky.ix[:, [0,1,2,5,8,11,14]]

    '''
    Imagem da galaxia, na banda r.
    '''
    plt.figure(1)
    plt.clf()
    # NOTE(review): i_gal is the leftover value from the band loop above,
    # i.e. the last row of df_fit — confirm this picks the intended r frame.
    r_sdss = fits.open('data/frame-r-%s' %(df_fit['name'][i_gal]))
    img_r = get_image(r_sdss)
    img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]
    cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)
    imgplot = plt.imshow(100*np.log10(img_cut_r/255), cmap='spectral')
    titulo='Galaxy #%s - banda r' %(df_cat['num'][i_object])
    plt.title(titulo)
    plt.colorbar()
    figura = 'figures/galaxy_#%s' %df_cat['num'][i_object]
    plt.savefig(figura)

    '''
    Imagem segmentada da galaxia, na banda r.
    '''
    plt.figure(1)
    plt.clf()
    cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)
    imgplot = plt.imshow(seg_sex, cmap='spectral')
    titulo='Segmentation Galaxy #%s - banda r' %(df_cat['num'][i_object])
    plt.title(titulo)
    plt.colorbar()
    figura = 'figures/seg_galaxy_#%s' %df_cat['num'][i_object]
    plt.savefig(figura)

    '''
    ================================================================================
    Salvando os fluxos de cada galaxia em um arquivo txt
    ================================================================================
    '''
    # Dump the per-pixel flux table (x, y, u, g, r, i, z) for this galaxy.
    saida_fluxes = 'data/all_band_fluxes_%s.txt' %df_cat['num'][i_object]
    formats=['%d','%d','%5.4f','%5.4f','%5.4f','%5.4f','%5.4f']
    headers2='x\ty\tu\tg\tr\ti\tz'
    np.savetxt(saida_fluxes,df, delimiter='\t',header=headers2, fmt = formats)
    print('')
    print('>> Os dados estao em: "%s".' %saida_fluxes)

    '''
    ================================================================================
    Subtraindo o ceu, na banda r
    ================================================================================
    '''
    # Split coordinates from fluxes, subtract the mean sky per band, rejoin.
    df_aux=df.ix[:,2:]
    df_aux1=df.ix[:,:2]
    df_sky_aux = df_sky.ix[:,2:]
    df_aux3 = (df_aux - df_sky_aux.mean())
    df_rss=df_aux1.join(df_aux3)

    """
    A segmentacao consiste de usar um limiar para separar o objeto do fundo.
    No nosso caso, usamos limiar = alpha*std_ceu
    """
    '''
    ================================================================================
    SEGMENTACAO
    ================================================================================
    '''
    # Select sky-subtracted pixels above the threshold (2.5 sigma of r-band sky).
    limiar = 2.5*df_sky.r.std()
    df_seg = df_rss.ix[df_rss['r'] > limiar]
    print('Pixeis acima do limiar: %d' %len(df_seg))
    np.savetxt('fof2.txt',df_seg,delimiter='\t')

# Report total wall-clock processing time (ini was set at script start).
fim = time.time()
time_proc = fim - ini
print('')
print(bcolors.HEADER + 'tempo de processamento: %fs' %time_proc + bcolors.ENDC)
|
normal
|
{
"blob_id": "736fee6f9a46b8568b2dd217b81d54d689306630",
"index": 970,
"step-1": "<mask token>\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\n<mask token>\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\n<mask token>\nsex.run(fname)\n<mask token>\nfor i_object in range(13, 14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = int(round(x[i_object] - window_size / 2)), int(round(x[\n i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2)\n ), int(round(y[i_object] + window_size / 2))\n df = pd.DataFrame()\n df_sky = pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n for i_gal in range(len(df_fit)):\n f_sdss = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100 * np.log10(img_cut / 255), cmap='spectral')\n plt.colorbar()\n band = df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid(*np.ogrid[:ncols, :nrows])\n table = np.column_stack((xx.flatten(), yy.flatten(), img_cut.flatten())\n )\n temp = pd.DataFrame(table, columns=['x', 'y', band])\n df = pd.concat([df, temp], axis=1)\n 
sky_r = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = int(round(xc - delta_x / 2)), int(round(xc + delta_x /\n 2)), int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:\n interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid(*np.ogrid[:sky_ncols, :sky_nrows])\n table_sky = np.column_stack((xxc.flatten(), yyc.flatten(), img_sky.\n flatten()))\n temp_sky = pd.DataFrame(table_sky, columns=['x', 'y', band])\n df_sky = pd.concat([df_sky, temp_sky], axis=1)\n df = df.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n df_sky = df_sky.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n \"\"\"\n Imagem da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' % df_fit['name'][i_gal])\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(100 * np.log10(img_cut_r / 255), cmap='spectral')\n titulo = 'Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n Imagem segmentada da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(seg_sex, cmap='spectral')\n titulo = 'Segmentation Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n ================================================================================\n Salvando os fluxos de cada galaxia em um arquivo txt\n 
================================================================================\n \"\"\"\n saida_fluxes = 'data/all_band_fluxes_%s.txt' % df_cat['num'][i_object]\n formats = ['%d', '%d', '%5.4f', '%5.4f', '%5.4f', '%5.4f', '%5.4f']\n headers2 = 'x\\ty\\tu\\tg\\tr\\ti\\tz'\n np.savetxt(saida_fluxes, df, delimiter='\\t', header=headers2, fmt=formats)\n print('')\n print('>> Os dados estao em: \"%s\".' % saida_fluxes)\n \"\"\"\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n \"\"\"\n df_aux = df.ix[:, 2:]\n df_aux1 = df.ix[:, :2]\n df_sky_aux = df_sky.ix[:, 2:]\n df_aux3 = df_aux - df_sky_aux.mean()\n df_rss = df_aux1.join(df_aux3)\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n \"\"\"\n ================================================================================\n SEGMENTACAO\n ================================================================================\n \"\"\"\n limiar = 2.5 * df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' % len(df_seg))\n np.savetxt('fof2.txt', df_seg, delimiter='\\t')\n<mask token>\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' % time_proc + bcolors.ENDC\n )\n",
"step-3": "<mask token>\n__author__ = 'pnovais'\nini = time.time()\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\ndf_fit = pd.read_csv('data/arquivo_fits.csv')\n<mask token>\nfname = 'data/frame-r-002507-4-0226.fits'\nsex = SExtractor()\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\nsex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION'\nsex.run(fname)\nsegmap = fits.open('check.fits')[0].data\ndf_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16)\ndf_cat.columns = ['num', 'flux_best', 'fluxerr_best', 'x', 'y', 'flags',\n 'fwhm_image', 'flux_iso', 'mag_isocor', 'mag_auto', 'petro_radius',\n 'ISO_AREA', 'ra', 'dec', 'fwhm_world', 'class_star']\ndf_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)]\ndf_cat = df_cat.reset_index()\ndf_cat = df_cat.ix[:, 1:15]\n<mask token>\ndf = pd.DataFrame()\ndf_sky = pd.DataFrame()\nfor i_object in range(13, 14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = int(round(x[i_object] - window_size / 2)), int(round(x[\n i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2)\n ), int(round(y[i_object] + window_size / 2))\n df = pd.DataFrame()\n df_sky = 
pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n for i_gal in range(len(df_fit)):\n f_sdss = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100 * np.log10(img_cut / 255), cmap='spectral')\n plt.colorbar()\n band = df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid(*np.ogrid[:ncols, :nrows])\n table = np.column_stack((xx.flatten(), yy.flatten(), img_cut.flatten())\n )\n temp = pd.DataFrame(table, columns=['x', 'y', band])\n df = pd.concat([df, temp], axis=1)\n sky_r = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = int(round(xc - delta_x / 2)), int(round(xc + delta_x /\n 2)), int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:\n interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid(*np.ogrid[:sky_ncols, :sky_nrows])\n table_sky = np.column_stack((xxc.flatten(), yyc.flatten(), img_sky.\n flatten()))\n temp_sky = pd.DataFrame(table_sky, columns=['x', 'y', band])\n df_sky = pd.concat([df_sky, temp_sky], axis=1)\n df = df.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n df_sky = df_sky.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n \"\"\"\n Imagem da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' % df_fit['name'][i_gal])\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(100 * np.log10(img_cut_r / 255), cmap='spectral')\n titulo = 'Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n 
plt.colorbar()\n figura = 'figures/galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n Imagem segmentada da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(seg_sex, cmap='spectral')\n titulo = 'Segmentation Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n ================================================================================\n Salvando os fluxos de cada galaxia em um arquivo txt\n ================================================================================\n \"\"\"\n saida_fluxes = 'data/all_band_fluxes_%s.txt' % df_cat['num'][i_object]\n formats = ['%d', '%d', '%5.4f', '%5.4f', '%5.4f', '%5.4f', '%5.4f']\n headers2 = 'x\\ty\\tu\\tg\\tr\\ti\\tz'\n np.savetxt(saida_fluxes, df, delimiter='\\t', header=headers2, fmt=formats)\n print('')\n print('>> Os dados estao em: \"%s\".' 
% saida_fluxes)\n \"\"\"\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n \"\"\"\n df_aux = df.ix[:, 2:]\n df_aux1 = df.ix[:, :2]\n df_sky_aux = df_sky.ix[:, 2:]\n df_aux3 = df_aux - df_sky_aux.mean()\n df_rss = df_aux1.join(df_aux3)\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n \"\"\"\n ================================================================================\n SEGMENTACAO\n ================================================================================\n \"\"\"\n limiar = 2.5 * df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' % len(df_seg))\n np.savetxt('fof2.txt', df_seg, delimiter='\\t')\nfim = time.time()\ntime_proc = fim - ini\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' % time_proc + bcolors.ENDC\n )\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport datetime\nimport time\nfrom sys import exit\nfrom matplotlib import colors, pyplot as plt\nfrom functools import reduce\nimport matplotlib.cm as cm\nimport seaborn as sns\nfrom astropy.io import ascii, fits\nfrom astropy.wcs import wcs\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom scipy.interpolate import interp2d\nimport matplotlib.mlab as mlab\nimport scipy, pylab\nimport rpy2\nimport cubehelix\nimport math\nfrom pysextractor import SExtractor\n__author__ = 'pnovais'\nini = time.time()\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\ndf_fit = pd.read_csv('data/arquivo_fits.csv')\n<mask token>\nfname = 'data/frame-r-002507-4-0226.fits'\nsex = SExtractor()\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\nsex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION'\nsex.run(fname)\nsegmap = fits.open('check.fits')[0].data\ndf_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16)\ndf_cat.columns = ['num', 'flux_best', 'fluxerr_best', 'x', 'y', 'flags',\n 'fwhm_image', 'flux_iso', 'mag_isocor', 'mag_auto', 'petro_radius',\n 'ISO_AREA', 'ra', 'dec', 'fwhm_world', 'class_star']\ndf_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)]\ndf_cat = df_cat.reset_index()\ndf_cat = df_cat.ix[:, 1:15]\n<mask 
token>\ndf = pd.DataFrame()\ndf_sky = pd.DataFrame()\nfor i_object in range(13, 14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = int(round(x[i_object] - window_size / 2)), int(round(x[\n i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2)\n ), int(round(y[i_object] + window_size / 2))\n df = pd.DataFrame()\n df_sky = pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n for i_gal in range(len(df_fit)):\n f_sdss = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100 * np.log10(img_cut / 255), cmap='spectral')\n plt.colorbar()\n band = df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid(*np.ogrid[:ncols, :nrows])\n table = np.column_stack((xx.flatten(), yy.flatten(), img_cut.flatten())\n )\n temp = pd.DataFrame(table, columns=['x', 'y', band])\n df = pd.concat([df, temp], axis=1)\n sky_r = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = int(round(xc - delta_x / 2)), int(round(xc + delta_x /\n 2)), int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:\n interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid(*np.ogrid[:sky_ncols, :sky_nrows])\n table_sky = np.column_stack((xxc.flatten(), yyc.flatten(), img_sky.\n flatten()))\n temp_sky = pd.DataFrame(table_sky, columns=['x', 'y', band])\n df_sky = pd.concat([df_sky, temp_sky], axis=1)\n df = 
df.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n df_sky = df_sky.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n \"\"\"\n Imagem da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' % df_fit['name'][i_gal])\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(100 * np.log10(img_cut_r / 255), cmap='spectral')\n titulo = 'Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n Imagem segmentada da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(seg_sex, cmap='spectral')\n titulo = 'Segmentation Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n ================================================================================\n Salvando os fluxos de cada galaxia em um arquivo txt\n ================================================================================\n \"\"\"\n saida_fluxes = 'data/all_band_fluxes_%s.txt' % df_cat['num'][i_object]\n formats = ['%d', '%d', '%5.4f', '%5.4f', '%5.4f', '%5.4f', '%5.4f']\n headers2 = 'x\\ty\\tu\\tg\\tr\\ti\\tz'\n np.savetxt(saida_fluxes, df, delimiter='\\t', header=headers2, fmt=formats)\n print('')\n print('>> Os dados estao em: \"%s\".' 
% saida_fluxes)\n \"\"\"\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n \"\"\"\n df_aux = df.ix[:, 2:]\n df_aux1 = df.ix[:, :2]\n df_sky_aux = df_sky.ix[:, 2:]\n df_aux3 = df_aux - df_sky_aux.mean()\n df_rss = df_aux1.join(df_aux3)\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n \"\"\"\n ================================================================================\n SEGMENTACAO\n ================================================================================\n \"\"\"\n limiar = 2.5 * df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' % len(df_seg))\n np.savetxt('fof2.txt', df_seg, delimiter='\\t')\nfim = time.time()\ntime_proc = fim - ini\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' % time_proc + bcolors.ENDC\n )\n",
"step-5": "\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport time\nfrom sys import exit\nfrom matplotlib import colors, pyplot as plt\nfrom functools import reduce\nimport matplotlib.cm as cm\nimport seaborn as sns\nfrom astropy.io import ascii, fits\nfrom astropy.wcs import wcs\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom scipy.interpolate import interp2d\nimport matplotlib.mlab as mlab\nimport scipy, pylab\nimport rpy2\nimport cubehelix\nimport math\nfrom pysextractor import SExtractor\n\n__author__ = 'pnovais'\n\nini=time.time()\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n#definindo a classe que ira ler as imagens fits\ndef get_image(f_sdss):\n img = f_sdss[0].data\n# sky = f_sdss[2].data\n return img\n\n#abertura do arquivo com o nome das imagens, nas n bandas\ndf_fit = pd.read_csv('data/arquivo_fits.csv')\n\n'''\n================================================================================\nRodando o SExtractor na imagem na banda r, criando uma segmentacao e um catalogo\ncom os objetos obtidos\nATUALIZAR NOME DA BANDA DE SEGMENTACAO\n================================================================================\n'''\nfname = 'data/frame-r-002507-4-0226.fits'\nsex = SExtractor()\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\nsex.config['CHECKIMAGE_TYPE'] = 
'SEGMENTATION'\nsex.run(fname)\nsegmap = fits.open('check.fits')[0].data\n\ndf_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16)\ndf_cat.columns = ['num','flux_best','fluxerr_best', 'x','y','flags',\n 'fwhm_image', 'flux_iso','mag_isocor','mag_auto',\n 'petro_radius','ISO_AREA','ra','dec',\n 'fwhm_world','class_star']\n\n#selecao dos objetos que devem ser galaxias\ndf_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)]\ndf_cat = df_cat.reset_index()\ndf_cat = df_cat.ix[:,1:15]\n\n'''\n================================================================================\nLendo as imagens, em todas as bandas, e gerando um dataframe para cada galaxia\nutilizando astropy\nCalculando o ceu em todas as bandas\n\nATUALIZAR NOME DA BANDA DE SEGMENTACAO\n================================================================================\n'''\n\ndf = pd.DataFrame()\ndf_sky = pd.DataFrame()\n\n\nfor i_object in range(13,14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = (int(round(x[i_object] - window_size / 2)), int(round(x[i_object] + window_size / 2)),\n int(round(y[i_object] - window_size / 2)), int(round(y[i_object] + window_size / 2)))\n df = pd.DataFrame()\n df_sky = pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n\n for i_gal in range(len(df_fit)):\n f_sdss = fits.open('data/frame-%s-%s' %(df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100*np.log10(img_cut/255), cmap='spectral')\n plt.colorbar()\n band=df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid( *np.ogrid[:ncols, :nrows] )\n table = np.column_stack(( xx.flatten(), yy.flatten(), 
img_cut.flatten() ))\n temp = pd.DataFrame(table, columns=['x','y',band])\n df = pd.concat([df,temp], axis=1)\n\n sky_r = fits.open('data/frame-%s-%s' %(df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = (int(round(xc - delta_x / 2)), int(round(xc + delta_x / 2)), int(round(yc - delta_y / 2)),\n int(round(yc + delta_y / 2)))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid( *np.ogrid[:sky_ncols, :sky_nrows] )\n table_sky = np.column_stack(( xxc.flatten(), yyc.flatten(), img_sky.flatten() ))\n temp_sky = pd.DataFrame(table_sky, columns=['x','y',band])\n df_sky = pd.concat([df_sky,temp_sky], axis=1)\n\n df = df.ix[:, [0,1,2,5,8,11,14]]\n df_sky = df_sky.ix[:, [0,1,2,5,8,11,14]]\n\n '''\n Imagem da galaxia, na banda r.\n '''\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' %(df_fit['name'][i_gal]))\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)\n imgplot = plt.imshow(100*np.log10(img_cut_r/255), cmap='spectral')\n titulo='Galaxy #%s - banda r' %(df_cat['num'][i_object])\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/galaxy_#%s' %df_cat['num'][i_object]\n plt.savefig(figura)\n '''\n Imagem segmentada da galaxia, na banda r.\n '''\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)\n imgplot = plt.imshow(seg_sex, cmap='spectral')\n titulo='Segmentation Galaxy #%s - banda r' %(df_cat['num'][i_object])\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' %df_cat['num'][i_object]\n plt.savefig(figura)\n\n '''\n ================================================================================\n Salvando os fluxos de cada galaxia em 
um arquivo txt\n ================================================================================\n '''\n saida_fluxes = 'data/all_band_fluxes_%s.txt' %df_cat['num'][i_object]\n formats=['%d','%d','%5.4f','%5.4f','%5.4f','%5.4f','%5.4f']\n headers2='x\\ty\\tu\\tg\\tr\\ti\\tz'\n np.savetxt(saida_fluxes,df, delimiter='\\t',header=headers2, fmt = formats)\n print('')\n print('>> Os dados estao em: \"%s\".' %saida_fluxes)\n\n '''\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n '''\n df_aux=df.ix[:,2:]\n df_aux1=df.ix[:,:2]\n df_sky_aux = df_sky.ix[:,2:]\n df_aux3 = (df_aux - df_sky_aux.mean())\n df_rss=df_aux1.join(df_aux3)\n\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n '''\n ================================================================================\n SEGMENTACAO\n ================================================================================\n '''\n #SELECAO DOS PIXEIS ACIMA DO LIMIAR\n limiar = 2.5*df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' %len(df_seg))\n np.savetxt('fof2.txt',df_seg,delimiter='\\t')\n\n\n\n\nfim = time.time()\ntime_proc = fim - ini\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' %time_proc + bcolors.ENDC)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
L5 = [0]*10
print(L5)
L5[2] = 20
print(L5)
print(L5[1:4])
L5.append(30)
print(L5)
L5.remove(30) #Elimina la primera ocurrencia del objeto
print(L5)
L6 = [1,2,3,4,5,6]
print(L6[1::2])
print(L6[::2])
|
normal
|
{
"blob_id": "052824082854c5f7721efb7faaf5a794e9be2789",
"index": 6517,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(L5)\n<mask token>\nprint(L5)\nprint(L5[1:4])\nL5.append(30)\nprint(L5)\nL5.remove(30)\nprint(L5)\n<mask token>\nprint(L6[1::2])\nprint(L6[::2])\n",
"step-3": "L5 = [0] * 10\nprint(L5)\nL5[2] = 20\nprint(L5)\nprint(L5[1:4])\nL5.append(30)\nprint(L5)\nL5.remove(30)\nprint(L5)\nL6 = [1, 2, 3, 4, 5, 6]\nprint(L6[1::2])\nprint(L6[::2])\n",
"step-4": "L5 = [0]*10\nprint(L5)\n\nL5[2] = 20\nprint(L5)\n\nprint(L5[1:4])\n\nL5.append(30)\nprint(L5)\n\n\nL5.remove(30) #Elimina la primera ocurrencia del objeto\nprint(L5) \n\nL6 = [1,2,3,4,5,6]\nprint(L6[1::2])\nprint(L6[::2])",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import tcod as libtcod
import color
from input_handlers import consts
from input_handlers.ask_user_event_handler import AskUserEventHandler
class SelectIndexHandler(AskUserEventHandler):
    """
    Handles asking the user for an index on the map.
    """

    def __init__(self, engine):
        """Start with the cursor on the player's tile."""
        super().__init__(engine)
        player = self.engine.player
        engine.mouse_location = (player.x, player.y)

    def on_render(self, console):
        """
        Highlight the tile under the cursor.
        """
        super().on_render(console)
        cursor_x, cursor_y = self.engine.mouse_location
        console.tiles_rgb['bg'][cursor_x, cursor_y] = color.white
        console.tiles_rgb['fg'][cursor_x, cursor_y] = color.black

    def ev_keydown(self, event):
        """Move the cursor with movement keys, confirm with confirm keys."""
        key = event.sym
        if key in consts.MOVE_KEYS:
            # Holding modifier keys speeds up cursor movement.
            speed = 1
            for mod_mask, factor in (
                (libtcod.event.KMOD_LSHIFT | libtcod.event.KMOD_RSHIFT, 5),
                (libtcod.event.KMOD_LCTRL | libtcod.event.KMOD_RCTRL, 10),
                (libtcod.event.KMOD_LALT | libtcod.event.KMOD_RALT, 20),
            ):
                if event.mod & mod_mask:
                    speed *= factor
            dx, dy = consts.MOVE_KEYS[key]
            cursor_x, cursor_y = self.engine.mouse_location
            cursor_x += dx * speed
            cursor_y += dy * speed
            # Keep the cursor index inside the map bounds.
            cursor_x = max(0, min(cursor_x, self.engine.game_map.width - 1))
            cursor_y = max(0, min(cursor_y, self.engine.game_map.height - 1))
            self.engine.mouse_location = (cursor_x, cursor_y)
            return None
        if key in consts.CONFIRM_KEYS:
            return self.on_index_selected(*self.engine.mouse_location)
        return super().ev_keydown(event)

    def ev_mousebuttondown(self, event):
        """
        Left click confirms a selection
        """
        if self.engine.game_map.in_bounds(*event.tile) and event.button == 1:
            return self.on_index_selected(*event.tile)
        return super().ev_mousebuttondown(event)

    def on_index_selected(self, x, y):
        """Subclasses decide what a confirmed map index means."""
        raise NotImplementedError()
|
normal
|
{
"blob_id": "8c7dcff80eeb8d7d425cfb25da8a30fc15daf5f9",
"index": 4872,
"step-1": "<mask token>\n\n\nclass SelectIndexHandler(AskUserEventHandler):\n <mask token>\n <mask token>\n\n def on_render(self, console):\n \"\"\"\n Highlight the tile under the cursor.\n \"\"\"\n super().on_render(console)\n x, y = self.engine.mouse_location\n console.tiles_rgb['bg'][x, y] = color.white\n console.tiles_rgb['fg'][x, y] = color.black\n <mask token>\n <mask token>\n\n def on_index_selected(self, x, y):\n raise NotImplementedError()\n",
"step-2": "<mask token>\n\n\nclass SelectIndexHandler(AskUserEventHandler):\n <mask token>\n\n def __init__(self, engine):\n super().__init__(engine)\n player = self.engine.player\n engine.mouse_location = player.x, player.y\n\n def on_render(self, console):\n \"\"\"\n Highlight the tile under the cursor.\n \"\"\"\n super().on_render(console)\n x, y = self.engine.mouse_location\n console.tiles_rgb['bg'][x, y] = color.white\n console.tiles_rgb['fg'][x, y] = color.black\n\n def ev_keydown(self, event):\n key = event.sym\n if key in consts.MOVE_KEYS:\n modifier = 1\n if event.mod & (libtcod.event.KMOD_LSHIFT | libtcod.event.\n KMOD_RSHIFT):\n modifier *= 5\n if event.mod & (libtcod.event.KMOD_LCTRL | libtcod.event.KMOD_RCTRL\n ):\n modifier *= 10\n if event.mod & (libtcod.event.KMOD_LALT | libtcod.event.KMOD_RALT):\n modifier *= 20\n x, y = self.engine.mouse_location\n dx, dy = consts.MOVE_KEYS[key]\n x += dx * modifier\n y += dy * modifier\n x = max(0, min(x, self.engine.game_map.width - 1))\n y = max(0, min(y, self.engine.game_map.height - 1))\n self.engine.mouse_location = x, y\n return None\n elif key in consts.CONFIRM_KEYS:\n return self.on_index_selected(*self.engine.mouse_location)\n return super().ev_keydown(event)\n\n def ev_mousebuttondown(self, event):\n \"\"\"\n Left click confirms a selection\n \"\"\"\n if self.engine.game_map.in_bounds(*event.tile):\n if event.button == 1:\n return self.on_index_selected(*event.tile)\n return super().ev_mousebuttondown(event)\n\n def on_index_selected(self, x, y):\n raise NotImplementedError()\n",
"step-3": "<mask token>\n\n\nclass SelectIndexHandler(AskUserEventHandler):\n \"\"\"\n Handles asking the user for an index on the map.\n \"\"\"\n\n def __init__(self, engine):\n super().__init__(engine)\n player = self.engine.player\n engine.mouse_location = player.x, player.y\n\n def on_render(self, console):\n \"\"\"\n Highlight the tile under the cursor.\n \"\"\"\n super().on_render(console)\n x, y = self.engine.mouse_location\n console.tiles_rgb['bg'][x, y] = color.white\n console.tiles_rgb['fg'][x, y] = color.black\n\n def ev_keydown(self, event):\n key = event.sym\n if key in consts.MOVE_KEYS:\n modifier = 1\n if event.mod & (libtcod.event.KMOD_LSHIFT | libtcod.event.\n KMOD_RSHIFT):\n modifier *= 5\n if event.mod & (libtcod.event.KMOD_LCTRL | libtcod.event.KMOD_RCTRL\n ):\n modifier *= 10\n if event.mod & (libtcod.event.KMOD_LALT | libtcod.event.KMOD_RALT):\n modifier *= 20\n x, y = self.engine.mouse_location\n dx, dy = consts.MOVE_KEYS[key]\n x += dx * modifier\n y += dy * modifier\n x = max(0, min(x, self.engine.game_map.width - 1))\n y = max(0, min(y, self.engine.game_map.height - 1))\n self.engine.mouse_location = x, y\n return None\n elif key in consts.CONFIRM_KEYS:\n return self.on_index_selected(*self.engine.mouse_location)\n return super().ev_keydown(event)\n\n def ev_mousebuttondown(self, event):\n \"\"\"\n Left click confirms a selection\n \"\"\"\n if self.engine.game_map.in_bounds(*event.tile):\n if event.button == 1:\n return self.on_index_selected(*event.tile)\n return super().ev_mousebuttondown(event)\n\n def on_index_selected(self, x, y):\n raise NotImplementedError()\n",
"step-4": "import tcod as libtcod\nimport color\nfrom input_handlers import consts\nfrom input_handlers.ask_user_event_handler import AskUserEventHandler\n\n\nclass SelectIndexHandler(AskUserEventHandler):\n \"\"\"\n Handles asking the user for an index on the map.\n \"\"\"\n\n def __init__(self, engine):\n super().__init__(engine)\n player = self.engine.player\n engine.mouse_location = player.x, player.y\n\n def on_render(self, console):\n \"\"\"\n Highlight the tile under the cursor.\n \"\"\"\n super().on_render(console)\n x, y = self.engine.mouse_location\n console.tiles_rgb['bg'][x, y] = color.white\n console.tiles_rgb['fg'][x, y] = color.black\n\n def ev_keydown(self, event):\n key = event.sym\n if key in consts.MOVE_KEYS:\n modifier = 1\n if event.mod & (libtcod.event.KMOD_LSHIFT | libtcod.event.\n KMOD_RSHIFT):\n modifier *= 5\n if event.mod & (libtcod.event.KMOD_LCTRL | libtcod.event.KMOD_RCTRL\n ):\n modifier *= 10\n if event.mod & (libtcod.event.KMOD_LALT | libtcod.event.KMOD_RALT):\n modifier *= 20\n x, y = self.engine.mouse_location\n dx, dy = consts.MOVE_KEYS[key]\n x += dx * modifier\n y += dy * modifier\n x = max(0, min(x, self.engine.game_map.width - 1))\n y = max(0, min(y, self.engine.game_map.height - 1))\n self.engine.mouse_location = x, y\n return None\n elif key in consts.CONFIRM_KEYS:\n return self.on_index_selected(*self.engine.mouse_location)\n return super().ev_keydown(event)\n\n def ev_mousebuttondown(self, event):\n \"\"\"\n Left click confirms a selection\n \"\"\"\n if self.engine.game_map.in_bounds(*event.tile):\n if event.button == 1:\n return self.on_index_selected(*event.tile)\n return super().ev_mousebuttondown(event)\n\n def on_index_selected(self, x, y):\n raise NotImplementedError()\n",
"step-5": "import tcod as libtcod\n\nimport color\nfrom input_handlers import consts\nfrom input_handlers.ask_user_event_handler import AskUserEventHandler\n\n\nclass SelectIndexHandler(AskUserEventHandler):\n \"\"\"\n Handles asking the user for an index on the map.\n \"\"\"\n\n def __init__(self, engine):\n super().__init__(engine)\n player = self.engine.player\n engine.mouse_location = (player.x, player.y)\n\n def on_render(self, console):\n \"\"\"\n Highlight the tile under the cursor.\n \"\"\"\n super().on_render(console)\n x, y = self.engine.mouse_location\n console.tiles_rgb['bg'][x, y] = color.white\n console.tiles_rgb['fg'][x, y] = color.black\n\n def ev_keydown(self, event):\n key = event.sym\n if key in consts.MOVE_KEYS:\n modifier = 1 # Holding modifier keys will speed up key movement\n if event.mod & (libtcod.event.KMOD_LSHIFT | libtcod.event.KMOD_RSHIFT):\n modifier *= 5\n if event.mod & (libtcod.event.KMOD_LCTRL | libtcod.event.KMOD_RCTRL):\n modifier *= 10\n if event.mod & (libtcod.event.KMOD_LALT | libtcod.event.KMOD_RALT):\n modifier *= 20\n\n x, y = self.engine.mouse_location\n dx, dy = consts.MOVE_KEYS[key]\n x += dx * modifier\n y += dy * modifier\n # Restrict the cursor inddex to the map size.\n x = max(0, min(x, self.engine.game_map.width - 1))\n y = max(0, min(y, self.engine.game_map.height - 1))\n self.engine.mouse_location = (x, y)\n return None\n elif key in consts.CONFIRM_KEYS:\n return self.on_index_selected(*self.engine.mouse_location)\n return super().ev_keydown(event)\n\n def ev_mousebuttondown(self, event):\n \"\"\"\n Left click confirms a selection\n \"\"\"\n if self.engine.game_map.in_bounds(*event.tile):\n if event.button == 1:\n return self.on_index_selected(*event.tile)\n return super().ev_mousebuttondown(event)\n\n def on_index_selected(self, x, y):\n raise NotImplementedError()\n",
"step-ids": [
3,
6,
7,
8,
9
]
}
|
[
3,
6,
7,
8,
9
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/6/20 下午4:00
# @Author : Huang HUi
# @Site :
# @File : query_parse.py
# @Software: PyCharm
from mysqlConnection import mysqlConnection
import yaml
import copy
import time
import csv
import json
from collections import OrderedDict
import ast
#
# GIVEN_QUERY = {'days': [10,14], 'countries': [{'country_id': 28, 'day': None}],
# 'regions': [{'region_id': 2, 'day': None}, {'region_id': 27, 'day': None}, {'region_id': 69, 'day': None}], 'pois': [],
# 'regionNotGo': [], 'poiNotGo': [], 'regionSorted': [135, 131], 'availableMonths': [],
# 'price': [5000, 15000], 'hotelRating': None, 'arrivalRegionId': 27, 'departRegionId': None}
# GIVEN_QUERY={'days': [10,13], 'countries': [{'country_id': 11, 'day': None}], 'regions': [{'region_id': 266, 'day': None},
# {'region_id': 220, 'day': None}], 'pois': [795, 800,878,1536]}
# GIVEN_QUERY={'days': [12], 'countries': [{'country_id': 28, 'day': None}],
# 'regions': [{'region_id': 2, 'day': None}, {'region_id': 70, 'day': None}],
# 'pois': [1361, 1380, 1382, 1385, 1386, 1413, 1512, 1700, 1701, 1712, 1713]}
def query_parse(GIVEN_QUERY):
    """Unpack a trip-search query dict and fetch candidate start/end parts.

    Every field of GIVEN_QUERY is optional; missing keys fall back to None
    or an empty list.  A missing/empty 'countries' entry means "no country
    filter" (previously a missing key crashed with KeyError at SQL time).

    Returns a 15-tuple:
        (countryIds_query, days_query, regions_query, regionDic_query,
         pois_query, regionNotGo_query, poiNotGo_query, regionSorted_query,
         availableMonths_query, price_query, hotelRating_query,
         arrivalRegionId_query, departRegionId_query, startParts, endParts)
    where startParts/endParts are lists of tidy_parts ids usable as the
    first/last leg of a trip.
    """
    countries = GIVEN_QUERY.get('countries')
    try:
        countryIds_query = [c['country_id'] for c in countries]
    except (TypeError, KeyError):
        # 'countries' missing/None, or an item without a country_id.
        countryIds_query = None
    days_query = GIVEN_QUERY.get('days')
    regions_query = GIVEN_QUERY.get('regions', [])
    try:
        regionDic_query = [{r['region_id']: r['day']} for r in regions_query]
    except (TypeError, KeyError):
        regionDic_query = []
    pois_query = GIVEN_QUERY.get('pois', [])
    regionNotGo_query = GIVEN_QUERY.get('regionNotGo', [])
    poiNotGo_query = GIVEN_QUERY.get('poiNotGo', [])
    regionSorted_query = GIVEN_QUERY.get('regionSorted', [])
    availableMonths_query = GIVEN_QUERY.get('availableMonths', [])
    price_query = GIVEN_QUERY.get('price')
    hotelRating_query = GIVEN_QUERY.get('hotelRating')
    arrivalRegionId_query = GIVEN_QUERY.get('arrivalRegionId')
    departRegionId_query = GIVEN_QUERY.get('departRegionId')

    connection = mysqlConnection()
    try:
        with connection.cursor() as cursor:
            # Candidate first legs, optionally pinned to the arrival region.
            cursor.execute(_parts_sql('is_start', countries,
                                      countryIds_query, arrivalRegionId_query))
            startParts = cursor.fetchall()
            # Candidate last legs, optionally pinned to the departure region.
            cursor.execute(_parts_sql('is_end', countries,
                                      countryIds_query, departRegionId_query))
            endParts = cursor.fetchall()
    finally:
        connection.close()

    startParts = [row['id'] for row in startParts]
    endParts = [row['id'] for row in endParts]

    return (countryIds_query, days_query, regions_query, regionDic_query,
            pois_query, regionNotGo_query, poiNotGo_query,
            regionSorted_query, availableMonths_query, price_query,
            hotelRating_query, arrivalRegionId_query, departRegionId_query,
            startParts, endParts)


def _parts_sql(edge_column, countries, country_ids, region_id):
    """Build the SELECT for candidate start or end parts.

    edge_column is 'is_start' or 'is_end'; the two queries are otherwise
    identical, filtering to live parts ('state' not canceled, not deleted)
    that have POIs.
    NOTE(review): ids are interpolated straight into the SQL, so they must
    come from trusted/validated input; parameterized queries would be safer.
    """
    if countries:
        # str(list)[1:-1] renders e.g. [1, 2] as "1, 2" for the IN clause.
        id_list = str(country_ids)[1:-1]
        if region_id:
            return (
                "SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts "
                "join regions on tidy_parts.region_id = regions.id "
                "WHERE tidy_parts.%s = 1 and tidy_parts.poi_ids is not NULL "
                "and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null "
                "and region_id =(%s) and country_id in (%s)"
                % (edge_column, region_id, id_list))
        return (
            "SELECT tidy_parts.id as id, country_id FROM tidy_parts "
            "join regions on tidy_parts.region_id = regions.id "
            "WHERE tidy_parts.%s = 1 and tidy_parts.poi_ids is not NULL "
            "and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null "
            "and country_id in (%s)" % (edge_column, id_list))
    # No country filter: consider every live start/end part.
    return ("SELECT id FROM tidy_parts WHERE tidy_parts.%s = 1 "
            "and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' "
            "and tidy_parts.deleted_at is null " % edge_column)
|
normal
|
{
"blob_id": "b52807a15cef8f07f685f8761a470d4a24d9c3dc",
"index": 6603,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef query_parse(GIVEN_QUERY):\n try:\n countryIds_query = list(map(lambda x: x['country_id'], GIVEN_QUERY[\n 'countries']))\n except:\n countryIds_query = None\n try:\n days_query = GIVEN_QUERY['days']\n except:\n days_query = None\n try:\n regions_query = GIVEN_QUERY['regions']\n except:\n regions_query = []\n try:\n regionDic_query = list(map(lambda x: {x['region_id']: x['day']},\n regions_query))\n except:\n regionDic_query = []\n try:\n pois_query = GIVEN_QUERY['pois']\n except:\n pois_query = []\n try:\n regionNotGo_query = GIVEN_QUERY['regionNotGo']\n except:\n regionNotGo_query = []\n try:\n poiNotGo_query = GIVEN_QUERY['poiNotGo']\n except:\n poiNotGo_query = []\n try:\n regionSorted_query = GIVEN_QUERY['regionSorted']\n except:\n regionSorted_query = []\n try:\n availableMonths_query = GIVEN_QUERY['availableMonths']\n except:\n availableMonths_query = []\n try:\n price_query = GIVEN_QUERY['price']\n except:\n price_query = None\n try:\n hotelRating_query = GIVEN_QUERY['hotelRating']\n except:\n hotelRating_query = None\n try:\n arrivalRegionId_query = GIVEN_QUERY['arrivalRegionId']\n except:\n arrivalRegionId_query = None\n try:\n departRegionId_query = GIVEN_QUERY['departRegionId']\n except:\n departRegionId_query = None\n connection = mysqlConnection()\n try:\n with connection.cursor() as cursor:\n if GIVEN_QUERY['countries']:\n if arrivalRegionId_query:\n sql = (\n \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\"\n % (arrivalRegionId_query, str(countryIds_query)[1:-1])\n )\n else:\n sql = (\n \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and 
tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\"\n % str(countryIds_query)[1:-1])\n else:\n sql = (\n \"SELECT id FROM tidy_parts WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n )\n cursor.execute(sql)\n startParts = cursor.fetchall()\n if GIVEN_QUERY['countries']:\n if departRegionId_query:\n sql = (\n \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\"\n % (departRegionId_query, str(countryIds_query)[1:-1]))\n else:\n sql = (\n \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\"\n % str(countryIds_query)[1:-1])\n else:\n sql = (\n \"SELECT id FROM tidy_parts WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n )\n cursor.execute(sql)\n endParts = cursor.fetchall()\n finally:\n connection.close()\n startParts = [dict['id'] for dict in startParts]\n endParts = [dict['id'] for dict in endParts]\n return (countryIds_query, days_query, regions_query, regionDic_query,\n pois_query, regionNotGo_query, poiNotGo_query, regionSorted_query,\n availableMonths_query, price_query, hotelRating_query,\n arrivalRegionId_query, departRegionId_query, startParts, endParts)\n",
"step-3": "from mysqlConnection import mysqlConnection\nimport yaml\nimport copy\nimport time\nimport csv\nimport json\nfrom collections import OrderedDict\nimport ast\n\n\ndef query_parse(GIVEN_QUERY):\n try:\n countryIds_query = list(map(lambda x: x['country_id'], GIVEN_QUERY[\n 'countries']))\n except:\n countryIds_query = None\n try:\n days_query = GIVEN_QUERY['days']\n except:\n days_query = None\n try:\n regions_query = GIVEN_QUERY['regions']\n except:\n regions_query = []\n try:\n regionDic_query = list(map(lambda x: {x['region_id']: x['day']},\n regions_query))\n except:\n regionDic_query = []\n try:\n pois_query = GIVEN_QUERY['pois']\n except:\n pois_query = []\n try:\n regionNotGo_query = GIVEN_QUERY['regionNotGo']\n except:\n regionNotGo_query = []\n try:\n poiNotGo_query = GIVEN_QUERY['poiNotGo']\n except:\n poiNotGo_query = []\n try:\n regionSorted_query = GIVEN_QUERY['regionSorted']\n except:\n regionSorted_query = []\n try:\n availableMonths_query = GIVEN_QUERY['availableMonths']\n except:\n availableMonths_query = []\n try:\n price_query = GIVEN_QUERY['price']\n except:\n price_query = None\n try:\n hotelRating_query = GIVEN_QUERY['hotelRating']\n except:\n hotelRating_query = None\n try:\n arrivalRegionId_query = GIVEN_QUERY['arrivalRegionId']\n except:\n arrivalRegionId_query = None\n try:\n departRegionId_query = GIVEN_QUERY['departRegionId']\n except:\n departRegionId_query = None\n connection = mysqlConnection()\n try:\n with connection.cursor() as cursor:\n if GIVEN_QUERY['countries']:\n if arrivalRegionId_query:\n sql = (\n \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\"\n % (arrivalRegionId_query, str(countryIds_query)[1:-1])\n )\n else:\n sql = (\n \"SELECT tidy_parts.id as id, country_id FROM 
tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\"\n % str(countryIds_query)[1:-1])\n else:\n sql = (\n \"SELECT id FROM tidy_parts WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n )\n cursor.execute(sql)\n startParts = cursor.fetchall()\n if GIVEN_QUERY['countries']:\n if departRegionId_query:\n sql = (\n \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\"\n % (departRegionId_query, str(countryIds_query)[1:-1]))\n else:\n sql = (\n \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\"\n % str(countryIds_query)[1:-1])\n else:\n sql = (\n \"SELECT id FROM tidy_parts WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n )\n cursor.execute(sql)\n endParts = cursor.fetchall()\n finally:\n connection.close()\n startParts = [dict['id'] for dict in startParts]\n endParts = [dict['id'] for dict in endParts]\n return (countryIds_query, days_query, regions_query, regionDic_query,\n pois_query, regionNotGo_query, poiNotGo_query, regionSorted_query,\n availableMonths_query, price_query, hotelRating_query,\n arrivalRegionId_query, departRegionId_query, startParts, endParts)\n",
"step-4": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/6/20 下午4:00\n# @Author : Huang HUi\n# @Site : \n# @File : query_parse.py\n# @Software: PyCharm\nfrom mysqlConnection import mysqlConnection\nimport yaml\nimport copy\nimport time\nimport csv\nimport json\nfrom collections import OrderedDict\nimport ast\n\n#\n# GIVEN_QUERY = {'days': [10,14], 'countries': [{'country_id': 28, 'day': None}],\n# 'regions': [{'region_id': 2, 'day': None}, {'region_id': 27, 'day': None}, {'region_id': 69, 'day': None}], 'pois': [],\n# 'regionNotGo': [], 'poiNotGo': [], 'regionSorted': [135, 131], 'availableMonths': [],\n# 'price': [5000, 15000], 'hotelRating': None, 'arrivalRegionId': 27, 'departRegionId': None}\n\n# GIVEN_QUERY={'days': [10,13], 'countries': [{'country_id': 11, 'day': None}], 'regions': [{'region_id': 266, 'day': None},\n# {'region_id': 220, 'day': None}], 'pois': [795, 800,878,1536]}\n\n# GIVEN_QUERY={'days': [12], 'countries': [{'country_id': 28, 'day': None}],\n# 'regions': [{'region_id': 2, 'day': None}, {'region_id': 70, 'day': None}],\n# 'pois': [1361, 1380, 1382, 1385, 1386, 1413, 1512, 1700, 1701, 1712, 1713]}\n\ndef query_parse(GIVEN_QUERY):\n\n try:\n countryIds_query = list(map(lambda x: x['country_id'], GIVEN_QUERY['countries']))\n except :\n countryIds_query=None\n try:\n days_query=GIVEN_QUERY['days']\n except :\n days_query=None\n try:\n regions_query = GIVEN_QUERY['regions']\n except :\n regions_query=[]\n try:\n regionDic_query = list(map(lambda x: {x['region_id']: x['day']}, regions_query))\n except :\n regionDic_query=[]\n try:\n pois_query=GIVEN_QUERY['pois']\n except :\n pois_query=[]\n try:\n regionNotGo_query=GIVEN_QUERY['regionNotGo']\n except :\n regionNotGo_query=[]\n try:\n poiNotGo_query=GIVEN_QUERY['poiNotGo']\n except :\n poiNotGo_query=[]\n try:\n regionSorted_query=GIVEN_QUERY['regionSorted']\n except :\n regionSorted_query=[]\n try:\n availableMonths_query=GIVEN_QUERY['availableMonths']\n except :\n 
availableMonths_query=[]\n try:\n price_query=GIVEN_QUERY['price']\n except :\n price_query=None\n try:\n hotelRating_query=GIVEN_QUERY['hotelRating']\n except :\n hotelRating_query=None\n try:\n arrivalRegionId_query=GIVEN_QUERY['arrivalRegionId']\n except :\n arrivalRegionId_query=None\n try:\n departRegionId_query=GIVEN_QUERY['departRegionId']\n except:\n departRegionId_query=None\n\n\n connection=mysqlConnection()\n try:\n with connection.cursor() as cursor:\n\n if GIVEN_QUERY['countries']:\n # country condition\n if arrivalRegionId_query:\n sql = \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\" % (arrivalRegionId_query,str(countryIds_query)[1:-1])\n else:\n sql = \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\" % str(countryIds_query)[1:-1]\n else:\n # all\n sql = \"SELECT id FROM tidy_parts WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n cursor.execute(sql)\n startParts = cursor.fetchall()\n if GIVEN_QUERY['countries']:\n if departRegionId_query:\n sql = \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\" % (departRegionId_query, str(countryIds_query)[1:-1])\n else:\n sql = \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE 
tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\" % str(countryIds_query)[1:-1]\n else:\n sql = \"SELECT id FROM tidy_parts WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n cursor.execute(sql)\n endParts = cursor.fetchall()\n\n\n\n\n finally:\n connection.close()\n\n startParts = [dict['id'] for dict in startParts]\n endParts = [dict['id'] for dict in endParts]\n\n\n return countryIds_query, days_query, regions_query, regionDic_query, \\\n pois_query, regionNotGo_query, poiNotGo_query, regionSorted_query, availableMonths_query, price_query, \\\n hotelRating_query, arrivalRegionId_query, departRegionId_query,startParts,endParts\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class max31865(object):
<|reserved_special_token_0|>
def __init__(self, csPin=8, misoPin=9, mosiPin=10, clkPin=11):
self.csPin = csPin
self.misoPin = misoPin
self.mosiPin = mosiPin
self.clkPin = clkPin
self.setupGPIO()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def writeRegister(self, regNum, dataByte):
GPIO.output(self.csPin, GPIO.LOW)
addressByte = 128 | regNum
self.sendByte(addressByte)
self.sendByte(dataByte)
GPIO.output(self.csPin, GPIO.HIGH)
def readRegisters(self, regNumStart, numRegisters):
out = []
GPIO.output(self.csPin, GPIO.LOW)
self.sendByte(regNumStart)
for byte in range(numRegisters):
data = self.recvByte()
out.append(data)
GPIO.output(self.csPin, GPIO.HIGH)
return out
def sendByte(self, byte):
for bit in range(8):
GPIO.output(self.clkPin, GPIO.HIGH)
if byte & 128:
GPIO.output(self.mosiPin, GPIO.HIGH)
else:
GPIO.output(self.mosiPin, GPIO.LOW)
byte <<= 1
GPIO.output(self.clkPin, GPIO.LOW)
def recvByte(self):
byte = 0
for bit in range(8):
GPIO.output(self.clkPin, GPIO.HIGH)
byte <<= 1
if GPIO.input(self.misoPin):
byte |= 1
GPIO.output(self.clkPin, GPIO.LOW)
return byte
def calcPT100Temp(self, RTD_ADC_Code):
R_REF = 430.0
Res0 = 100.0
a = 0.0039083
b = -5.775e-07
c = -4.18301e-12
Res_RTD = RTD_ADC_Code * R_REF / 32768.0
temp_C = -(a * Res0) + math.sqrt(a * a * Res0 * Res0 - 4 * (b *
Res0) * (Res0 - Res_RTD))
temp_C = temp_C / (2 * (b * Res0))
temp_C_line = RTD_ADC_Code / 32.0 - 256.0
if temp_C < 0:
temp_C = temp_C_line
return temp_C
class FaultError(Exception):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class max31865(object):
<|reserved_special_token_0|>
def __init__(self, csPin=8, misoPin=9, mosiPin=10, clkPin=11):
self.csPin = csPin
self.misoPin = misoPin
self.mosiPin = mosiPin
self.clkPin = clkPin
self.setupGPIO()
def setupGPIO(self):
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.csPin, GPIO.OUT)
GPIO.setup(self.misoPin, GPIO.IN)
GPIO.setup(self.mosiPin, GPIO.OUT)
GPIO.setup(self.clkPin, GPIO.OUT)
GPIO.output(self.csPin, GPIO.HIGH)
GPIO.output(self.clkPin, GPIO.LOW)
GPIO.output(self.mosiPin, GPIO.LOW)
<|reserved_special_token_0|>
def writeRegister(self, regNum, dataByte):
GPIO.output(self.csPin, GPIO.LOW)
addressByte = 128 | regNum
self.sendByte(addressByte)
self.sendByte(dataByte)
GPIO.output(self.csPin, GPIO.HIGH)
def readRegisters(self, regNumStart, numRegisters):
out = []
GPIO.output(self.csPin, GPIO.LOW)
self.sendByte(regNumStart)
for byte in range(numRegisters):
data = self.recvByte()
out.append(data)
GPIO.output(self.csPin, GPIO.HIGH)
return out
def sendByte(self, byte):
for bit in range(8):
GPIO.output(self.clkPin, GPIO.HIGH)
if byte & 128:
GPIO.output(self.mosiPin, GPIO.HIGH)
else:
GPIO.output(self.mosiPin, GPIO.LOW)
byte <<= 1
GPIO.output(self.clkPin, GPIO.LOW)
def recvByte(self):
byte = 0
for bit in range(8):
GPIO.output(self.clkPin, GPIO.HIGH)
byte <<= 1
if GPIO.input(self.misoPin):
byte |= 1
GPIO.output(self.clkPin, GPIO.LOW)
return byte
def calcPT100Temp(self, RTD_ADC_Code):
R_REF = 430.0
Res0 = 100.0
a = 0.0039083
b = -5.775e-07
c = -4.18301e-12
Res_RTD = RTD_ADC_Code * R_REF / 32768.0
temp_C = -(a * Res0) + math.sqrt(a * a * Res0 * Res0 - 4 * (b *
Res0) * (Res0 - Res_RTD))
temp_C = temp_C / (2 * (b * Res0))
temp_C_line = RTD_ADC_Code / 32.0 - 256.0
if temp_C < 0:
temp_C = temp_C_line
return temp_C
class FaultError(Exception):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class max31865(object):
    """Read temperature from a MAX31865 RTD-to-digital converter over
    bit-banged SPI on Raspberry Pi GPIO pins.  Any pins can be used.

    Temperature is derived from the RTD ADC code with the quadratic form of
    the Callendar-Van Dusen equation (ignoring the 3rd and 4th degree terms
    of the polynomial); below 0 degC the straight-line approximation is used
    instead.  Numpy could solve the full polynomial but slows the reading
    down, so it is not used here.
    """

    def __init__(self, csPin=8, misoPin=9, mosiPin=10, clkPin=11):
        """Remember the four SPI pin numbers (BCM numbering) and configure them."""
        self.csPin = csPin
        self.misoPin = misoPin
        self.mosiPin = mosiPin
        self.clkPin = clkPin
        self.setupGPIO()

    def setupGPIO(self):
        """Configure chip-select/clock/MOSI as outputs and MISO as input."""
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.csPin, GPIO.OUT)
        GPIO.setup(self.misoPin, GPIO.IN)
        GPIO.setup(self.mosiPin, GPIO.OUT)
        GPIO.setup(self.clkPin, GPIO.OUT)
        # Idle state: chip deselected, clock and data lines low.
        GPIO.output(self.csPin, GPIO.HIGH)
        GPIO.output(self.clkPin, GPIO.LOW)
        GPIO.output(self.mosiPin, GPIO.LOW)

    def readTemp(self):
        """Trigger a one-shot conversion and return the temperature in degC.

        Raises:
            FaultError: if the fault status register reports a threshold
                or voltage fault.
        """
        # 0xB2 config write -- presumably VBIAS on + one-shot conversion;
        # TODO confirm against the MAX31865 configuration register datasheet.
        self.writeRegister(0, 0xB2)
        time.sleep(0.1)  # give the conversion time to complete
        out = self.readRegisters(0, 8)
        conf_reg = out[0]  # configuration readback (kept for debugging)
        [rtd_msb, rtd_lsb] = [out[1], out[2]]
        # 15-bit ADC code; the dropped LSB is the fault flag bit.
        rtd_ADC_Code = (rtd_msb << 8 | rtd_lsb) >> 1
        temp_C = self.calcPT100Temp(rtd_ADC_Code)
        # High/low fault threshold registers (kept for debugging).
        [hft_msb, hft_lsb] = [out[3], out[4]]
        hft = (hft_msb << 8 | hft_lsb) >> 1
        [lft_msb, lft_lsb] = [out[5], out[6]]
        lft = (lft_msb << 8 | lft_lsb) >> 1
        status = out[7]
        # BUG FIX: the old checks were "status & mask == 1", which parses as
        # "(status & mask) == 1" and is never true (the masked value is 0 or
        # the mask itself), so faults were silently ignored.  Test the bits.
        if status & 0x80:
            raise FaultError('High threshold limit (Cable fault/open)')
        if status & 0x40:
            raise FaultError('Low threshold limit (Cable fault/short)')
        if status & 0x04:
            raise FaultError('Overvoltage or Undervoltage Error')
        return temp_C

    def writeRegister(self, regNum, dataByte):
        """Write one byte to register regNum (write address = 0x80 | regNum)."""
        GPIO.output(self.csPin, GPIO.LOW)
        addressByte = 0x80 | regNum
        self.sendByte(addressByte)
        self.sendByte(dataByte)
        GPIO.output(self.csPin, GPIO.HIGH)

    def readRegisters(self, regNumStart, numRegisters):
        """Read numRegisters consecutive bytes starting at regNumStart."""
        out = []
        GPIO.output(self.csPin, GPIO.LOW)
        self.sendByte(regNumStart)
        for byte in range(numRegisters):
            data = self.recvByte()
            out.append(data)
        GPIO.output(self.csPin, GPIO.HIGH)
        return out

    def sendByte(self, byte):
        """Bit-bang one byte out on MOSI, MSB first."""
        for bit in range(8):
            GPIO.output(self.clkPin, GPIO.HIGH)
            if byte & 0x80:
                GPIO.output(self.mosiPin, GPIO.HIGH)
            else:
                GPIO.output(self.mosiPin, GPIO.LOW)
            byte <<= 1
            GPIO.output(self.clkPin, GPIO.LOW)

    def recvByte(self):
        """Bit-bang one byte in from MISO, MSB first."""
        byte = 0
        for bit in range(8):
            GPIO.output(self.clkPin, GPIO.HIGH)
            byte <<= 1
            if GPIO.input(self.misoPin):
                byte |= 1
            GPIO.output(self.clkPin, GPIO.LOW)
        return byte

    def calcPT100Temp(self, RTD_ADC_Code):
        """Convert a 15-bit RTD ADC code to temperature in degC for a PT100.

        Uses the quadratic Callendar-Van Dusen solution; if that yields a
        negative temperature, falls back to the straight-line approximation
        (the quadratic form here is only valid above 0 degC).
        """
        R_REF = 430.0  # reference resistor on the breakout board
        Res0 = 100.0   # PT100 resistance at 0 degC
        a = 0.0039083  # Callendar-Van Dusen coefficient A
        b = -5.775e-07  # Callendar-Van Dusen coefficient B
        # ADC full scale (2^15) maps to R_REF ohms.
        Res_RTD = RTD_ADC_Code * R_REF / 32768.0
        temp_C = -(a * Res0) + math.sqrt(a * a * Res0 * Res0 - 4 * (b *
            Res0) * (Res0 - Res_RTD))
        temp_C = temp_C / (2 * (b * Res0))
        temp_C_line = RTD_ADC_Code / 32.0 - 256.0
        if temp_C < 0:
            temp_C = temp_C_line
        return temp_C
class FaultError(Exception):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class max31865(object):
    """Read temperature from a MAX31865 RTD-to-digital converter over
    bit-banged SPI on Raspberry Pi GPIO pins.  Any pins can be used.

    Temperature is derived from the RTD ADC code with the quadratic form of
    the Callendar-Van Dusen equation (ignoring the 3rd and 4th degree terms
    of the polynomial); below 0 degC the straight-line approximation is used
    instead.  Numpy could solve the full polynomial but slows the reading
    down, so it is not used here.
    """

    def __init__(self, csPin=8, misoPin=9, mosiPin=10, clkPin=11):
        """Remember the four SPI pin numbers (BCM numbering) and configure them."""
        self.csPin = csPin
        self.misoPin = misoPin
        self.mosiPin = mosiPin
        self.clkPin = clkPin
        self.setupGPIO()

    def setupGPIO(self):
        """Configure chip-select/clock/MOSI as outputs and MISO as input."""
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.csPin, GPIO.OUT)
        GPIO.setup(self.misoPin, GPIO.IN)
        GPIO.setup(self.mosiPin, GPIO.OUT)
        GPIO.setup(self.clkPin, GPIO.OUT)
        # Idle state: chip deselected, clock and data lines low.
        GPIO.output(self.csPin, GPIO.HIGH)
        GPIO.output(self.clkPin, GPIO.LOW)
        GPIO.output(self.mosiPin, GPIO.LOW)

    def readTemp(self):
        """Trigger a one-shot conversion and return the temperature in degC.

        Raises:
            FaultError: if the fault status register reports a threshold
                or voltage fault.
        """
        # 0xB2 config write -- presumably VBIAS on + one-shot conversion;
        # TODO confirm against the MAX31865 configuration register datasheet.
        self.writeRegister(0, 0xB2)
        time.sleep(0.1)  # give the conversion time to complete
        out = self.readRegisters(0, 8)
        conf_reg = out[0]  # configuration readback (kept for debugging)
        [rtd_msb, rtd_lsb] = [out[1], out[2]]
        # 15-bit ADC code; the dropped LSB is the fault flag bit.
        rtd_ADC_Code = (rtd_msb << 8 | rtd_lsb) >> 1
        temp_C = self.calcPT100Temp(rtd_ADC_Code)
        # High/low fault threshold registers (kept for debugging).
        [hft_msb, hft_lsb] = [out[3], out[4]]
        hft = (hft_msb << 8 | hft_lsb) >> 1
        [lft_msb, lft_lsb] = [out[5], out[6]]
        lft = (lft_msb << 8 | lft_lsb) >> 1
        status = out[7]
        # BUG FIX: the old checks were "status & mask == 1", which parses as
        # "(status & mask) == 1" and is never true (the masked value is 0 or
        # the mask itself), so faults were silently ignored.  Test the bits.
        if status & 0x80:
            raise FaultError('High threshold limit (Cable fault/open)')
        if status & 0x40:
            raise FaultError('Low threshold limit (Cable fault/short)')
        if status & 0x04:
            raise FaultError('Overvoltage or Undervoltage Error')
        return temp_C

    def writeRegister(self, regNum, dataByte):
        """Write one byte to register regNum (write address = 0x80 | regNum)."""
        GPIO.output(self.csPin, GPIO.LOW)
        addressByte = 0x80 | regNum
        self.sendByte(addressByte)
        self.sendByte(dataByte)
        GPIO.output(self.csPin, GPIO.HIGH)

    def readRegisters(self, regNumStart, numRegisters):
        """Read numRegisters consecutive bytes starting at regNumStart."""
        out = []
        GPIO.output(self.csPin, GPIO.LOW)
        self.sendByte(regNumStart)
        for byte in range(numRegisters):
            data = self.recvByte()
            out.append(data)
        GPIO.output(self.csPin, GPIO.HIGH)
        return out

    def sendByte(self, byte):
        """Bit-bang one byte out on MOSI, MSB first."""
        for bit in range(8):
            GPIO.output(self.clkPin, GPIO.HIGH)
            if byte & 0x80:
                GPIO.output(self.mosiPin, GPIO.HIGH)
            else:
                GPIO.output(self.mosiPin, GPIO.LOW)
            byte <<= 1
            GPIO.output(self.clkPin, GPIO.LOW)

    def recvByte(self):
        """Bit-bang one byte in from MISO, MSB first."""
        byte = 0
        for bit in range(8):
            GPIO.output(self.clkPin, GPIO.HIGH)
            byte <<= 1
            if GPIO.input(self.misoPin):
                byte |= 1
            GPIO.output(self.clkPin, GPIO.LOW)
        return byte

    def calcPT100Temp(self, RTD_ADC_Code):
        """Convert a 15-bit RTD ADC code to temperature in degC for a PT100.

        Uses the quadratic Callendar-Van Dusen solution; if that yields a
        negative temperature, falls back to the straight-line approximation
        (the quadratic form here is only valid above 0 degC).
        """
        R_REF = 430.0  # reference resistor on the breakout board
        Res0 = 100.0   # PT100 resistance at 0 degC
        a = 0.0039083  # Callendar-Van Dusen coefficient A
        b = -5.775e-07  # Callendar-Van Dusen coefficient B
        # ADC full scale (2^15) maps to R_REF ohms.
        Res_RTD = RTD_ADC_Code * R_REF / 32768.0
        temp_C = -(a * Res0) + math.sqrt(a * a * Res0 * Res0 - 4 * (b *
            Res0) * (Res0 - Res_RTD))
        temp_C = temp_C / (2 * (b * Res0))
        temp_C_line = RTD_ADC_Code / 32.0 - 256.0
        if temp_C < 0:
            temp_C = temp_C_line
        return temp_C
class FaultError(Exception):
    """Raised when the MAX31865 status register reports a sensor fault."""
    pass
if __name__ == '__main__':
    try:
        # BCM pin numbers for the software SPI bus
        csPin = 24
        misoPin = 21
        mosiPin = 17
        clkPin = 23
        # named 'sensor' so the builtin max() is not shadowed
        sensor = max31865(csPin, misoPin, mosiPin, clkPin)
        while True:
            tempC = sensor.readTemp()
            print(tempC)
            time.sleep(0.1)
    except KeyboardInterrupt:
        pass
    GPIO.cleanup()
<|reserved_special_token_1|>
#!/usr/bin/python
#The MIT License (MIT)
#
#Copyright (c) 2015 Stephen P. Smith
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import time, math
import RPi.GPIO as GPIO
#import numpy
class max31865(object):
    """Reading Temperature from the MAX31865 with GPIO using
    the Raspberry Pi. Any pins can be used.
    Numpy can be used to completely solve the Callendar-Van Dusen equation
    but it slows the temp reading down. I commented it out in the code.
    Both the quadratic formula using Callendar-Van Dusen equation (ignoring the
    3rd and 4th degree parts of the polynomial) and the straight line approx.
    temperature is calculated with the quadratic formula one being the most accurate.
    """

    def __init__(self, csPin=8, misoPin=9, mosiPin=10, clkPin=11):
        # BCM pin numbers for the software (bit-banged) SPI bus
        self.csPin = csPin
        self.misoPin = misoPin
        self.mosiPin = mosiPin
        self.clkPin = clkPin
        self.setupGPIO()

    def setupGPIO(self):
        """Configure the four SPI pins and park the bus idle (CS high, CLK low)."""
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.csPin, GPIO.OUT)
        GPIO.setup(self.misoPin, GPIO.IN)
        GPIO.setup(self.mosiPin, GPIO.OUT)
        GPIO.setup(self.clkPin, GPIO.OUT)
        GPIO.output(self.csPin, GPIO.HIGH)
        GPIO.output(self.clkPin, GPIO.LOW)
        GPIO.output(self.mosiPin, GPIO.LOW)

    def readTemp(self):
        """Trigger a one-shot conversion and return the temperature in degC.

        Raises:
            FaultError: if the fault status register reports a threshold
                or over/undervoltage fault.
        """
        # Config Register 0xB2:
        #   bit 7: Vbias -> 1 (ON)
        #   bit 6: Conversion Mode -> 0 (MANUAL)
        #   bit 5: 1-shot -> 1 (ON)
        #   bit 4: 3-wire select -> 1 (3 wire config)
        #   bits 3-2: fault detection cycle -> 0 (none)
        #   bit 1: fault status clear -> 1 (clear any fault)
        #   bit 0: 50/60 Hz filter select -> 0 (60Hz)
        # (0xD2 would select continuous auto conversion at 60 Hz.)
        self.writeRegister(0, 0xB2)

        # conversion time is less than 100ms
        time.sleep(.1)  # give it 100ms for conversion

        # read all registers
        out = self.readRegisters(0, 8)

        conf_reg = out[0]  # configuration register readback

        [rtd_msb, rtd_lsb] = [out[1], out[2]]
        # 15-bit ADC code: drop the fault flag in the LSB
        rtd_ADC_Code = ((rtd_msb << 8) | rtd_lsb) >> 1

        temp_C = self.calcPT100Temp(rtd_ADC_Code)

        [hft_msb, hft_lsb] = [out[3], out[4]]
        hft = ((hft_msb << 8) | hft_lsb) >> 1  # high fault threshold

        [lft_msb, lft_lsb] = [out[5], out[6]]
        lft = ((lft_msb << 8) | lft_lsb) >> 1  # low fault threshold

        status = out[7]
        # 10 Mohm resistor is on breakout board to help detect cable faults
        # bit 7: RTD High Threshold / cable fault open
        # bit 6: RTD Low Threshold / cable fault short
        # bit 5: REFIN- > 0.85 x VBias -> must be requested
        # bit 4: REFIN- < 0.85 x VBias (FORCE- open) -> must be requested
        # bit 3: RTDIN- < 0.85 x VBias (FORCE- open) -> must be requested
        # bit 2: Overvoltage / undervoltage fault
        # bits 1,0 don't care
        # BUGFIX: the original tested `(status & 0x80) == 1`, but the
        # masked value is 0x80 (not 1) when the bit is set, so the fault
        # exceptions could never be raised. Test the masked bit for
        # truthiness instead.
        if status & 0x80:
            raise FaultError("High threshold limit (Cable fault/open)")
        if status & 0x40:
            raise FaultError("Low threshold limit (Cable fault/short)")
        if status & 0x04:
            raise FaultError("Overvoltage or Undervoltage Error")

        return temp_C

    def writeRegister(self, regNum, dataByte):
        """Write one data byte to register ``regNum``."""
        GPIO.output(self.csPin, GPIO.LOW)
        # 0x8x to specify 'write register value'
        addressByte = 0x80 | regNum
        # first byte is address byte, the rest are data bytes
        self.sendByte(addressByte)
        self.sendByte(dataByte)
        GPIO.output(self.csPin, GPIO.HIGH)

    def readRegisters(self, regNumStart, numRegisters):
        """Read ``numRegisters`` consecutive registers starting at ``regNumStart``."""
        out = []
        GPIO.output(self.csPin, GPIO.LOW)
        # plain register number (high bit clear) specifies 'read'
        self.sendByte(regNumStart)
        for _ in range(numRegisters):
            out.append(self.recvByte())
        GPIO.output(self.csPin, GPIO.HIGH)
        return out

    def sendByte(self, byte):
        """Bit-bang one byte out MSB-first on MOSI."""
        for _ in range(8):
            GPIO.output(self.clkPin, GPIO.HIGH)
            if byte & 0x80:
                GPIO.output(self.mosiPin, GPIO.HIGH)
            else:
                GPIO.output(self.mosiPin, GPIO.LOW)
            byte <<= 1
            GPIO.output(self.clkPin, GPIO.LOW)

    def recvByte(self):
        """Clock in one byte MSB-first from MISO and return it."""
        byte = 0x00
        for _ in range(8):
            GPIO.output(self.clkPin, GPIO.HIGH)
            byte <<= 1
            if GPIO.input(self.misoPin):
                byte |= 0x1
            GPIO.output(self.clkPin, GPIO.LOW)
        return byte

    def calcPT100Temp(self, RTD_ADC_Code):
        """Convert the 15-bit RTD ADC code to degC via Callendar-Van Dusen.

        Solves the quadratic part of the equation (for 0 <= T <= 850 degC);
        negative results fall back to the straight-line approximation.
        """
        R_REF = 430.0  # Reference Resistor
        Res0 = 100.0  # Resistance at 0 degC for 400ohm R_Ref
        a = .00390830
        b = -.000000577500
        # c = -4.18301e-12 for -200 <= T <= 0 (degC); c = 0 for T >= 0.
        # Solving the full Callendar-Van Dusen polynomial with
        # numpy.roots([c*Res0, -c*Res0*100, b*Res0, a*Res0, Res0 - Res_RTD])
        # works but greatly slows the reading down.
        Res_RTD = (RTD_ADC_Code * R_REF) / 32768.0  # PT100 Resistance
        # Callendar-Van Dusen equation with c = 0:
        #   Res_RTD = Res0 + a*Res0*T + b*Res0*T**2
        # quadratic formula for 0 <= T <= 850 (degC):
        temp_C = -(a * Res0) + math.sqrt(a * a * Res0 * Res0 - 4 * (b * Res0) * (Res0 - Res_RTD))
        temp_C = temp_C / (2 * (b * Res0))
        temp_C_line = (RTD_ADC_Code / 32.0) - 256.0
        if temp_C < 0:
            # use straight line approximation if less than 0;
            # should never get here in this application
            temp_C = temp_C_line
        return temp_C
class FaultError(Exception):
    """Raised when the MAX31865 status register reports a sensor fault."""
    pass
if __name__ == "__main__":
    try:
        # BCM pin numbers for the software SPI bus
        csPin = 24
        misoPin = 21
        mosiPin = 17
        clkPin = 23
        # named 'sensor' so the builtin max() is not shadowed
        sensor = max31865(csPin, misoPin, mosiPin, clkPin)
        while True:
            tempC = sensor.readTemp()
            print(tempC)
            time.sleep(0.1)
    except KeyboardInterrupt:
        pass
    GPIO.cleanup()
|
flexible
|
{
"blob_id": "5d92c68e0fe7f37d4719fb9ca4274b29ff1cbb43",
"index": 4699,
"step-1": "<mask token>\n\n\nclass max31865(object):\n <mask token>\n\n def __init__(self, csPin=8, misoPin=9, mosiPin=10, clkPin=11):\n self.csPin = csPin\n self.misoPin = misoPin\n self.mosiPin = mosiPin\n self.clkPin = clkPin\n self.setupGPIO()\n <mask token>\n <mask token>\n\n def writeRegister(self, regNum, dataByte):\n GPIO.output(self.csPin, GPIO.LOW)\n addressByte = 128 | regNum\n self.sendByte(addressByte)\n self.sendByte(dataByte)\n GPIO.output(self.csPin, GPIO.HIGH)\n\n def readRegisters(self, regNumStart, numRegisters):\n out = []\n GPIO.output(self.csPin, GPIO.LOW)\n self.sendByte(regNumStart)\n for byte in range(numRegisters):\n data = self.recvByte()\n out.append(data)\n GPIO.output(self.csPin, GPIO.HIGH)\n return out\n\n def sendByte(self, byte):\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n if byte & 128:\n GPIO.output(self.mosiPin, GPIO.HIGH)\n else:\n GPIO.output(self.mosiPin, GPIO.LOW)\n byte <<= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n\n def recvByte(self):\n byte = 0\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n byte <<= 1\n if GPIO.input(self.misoPin):\n byte |= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n return byte\n\n def calcPT100Temp(self, RTD_ADC_Code):\n R_REF = 430.0\n Res0 = 100.0\n a = 0.0039083\n b = -5.775e-07\n c = -4.18301e-12\n Res_RTD = RTD_ADC_Code * R_REF / 32768.0\n temp_C = -(a * Res0) + math.sqrt(a * a * Res0 * Res0 - 4 * (b *\n Res0) * (Res0 - Res_RTD))\n temp_C = temp_C / (2 * (b * Res0))\n temp_C_line = RTD_ADC_Code / 32.0 - 256.0\n if temp_C < 0:\n temp_C = temp_C_line\n return temp_C\n\n\nclass FaultError(Exception):\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass max31865(object):\n <mask token>\n\n def __init__(self, csPin=8, misoPin=9, mosiPin=10, clkPin=11):\n self.csPin = csPin\n self.misoPin = misoPin\n self.mosiPin = mosiPin\n self.clkPin = clkPin\n self.setupGPIO()\n\n def setupGPIO(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.csPin, GPIO.OUT)\n GPIO.setup(self.misoPin, GPIO.IN)\n GPIO.setup(self.mosiPin, GPIO.OUT)\n GPIO.setup(self.clkPin, GPIO.OUT)\n GPIO.output(self.csPin, GPIO.HIGH)\n GPIO.output(self.clkPin, GPIO.LOW)\n GPIO.output(self.mosiPin, GPIO.LOW)\n <mask token>\n\n def writeRegister(self, regNum, dataByte):\n GPIO.output(self.csPin, GPIO.LOW)\n addressByte = 128 | regNum\n self.sendByte(addressByte)\n self.sendByte(dataByte)\n GPIO.output(self.csPin, GPIO.HIGH)\n\n def readRegisters(self, regNumStart, numRegisters):\n out = []\n GPIO.output(self.csPin, GPIO.LOW)\n self.sendByte(regNumStart)\n for byte in range(numRegisters):\n data = self.recvByte()\n out.append(data)\n GPIO.output(self.csPin, GPIO.HIGH)\n return out\n\n def sendByte(self, byte):\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n if byte & 128:\n GPIO.output(self.mosiPin, GPIO.HIGH)\n else:\n GPIO.output(self.mosiPin, GPIO.LOW)\n byte <<= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n\n def recvByte(self):\n byte = 0\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n byte <<= 1\n if GPIO.input(self.misoPin):\n byte |= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n return byte\n\n def calcPT100Temp(self, RTD_ADC_Code):\n R_REF = 430.0\n Res0 = 100.0\n a = 0.0039083\n b = -5.775e-07\n c = -4.18301e-12\n Res_RTD = RTD_ADC_Code * R_REF / 32768.0\n temp_C = -(a * Res0) + math.sqrt(a * a * Res0 * Res0 - 4 * (b *\n Res0) * (Res0 - Res_RTD))\n temp_C = temp_C / (2 * (b * Res0))\n temp_C_line = RTD_ADC_Code / 32.0 - 256.0\n if temp_C < 0:\n temp_C = temp_C_line\n return temp_C\n\n\nclass FaultError(Exception):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass max31865(object):\n \"\"\"Reading Temperature from the MAX31865 with GPIO using \n\t the Raspberry Pi. Any pins can be used.\n\t Numpy can be used to completely solve the Callendar-Van Dusen equation \n\t but it slows the temp reading down. I commented it out in the code. \n\t Both the quadratic formula using Callendar-Van Dusen equation (ignoring the\n\t 3rd and 4th degree parts of the polynomial) and the straight line approx.\n\t temperature is calculated with the quadratic formula one being the most accurate.\n\t\"\"\"\n\n def __init__(self, csPin=8, misoPin=9, mosiPin=10, clkPin=11):\n self.csPin = csPin\n self.misoPin = misoPin\n self.mosiPin = mosiPin\n self.clkPin = clkPin\n self.setupGPIO()\n\n def setupGPIO(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.csPin, GPIO.OUT)\n GPIO.setup(self.misoPin, GPIO.IN)\n GPIO.setup(self.mosiPin, GPIO.OUT)\n GPIO.setup(self.clkPin, GPIO.OUT)\n GPIO.output(self.csPin, GPIO.HIGH)\n GPIO.output(self.clkPin, GPIO.LOW)\n GPIO.output(self.mosiPin, GPIO.LOW)\n\n def readTemp(self):\n self.writeRegister(0, 178)\n time.sleep(0.1)\n out = self.readRegisters(0, 8)\n conf_reg = out[0]\n [rtd_msb, rtd_lsb] = [out[1], out[2]]\n rtd_ADC_Code = (rtd_msb << 8 | rtd_lsb) >> 1\n temp_C = self.calcPT100Temp(rtd_ADC_Code)\n [hft_msb, hft_lsb] = [out[3], out[4]]\n hft = (hft_msb << 8 | hft_lsb) >> 1\n [lft_msb, lft_lsb] = [out[5], out[6]]\n lft = (lft_msb << 8 | lft_lsb) >> 1\n status = out[7]\n if status & 128 == 1:\n raise FaultError('High threshold limit (Cable fault/open)')\n if status & 64 == 1:\n raise FaultError('Low threshold limit (Cable fault/short)')\n if status & 4 == 1:\n raise FaultError('Overvoltage or Undervoltage Error')\n return temp_C\n\n def writeRegister(self, regNum, dataByte):\n GPIO.output(self.csPin, GPIO.LOW)\n addressByte = 128 | regNum\n self.sendByte(addressByte)\n self.sendByte(dataByte)\n GPIO.output(self.csPin, GPIO.HIGH)\n\n def 
readRegisters(self, regNumStart, numRegisters):\n out = []\n GPIO.output(self.csPin, GPIO.LOW)\n self.sendByte(regNumStart)\n for byte in range(numRegisters):\n data = self.recvByte()\n out.append(data)\n GPIO.output(self.csPin, GPIO.HIGH)\n return out\n\n def sendByte(self, byte):\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n if byte & 128:\n GPIO.output(self.mosiPin, GPIO.HIGH)\n else:\n GPIO.output(self.mosiPin, GPIO.LOW)\n byte <<= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n\n def recvByte(self):\n byte = 0\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n byte <<= 1\n if GPIO.input(self.misoPin):\n byte |= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n return byte\n\n def calcPT100Temp(self, RTD_ADC_Code):\n R_REF = 430.0\n Res0 = 100.0\n a = 0.0039083\n b = -5.775e-07\n c = -4.18301e-12\n Res_RTD = RTD_ADC_Code * R_REF / 32768.0\n temp_C = -(a * Res0) + math.sqrt(a * a * Res0 * Res0 - 4 * (b *\n Res0) * (Res0 - Res_RTD))\n temp_C = temp_C / (2 * (b * Res0))\n temp_C_line = RTD_ADC_Code / 32.0 - 256.0\n if temp_C < 0:\n temp_C = temp_C_line\n return temp_C\n\n\nclass FaultError(Exception):\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass max31865(object):\n \"\"\"Reading Temperature from the MAX31865 with GPIO using \n\t the Raspberry Pi. Any pins can be used.\n\t Numpy can be used to completely solve the Callendar-Van Dusen equation \n\t but it slows the temp reading down. I commented it out in the code. \n\t Both the quadratic formula using Callendar-Van Dusen equation (ignoring the\n\t 3rd and 4th degree parts of the polynomial) and the straight line approx.\n\t temperature is calculated with the quadratic formula one being the most accurate.\n\t\"\"\"\n\n def __init__(self, csPin=8, misoPin=9, mosiPin=10, clkPin=11):\n self.csPin = csPin\n self.misoPin = misoPin\n self.mosiPin = mosiPin\n self.clkPin = clkPin\n self.setupGPIO()\n\n def setupGPIO(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.csPin, GPIO.OUT)\n GPIO.setup(self.misoPin, GPIO.IN)\n GPIO.setup(self.mosiPin, GPIO.OUT)\n GPIO.setup(self.clkPin, GPIO.OUT)\n GPIO.output(self.csPin, GPIO.HIGH)\n GPIO.output(self.clkPin, GPIO.LOW)\n GPIO.output(self.mosiPin, GPIO.LOW)\n\n def readTemp(self):\n self.writeRegister(0, 178)\n time.sleep(0.1)\n out = self.readRegisters(0, 8)\n conf_reg = out[0]\n [rtd_msb, rtd_lsb] = [out[1], out[2]]\n rtd_ADC_Code = (rtd_msb << 8 | rtd_lsb) >> 1\n temp_C = self.calcPT100Temp(rtd_ADC_Code)\n [hft_msb, hft_lsb] = [out[3], out[4]]\n hft = (hft_msb << 8 | hft_lsb) >> 1\n [lft_msb, lft_lsb] = [out[5], out[6]]\n lft = (lft_msb << 8 | lft_lsb) >> 1\n status = out[7]\n if status & 128 == 1:\n raise FaultError('High threshold limit (Cable fault/open)')\n if status & 64 == 1:\n raise FaultError('Low threshold limit (Cable fault/short)')\n if status & 4 == 1:\n raise FaultError('Overvoltage or Undervoltage Error')\n return temp_C\n\n def writeRegister(self, regNum, dataByte):\n GPIO.output(self.csPin, GPIO.LOW)\n addressByte = 128 | regNum\n self.sendByte(addressByte)\n self.sendByte(dataByte)\n GPIO.output(self.csPin, GPIO.HIGH)\n\n def 
readRegisters(self, regNumStart, numRegisters):\n out = []\n GPIO.output(self.csPin, GPIO.LOW)\n self.sendByte(regNumStart)\n for byte in range(numRegisters):\n data = self.recvByte()\n out.append(data)\n GPIO.output(self.csPin, GPIO.HIGH)\n return out\n\n def sendByte(self, byte):\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n if byte & 128:\n GPIO.output(self.mosiPin, GPIO.HIGH)\n else:\n GPIO.output(self.mosiPin, GPIO.LOW)\n byte <<= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n\n def recvByte(self):\n byte = 0\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n byte <<= 1\n if GPIO.input(self.misoPin):\n byte |= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n return byte\n\n def calcPT100Temp(self, RTD_ADC_Code):\n R_REF = 430.0\n Res0 = 100.0\n a = 0.0039083\n b = -5.775e-07\n c = -4.18301e-12\n Res_RTD = RTD_ADC_Code * R_REF / 32768.0\n temp_C = -(a * Res0) + math.sqrt(a * a * Res0 * Res0 - 4 * (b *\n Res0) * (Res0 - Res_RTD))\n temp_C = temp_C / (2 * (b * Res0))\n temp_C_line = RTD_ADC_Code / 32.0 - 256.0\n if temp_C < 0:\n temp_C = temp_C_line\n return temp_C\n\n\nclass FaultError(Exception):\n pass\n\n\nif __name__ == '__main__':\n try:\n csPin = 24\n misoPin = 21\n mosiPin = 17\n clkPin = 23\n max = max31865(csPin, misoPin, mosiPin, clkPin)\n while True:\n tempC = max.readTemp()\n print(tempC)\n time.sleep(0.1)\n except KeyboardInterrupt:\n pass\n GPIO.cleanup()\n",
"step-5": "#!/usr/bin/python\n#The MIT License (MIT)\n#\n#Copyright (c) 2015 Stephen P. Smith\n#\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n#\n#The above copyright notice and this permission notice shall be included in all\n#copies or substantial portions of the Software.\n#\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#SOFTWARE.\n\nimport time, math\nimport RPi.GPIO as GPIO\n#import numpy\n\nclass max31865(object):\n\t\"\"\"Reading Temperature from the MAX31865 with GPIO using \n\t the Raspberry Pi. Any pins can be used.\n\t Numpy can be used to completely solve the Callendar-Van Dusen equation \n\t but it slows the temp reading down. I commented it out in the code. 
\n\t Both the quadratic formula using Callendar-Van Dusen equation (ignoring the\n\t 3rd and 4th degree parts of the polynomial) and the straight line approx.\n\t temperature is calculated with the quadratic formula one being the most accurate.\n\t\"\"\"\n\tdef __init__(self, csPin = 8, misoPin = 9, mosiPin = 10, clkPin = 11):\n\t\tself.csPin = csPin\n\t\tself.misoPin = misoPin\n\t\tself.mosiPin = mosiPin\n\t\tself.clkPin = clkPin\n\t\tself.setupGPIO()\n\t\t\n\tdef setupGPIO(self):\n\t\tGPIO.setwarnings(False)\n\t\tGPIO.setmode(GPIO.BCM)\n\t\tGPIO.setup(self.csPin, GPIO.OUT)\n\t\tGPIO.setup(self.misoPin, GPIO.IN)\n\t\tGPIO.setup(self.mosiPin, GPIO.OUT)\n\t\tGPIO.setup(self.clkPin, GPIO.OUT)\n\n\t\tGPIO.output(self.csPin, GPIO.HIGH)\n\t\tGPIO.output(self.clkPin, GPIO.LOW)\n\t\tGPIO.output(self.mosiPin, GPIO.LOW)\n\n\tdef readTemp(self):\n\t\t#\n\t\t# b10000000 = 0x80\n\t\t# 0x8x to specify 'write register value'\n\t\t# 0xx0 to specify 'configuration register'\n\t\t#\n\t\t# 0b10110010 = 0xB2\n\t\t# Config Register\n\t\t# ---------------\n\t\t# bit 7: Vbias -> 1 (ON)\n\t\t# bit 6: Conversion Mode -> 0 (MANUAL)\n\t\t# bit5: 1-shot ->1 (ON)\n\t\t# bit4: 3-wire select -> 1 (3 wire config)\n\t\t# bits 3-2: fault detection cycle -> 0 (none)\n\t\t# bit 1: fault status clear -> 1 (clear any fault)\n\t\t# bit 0: 50/60 Hz filter select -> 0 (60Hz)\n\t\t#\n\t\t# 0b11010010 or 0xD2 for continuous auto conversion \n\t\t# at 60Hz (faster conversion)\n\t\t#\n\n\t\t#one shot\n\t\tself.writeRegister(0, 0xB2)\n\n\t\t# conversion time is less than 100ms\n\t\ttime.sleep(.1) #give it 100ms for conversion\n\n\t\t# read all registers\n\t\tout = self.readRegisters(0,8)\n\n\t\tconf_reg = out[0]\n\t\t# print(\"config register byte: %x\" % conf_reg)\n\n\t\t[rtd_msb, rtd_lsb] = [out[1], out[2]]\n\t\trtd_ADC_Code = (( rtd_msb << 8 ) | rtd_lsb ) >> 1\n\n\t\ttemp_C = self.calcPT100Temp(rtd_ADC_Code)\n\n\t\t[hft_msb, hft_lsb] = [out[3], out[4]]\n\t\thft = (( hft_msb << 8 ) | hft_lsb ) >> 1\n\t\t# 
print(\"high fault threshold: %d\" % hft)\n\n\t\t[lft_msb, lft_lsb] = [out[5], out[6]]\n\t\tlft = (( lft_msb << 8 ) | lft_lsb ) >> 1\n\t\t# print(\"low fault threshold: %d\" % lft)\n\n\t\tstatus = out[7]\n\t\t#\n\t\t# 10 Mohm resistor is on breakout board to help\n\t\t# detect cable faults\n\t\t# bit 7: RTD High Threshold / cable fault open \n\t\t# bit 6: RTD Low Threshold / cable fault short\n\t\t# bit 5: REFIN- > 0.85 x VBias -> must be requested\n\t\t# bit 4: REFIN- < 0.85 x VBias (FORCE- open) -> must be requested\n\t\t# bit 3: RTDIN- < 0.85 x VBias (FORCE- open) -> must be requested\n\t\t# bit 2: Overvoltage / undervoltage fault\n\t\t# bits 1,0 don't care\t\n\t\t#print \"Status byte: %x\" % status\n\n\t\tif ((status & 0x80) == 1):\n\t\t\traise FaultError(\"High threshold limit (Cable fault/open)\")\n\t\tif ((status & 0x40) == 1):\n\t\t\traise FaultError(\"Low threshold limit (Cable fault/short)\")\n\t\tif ((status & 0x04) == 1):\n\t\t\traise FaultError(\"Overvoltage or Undervoltage Error\") \n\n\t\treturn temp_C\n\t\t\n\tdef writeRegister(self, regNum, dataByte):\n\t\tGPIO.output(self.csPin, GPIO.LOW)\n\t\t\n\t\t# 0x8x to specify 'write register value'\n\t\taddressByte = 0x80 | regNum;\n\t\t\n\t\t# first byte is address byte\n\t\tself.sendByte(addressByte)\n\t\t# the rest are data bytes\n\t\tself.sendByte(dataByte)\n\n\t\tGPIO.output(self.csPin, GPIO.HIGH)\n\t\t\n\tdef readRegisters(self, regNumStart, numRegisters):\n\t\tout = []\n\t\tGPIO.output(self.csPin, GPIO.LOW)\n\t\t\n\t\t# 0x to specify 'read register value'\n\t\tself.sendByte(regNumStart)\n\t\t\n\t\tfor byte in range(numRegisters):\t\n\t\t\tdata = self.recvByte()\n\t\t\tout.append(data)\n\n\t\tGPIO.output(self.csPin, GPIO.HIGH)\n\t\treturn out\n\n\tdef sendByte(self,byte):\n\t\tfor bit in range(8):\n\t\t\tGPIO.output(self.clkPin, GPIO.HIGH)\n\t\t\tif (byte & 0x80):\n\t\t\t\tGPIO.output(self.mosiPin, GPIO.HIGH)\n\t\t\telse:\n\t\t\t\tGPIO.output(self.mosiPin, GPIO.LOW)\n\t\t\tbyte <<= 
1\n\t\t\tGPIO.output(self.clkPin, GPIO.LOW)\n\n\tdef recvByte(self):\n\t\tbyte = 0x00\n\t\tfor bit in range(8):\n\t\t\tGPIO.output(self.clkPin, GPIO.HIGH)\n\t\t\tbyte <<= 1\n\t\t\tif GPIO.input(self.misoPin):\n\t\t\t\tbyte |= 0x1\n\t\t\tGPIO.output(self.clkPin, GPIO.LOW)\n\t\treturn byte\t\n\t\n\tdef calcPT100Temp(self, RTD_ADC_Code):\n\t\tR_REF = 430.0 # Reference Resistor\n\t\tRes0 = 100.0; # Resistance at 0 degC for 400ohm R_Ref\n\t\ta = .00390830\n\t\tb = -.000000577500\n\t\t# c = -4.18301e-12 # for -200 <= T <= 0 (degC)\n\t\tc = -0.00000000000418301\n\t\t# c = 0 # for 0 <= T <= 850 (degC)\n\t\t#print(\"RTD ADC Code: %d\" % RTD_ADC_Code)\n\t\tRes_RTD = (RTD_ADC_Code * R_REF) / 32768.0 # PT100 Resistance\n\t\t#print(\"PT100 Resistance: %f ohms\" % Res_RTD)\n\t\t#\n\t\t# Callendar-Van Dusen equation\n\t\t# Res_RTD = Res0 * (1 + a*T + b*T**2 + c*(T-100)*T**3)\n\t\t# Res_RTD = Res0 + a*Res0*T + b*Res0*T**2 # c = 0\n\t\t# (c*Res0)T**4 - (c*Res0)*100*T**3 \n\t\t# + (b*Res0)*T**2 + (a*Res0)*T + (Res0 - Res_RTD) = 0\n\t\t#\n\t\t# quadratic formula:\n\t\t# for 0 <= T <= 850 (degC)\n\t\ttemp_C = -(a*Res0) + math.sqrt(a*a*Res0*Res0 - 4*(b*Res0)*(Res0 - Res_RTD))\n\t\ttemp_C = temp_C / (2*(b*Res0))\n\t\ttemp_C_line = (RTD_ADC_Code/32.0) - 256.0\n\t\t# removing numpy.roots will greatly speed things up\n\t\t#temp_C_numpy = numpy.roots([c*Res0, -c*Res0*100, b*Res0, a*Res0, (Res0 - Res_RTD)])\n\t\t#temp_C_numpy = abs(temp_C_numpy[-1])\n\t\t#print(\"Straight Line Approx. 
Temp: %f degC\" % temp_C_line)\n\t\t#print(\"Callendar-Van Dusen Temp (degC > 0): %f degC\" % temp_C)\n\t\t#print \"Solving Full Callendar-Van Dusen using numpy: %f\" % temp_C_numpy\n\t\tif (temp_C < 0): #use straight line approximation if less than 0\n\t\t\t# Can also use python lib numpy to solve cubic\n\t\t\t# Should never get here in this application\n\t\t\ttemp_C = temp_C_line\n\t\treturn temp_C\n\nclass FaultError(Exception):\n\tpass\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tcsPin = 24\n\t\tmisoPin = 21\n\t\tmosiPin = 17\n\t\tclkPin = 23\n\t\tmax = max31865(csPin,misoPin,mosiPin,clkPin)\n\t\twhile True:\n\t\t\ttempC = max.readTemp()\n\t\t\tprint(tempC)\n\t\t\ttime.sleep(0.1)\n\texcept KeyboardInterrupt:\n\t\tpass\n\tGPIO.cleanup()\n",
"step-ids": [
8,
9,
11,
12,
14
]
}
|
[
8,
9,
11,
12,
14
] |
#!/usr/bin/env python3
import pandas as pd
import csv
def get_apriori_input(input_file, output_file, sample_col="Sample", gene_id_col="Gene_ID"):
    """Collapse a per-gene TSV into one transaction row per sample.

    Reads a tab-separated table with (at least) a sample column and a
    gene-id column, and writes one tab-separated row per sample:
    ``<sample>\\t<gene1,gene2,...>`` — the "basket" format expected by
    apriori-style frequent-itemset tools.

    Parameters:
        input_file: path to the tab-separated input table.
        output_file: path the transaction rows are written to.
        sample_col: name of the column holding sample identifiers.
        gene_id_col: name of the column holding gene identifiers.
    """
    df = pd.read_csv(input_file, sep="\t")
    # unique() preserves first-appearance order, so output rows follow
    # the order in which samples first occur in the input
    sample_names = df[sample_col].unique()
    with open(output_file, "w") as out:
        csv_writer = csv.writer(out, delimiter="\t")
        for sample_name in sample_names:
            # named 'mask' so the builtin bool() is not shadowed
            mask = df[sample_col] == sample_name
            gene_string = ",".join(df.loc[mask, gene_id_col])
            csv_writer.writerow([sample_name, gene_string])
if __name__ == "__main__":
    # CLI: script.py <input_file> <output_file> <sample_col> <gene_id_col>
    import sys
    _program, input_file, output_file, sample_col, gene_id_col = sys.argv
    get_apriori_input(input_file, output_file, sample_col, gene_id_col)
|
normal
|
{
"blob_id": "e14bea6376c8649bf9c9c5759d530af773664cd4",
"index": 891,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_apriori_input(input_file, output_file, sample_col='Sample',\n gene_id_col='Gene_ID'):\n df = pd.read_csv(input_file, sep='\\t')\n sample_names = df[sample_col].unique()\n with open(output_file, 'w') as out:\n csv_writer = csv.writer(out, delimiter='\\t')\n for sample_name in sample_names:\n bool = df[sample_col] == sample_name\n df_sample = df[bool]\n gene_ids = df_sample[gene_id_col]\n gene_string = ','.join(gene_ids)\n csv_writer.writerow([sample_name, gene_string])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_apriori_input(input_file, output_file, sample_col='Sample',\n gene_id_col='Gene_ID'):\n df = pd.read_csv(input_file, sep='\\t')\n sample_names = df[sample_col].unique()\n with open(output_file, 'w') as out:\n csv_writer = csv.writer(out, delimiter='\\t')\n for sample_name in sample_names:\n bool = df[sample_col] == sample_name\n df_sample = df[bool]\n gene_ids = df_sample[gene_id_col]\n gene_string = ','.join(gene_ids)\n csv_writer.writerow([sample_name, gene_string])\n\n\nif __name__ == '__main__':\n import sys\n program, input_file, output_file, sample_col, gene_id_col = sys.argv\n get_apriori_input(input_file, output_file, sample_col, gene_id_col)\n",
"step-4": "import pandas as pd\nimport csv\n\n\ndef get_apriori_input(input_file, output_file, sample_col='Sample',\n gene_id_col='Gene_ID'):\n df = pd.read_csv(input_file, sep='\\t')\n sample_names = df[sample_col].unique()\n with open(output_file, 'w') as out:\n csv_writer = csv.writer(out, delimiter='\\t')\n for sample_name in sample_names:\n bool = df[sample_col] == sample_name\n df_sample = df[bool]\n gene_ids = df_sample[gene_id_col]\n gene_string = ','.join(gene_ids)\n csv_writer.writerow([sample_name, gene_string])\n\n\nif __name__ == '__main__':\n import sys\n program, input_file, output_file, sample_col, gene_id_col = sys.argv\n get_apriori_input(input_file, output_file, sample_col, gene_id_col)\n",
"step-5": "#!/usr/bin/env python3\nimport pandas as pd\nimport csv\ndef get_apriori_input(input_file,output_file,sample_col=\"Sample\",gene_id_col=\"Gene_ID\"):\n df=pd.read_csv(input_file,sep=\"\\t\")\n sample_names=df[sample_col].unique()\n with open(output_file,\"w\") as out:\n csv_writer=csv.writer(out,delimiter=\"\\t\")\n for sample_name in sample_names:\n bool=df[sample_col]==sample_name\n df_sample=df[bool]\n gene_ids=df_sample[gene_id_col]\n gene_string=\",\".join(gene_ids)\n csv_writer.writerow([sample_name,gene_string])\n\n\nif __name__ == \"__main__\":\n import sys\n program,input_file,output_file,sample_col,gene_id_col=sys.argv\n get_apriori_input(input_file,output_file,sample_col,gene_id_col)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class BaseSearchFilterSet(django_filters.FilterSet):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, *args, **kwargs):
self.facet_config = kwargs.pop('facet_config', {})
self.view = kwargs.pop('view', None)
super().__init__(*args, **kwargs)
def apply_filter(self, qs, name, *args, **kwargs):
if name in self.facet_config:
return qs.post_filter(name, *args, **kwargs)
return qs.filter(*args, **kwargs)
<|reserved_special_token_0|>
def auto_query(self, qs, name, value):
if value:
return qs.set_query(Q('simple_query_string', query=value,
fields=self.query_fields, default_operator='and', lenient=True)
)
return qs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseSearchFilterSet(django_filters.FilterSet):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, *args, **kwargs):
self.facet_config = kwargs.pop('facet_config', {})
self.view = kwargs.pop('view', None)
super().__init__(*args, **kwargs)
def apply_filter(self, qs, name, *args, **kwargs):
if name in self.facet_config:
return qs.post_filter(name, *args, **kwargs)
return qs.filter(*args, **kwargs)
def filter_queryset(self, queryset):
"""
Filter the queryset with the underlying form's `cleaned_data`. You must
call `is_valid()` or `errors` before calling this method.
This method should be overridden if additional filtering needs to be
applied to the queryset before it is cached.
"""
for name, value in self.form.cleaned_data.items():
queryset = self.filters[name].filter(queryset, value)
return queryset
def auto_query(self, qs, name, value):
if value:
return qs.set_query(Q('simple_query_string', query=value,
fields=self.query_fields, default_operator='and', lenient=True)
)
return qs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseSearchFilterSet(django_filters.FilterSet):
query_fields = ['content']
q = django_filters.CharFilter(method='auto_query', widget=forms.
TextInput(attrs={'placeholder': _('Enter search term'), 'class':
'form-control'}))
def __init__(self, *args, **kwargs):
self.facet_config = kwargs.pop('facet_config', {})
self.view = kwargs.pop('view', None)
super().__init__(*args, **kwargs)
def apply_filter(self, qs, name, *args, **kwargs):
if name in self.facet_config:
return qs.post_filter(name, *args, **kwargs)
return qs.filter(*args, **kwargs)
def filter_queryset(self, queryset):
"""
Filter the queryset with the underlying form's `cleaned_data`. You must
call `is_valid()` or `errors` before calling this method.
This method should be overridden if additional filtering needs to be
applied to the queryset before it is cached.
"""
for name, value in self.form.cleaned_data.items():
queryset = self.filters[name].filter(queryset, value)
return queryset
def auto_query(self, qs, name, value):
if value:
return qs.set_query(Q('simple_query_string', query=value,
fields=self.query_fields, default_operator='and', lenient=True)
)
return qs
<|reserved_special_token_1|>
from django import forms
from django.utils.translation import gettext_lazy as _
import django_filters
from elasticsearch_dsl.query import Q
class BaseSearchFilterSet(django_filters.FilterSet):
query_fields = ['content']
q = django_filters.CharFilter(method='auto_query', widget=forms.
TextInput(attrs={'placeholder': _('Enter search term'), 'class':
'form-control'}))
def __init__(self, *args, **kwargs):
self.facet_config = kwargs.pop('facet_config', {})
self.view = kwargs.pop('view', None)
super().__init__(*args, **kwargs)
def apply_filter(self, qs, name, *args, **kwargs):
if name in self.facet_config:
return qs.post_filter(name, *args, **kwargs)
return qs.filter(*args, **kwargs)
def filter_queryset(self, queryset):
"""
Filter the queryset with the underlying form's `cleaned_data`. You must
call `is_valid()` or `errors` before calling this method.
This method should be overridden if additional filtering needs to be
applied to the queryset before it is cached.
"""
for name, value in self.form.cleaned_data.items():
queryset = self.filters[name].filter(queryset, value)
return queryset
def auto_query(self, qs, name, value):
if value:
return qs.set_query(Q('simple_query_string', query=value,
fields=self.query_fields, default_operator='and', lenient=True)
)
return qs
<|reserved_special_token_1|>
from django import forms
from django.utils.translation import gettext_lazy as _
import django_filters
from elasticsearch_dsl.query import Q
class BaseSearchFilterSet(django_filters.FilterSet):
query_fields = ["content"]
q = django_filters.CharFilter(
method="auto_query",
widget=forms.TextInput(
attrs={"placeholder": _("Enter search term"), "class": "form-control"}
),
)
def __init__(self, *args, **kwargs):
self.facet_config = kwargs.pop("facet_config", {})
self.view = kwargs.pop("view", None)
super().__init__(*args, **kwargs)
def apply_filter(self, qs, name, *args, **kwargs):
if name in self.facet_config:
return qs.post_filter(name, *args, **kwargs)
return qs.filter(*args, **kwargs)
def filter_queryset(self, queryset):
"""
Filter the queryset with the underlying form's `cleaned_data`. You must
call `is_valid()` or `errors` before calling this method.
This method should be overridden if additional filtering needs to be
applied to the queryset before it is cached.
"""
for name, value in self.form.cleaned_data.items():
queryset = self.filters[name].filter(queryset, value)
# assert isinstance(queryset, models.QuerySet), \
# "Expected '%s.%s' to return a QuerySet, but got a %s instead." \
# % (type(self).__name__, name, type(queryset).__name__)
return queryset
def auto_query(self, qs, name, value):
if value:
return qs.set_query(
Q(
"simple_query_string",
query=value,
fields=self.query_fields,
default_operator="and",
lenient=True,
)
)
return qs
|
flexible
|
{
"blob_id": "f225fbf363f1b170704418ed339f2e57ca790975",
"index": 5317,
"step-1": "<mask token>\n\n\nclass BaseSearchFilterSet(django_filters.FilterSet):\n <mask token>\n <mask token>\n\n def __init__(self, *args, **kwargs):\n self.facet_config = kwargs.pop('facet_config', {})\n self.view = kwargs.pop('view', None)\n super().__init__(*args, **kwargs)\n\n def apply_filter(self, qs, name, *args, **kwargs):\n if name in self.facet_config:\n return qs.post_filter(name, *args, **kwargs)\n return qs.filter(*args, **kwargs)\n <mask token>\n\n def auto_query(self, qs, name, value):\n if value:\n return qs.set_query(Q('simple_query_string', query=value,\n fields=self.query_fields, default_operator='and', lenient=True)\n )\n return qs\n",
"step-2": "<mask token>\n\n\nclass BaseSearchFilterSet(django_filters.FilterSet):\n <mask token>\n <mask token>\n\n def __init__(self, *args, **kwargs):\n self.facet_config = kwargs.pop('facet_config', {})\n self.view = kwargs.pop('view', None)\n super().__init__(*args, **kwargs)\n\n def apply_filter(self, qs, name, *args, **kwargs):\n if name in self.facet_config:\n return qs.post_filter(name, *args, **kwargs)\n return qs.filter(*args, **kwargs)\n\n def filter_queryset(self, queryset):\n \"\"\"\n Filter the queryset with the underlying form's `cleaned_data`. You must\n call `is_valid()` or `errors` before calling this method.\n This method should be overridden if additional filtering needs to be\n applied to the queryset before it is cached.\n \"\"\"\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n return queryset\n\n def auto_query(self, qs, name, value):\n if value:\n return qs.set_query(Q('simple_query_string', query=value,\n fields=self.query_fields, default_operator='and', lenient=True)\n )\n return qs\n",
"step-3": "<mask token>\n\n\nclass BaseSearchFilterSet(django_filters.FilterSet):\n query_fields = ['content']\n q = django_filters.CharFilter(method='auto_query', widget=forms.\n TextInput(attrs={'placeholder': _('Enter search term'), 'class':\n 'form-control'}))\n\n def __init__(self, *args, **kwargs):\n self.facet_config = kwargs.pop('facet_config', {})\n self.view = kwargs.pop('view', None)\n super().__init__(*args, **kwargs)\n\n def apply_filter(self, qs, name, *args, **kwargs):\n if name in self.facet_config:\n return qs.post_filter(name, *args, **kwargs)\n return qs.filter(*args, **kwargs)\n\n def filter_queryset(self, queryset):\n \"\"\"\n Filter the queryset with the underlying form's `cleaned_data`. You must\n call `is_valid()` or `errors` before calling this method.\n This method should be overridden if additional filtering needs to be\n applied to the queryset before it is cached.\n \"\"\"\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n return queryset\n\n def auto_query(self, qs, name, value):\n if value:\n return qs.set_query(Q('simple_query_string', query=value,\n fields=self.query_fields, default_operator='and', lenient=True)\n )\n return qs\n",
"step-4": "from django import forms\nfrom django.utils.translation import gettext_lazy as _\nimport django_filters\nfrom elasticsearch_dsl.query import Q\n\n\nclass BaseSearchFilterSet(django_filters.FilterSet):\n query_fields = ['content']\n q = django_filters.CharFilter(method='auto_query', widget=forms.\n TextInput(attrs={'placeholder': _('Enter search term'), 'class':\n 'form-control'}))\n\n def __init__(self, *args, **kwargs):\n self.facet_config = kwargs.pop('facet_config', {})\n self.view = kwargs.pop('view', None)\n super().__init__(*args, **kwargs)\n\n def apply_filter(self, qs, name, *args, **kwargs):\n if name in self.facet_config:\n return qs.post_filter(name, *args, **kwargs)\n return qs.filter(*args, **kwargs)\n\n def filter_queryset(self, queryset):\n \"\"\"\n Filter the queryset with the underlying form's `cleaned_data`. You must\n call `is_valid()` or `errors` before calling this method.\n This method should be overridden if additional filtering needs to be\n applied to the queryset before it is cached.\n \"\"\"\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n return queryset\n\n def auto_query(self, qs, name, value):\n if value:\n return qs.set_query(Q('simple_query_string', query=value,\n fields=self.query_fields, default_operator='and', lenient=True)\n )\n return qs\n",
"step-5": "from django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nimport django_filters\nfrom elasticsearch_dsl.query import Q\n\n\nclass BaseSearchFilterSet(django_filters.FilterSet):\n query_fields = [\"content\"]\n\n q = django_filters.CharFilter(\n method=\"auto_query\",\n widget=forms.TextInput(\n attrs={\"placeholder\": _(\"Enter search term\"), \"class\": \"form-control\"}\n ),\n )\n\n def __init__(self, *args, **kwargs):\n self.facet_config = kwargs.pop(\"facet_config\", {})\n self.view = kwargs.pop(\"view\", None)\n super().__init__(*args, **kwargs)\n\n def apply_filter(self, qs, name, *args, **kwargs):\n if name in self.facet_config:\n return qs.post_filter(name, *args, **kwargs)\n return qs.filter(*args, **kwargs)\n\n def filter_queryset(self, queryset):\n \"\"\"\n Filter the queryset with the underlying form's `cleaned_data`. You must\n call `is_valid()` or `errors` before calling this method.\n This method should be overridden if additional filtering needs to be\n applied to the queryset before it is cached.\n \"\"\"\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n # assert isinstance(queryset, models.QuerySet), \\\n # \"Expected '%s.%s' to return a QuerySet, but got a %s instead.\" \\\n # % (type(self).__name__, name, type(queryset).__name__)\n return queryset\n\n def auto_query(self, qs, name, value):\n if value:\n return qs.set_query(\n Q(\n \"simple_query_string\",\n query=value,\n fields=self.query_fields,\n default_operator=\"and\",\n lenient=True,\n )\n )\n return qs\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import h5py
import numpy as np
from matplotlib import pyplot
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed("Dropping to IPython shell")
filename = "SPY-VXX-20090507-20100427.hdf5"
start_day = 1
end_day = 245
#start_day = 108
#end_day = 111
start_day = 120
end_day = 245
start_day = 1
end_day = 120
start_day = 120
end_day = 180
start_day = 0
end_day = 245
days = end_day - start_day
|
normal
|
{
"blob_id": "175e8ecdd0c9faa5fc981447f821763e0eb58b4d",
"index": 5609,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nipshell = IPShellEmbed('Dropping to IPython shell')\nfilename = 'SPY-VXX-20090507-20100427.hdf5'\nstart_day = 1\nend_day = 245\nstart_day = 120\nend_day = 245\nstart_day = 1\nend_day = 120\nstart_day = 120\nend_day = 180\nstart_day = 0\nend_day = 245\ndays = end_day - start_day\n",
"step-3": "import h5py\nimport numpy as np\nfrom matplotlib import pyplot\nfrom IPython.Shell import IPShellEmbed\nipshell = IPShellEmbed('Dropping to IPython shell')\nfilename = 'SPY-VXX-20090507-20100427.hdf5'\nstart_day = 1\nend_day = 245\nstart_day = 120\nend_day = 245\nstart_day = 1\nend_day = 120\nstart_day = 120\nend_day = 180\nstart_day = 0\nend_day = 245\ndays = end_day - start_day\n",
"step-4": "import h5py\nimport numpy as np\nfrom matplotlib import pyplot\n\nfrom IPython.Shell import IPShellEmbed\nipshell = IPShellEmbed(\"Dropping to IPython shell\")\n\nfilename = \"SPY-VXX-20090507-20100427.hdf5\"\n\nstart_day = 1\nend_day = 245\n\n#start_day = 108\n#end_day = 111\n\nstart_day = 120\nend_day = 245\nstart_day = 1\nend_day = 120\nstart_day = 120\nend_day = 180\nstart_day = 0\nend_day = 245\n\ndays = end_day - start_day\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#! /usr/bin/env python
# coding: utf-8
'''
Author: xiezhw3@163.com
@contact: xiezhw3@163.com
@version: $Id$
Last modified: 2016-01-17
FileName: consumer.py
Description: 从 rabbitmq 拿到消息并存储到数据库
'''
import pika
import json
import logging
import pymongo
import traceback
from conf import config
from code.modules.db_processor.db_processor import DbProcessor
MAX_TRY_TIME = 5
class Consumer(object):
'''队列消息消费者'''
def __init__(self):
self.db_processor = DbProcessor()
credentials = pika.PlainCredentials(config.RABBITMQ_USER,
config.RABBITMQ_PASS)
parameters = pika.ConnectionParameters(config.RABBITMQ_HOST,
config.RABBITMQ_PORT_1,
'/', credentials)
connection = pika.BlockingConnection(parameters)
self.channel = connection.channel()
self.channel.exchange_declare(exchange=config.RABBITMQ_EXCHANGE,
type='topic')
result = self.channel.queue_declare(exclusive=True)
self.queue_name = result.method.queue
self.channel.queue_bind(exchange=config.RABBITMQ_EXCHANGE,
queue=self.queue_name,
routing_key=config.RABBITMQ_ROUT_KEY)
def callback(self, ch, method, properties, body):
if isinstance(body, str):
body = json.loads(body)
try_time = 0
while try_time < MAX_TRY_TIME:
try_time += 1
try:
self.db_processor.insert(body)
break
except pymongo.errors.ServerSelectionTimeoutError as error:
logging.error("Insert record timeout: [%s], [%s], [%s]" %
(error.__class__.__name__,
error,
traceback.format_exc()))
except Exception as error:
logging.error("Insert record error: [%s], [%s], [%s]" %
(error.__class__.__name__,
error,
traceback.format_exc()))
def start(self):
self.channel.basic_consume(self.callback,
queue=self.queue_name,
no_ack=True)
self.channel.start_consuming()
def stop(self):
self.channel.close()
|
normal
|
{
"blob_id": "ff26a2c2d8427f1ad4617669e701ea88b34616cd",
"index": 9152,
"step-1": "<mask token>\n\n\nclass Consumer(object):\n <mask token>\n\n def __init__(self):\n self.db_processor = DbProcessor()\n credentials = pika.PlainCredentials(config.RABBITMQ_USER, config.\n RABBITMQ_PASS)\n parameters = pika.ConnectionParameters(config.RABBITMQ_HOST, config\n .RABBITMQ_PORT_1, '/', credentials)\n connection = pika.BlockingConnection(parameters)\n self.channel = connection.channel()\n self.channel.exchange_declare(exchange=config.RABBITMQ_EXCHANGE,\n type='topic')\n result = self.channel.queue_declare(exclusive=True)\n self.queue_name = result.method.queue\n self.channel.queue_bind(exchange=config.RABBITMQ_EXCHANGE, queue=\n self.queue_name, routing_key=config.RABBITMQ_ROUT_KEY)\n <mask token>\n <mask token>\n\n def stop(self):\n self.channel.close()\n",
"step-2": "<mask token>\n\n\nclass Consumer(object):\n <mask token>\n\n def __init__(self):\n self.db_processor = DbProcessor()\n credentials = pika.PlainCredentials(config.RABBITMQ_USER, config.\n RABBITMQ_PASS)\n parameters = pika.ConnectionParameters(config.RABBITMQ_HOST, config\n .RABBITMQ_PORT_1, '/', credentials)\n connection = pika.BlockingConnection(parameters)\n self.channel = connection.channel()\n self.channel.exchange_declare(exchange=config.RABBITMQ_EXCHANGE,\n type='topic')\n result = self.channel.queue_declare(exclusive=True)\n self.queue_name = result.method.queue\n self.channel.queue_bind(exchange=config.RABBITMQ_EXCHANGE, queue=\n self.queue_name, routing_key=config.RABBITMQ_ROUT_KEY)\n\n def callback(self, ch, method, properties, body):\n if isinstance(body, str):\n body = json.loads(body)\n try_time = 0\n while try_time < MAX_TRY_TIME:\n try_time += 1\n try:\n self.db_processor.insert(body)\n break\n except pymongo.errors.ServerSelectionTimeoutError as error:\n logging.error('Insert record timeout: [%s], [%s], [%s]' % (\n error.__class__.__name__, error, traceback.format_exc()))\n except Exception as error:\n logging.error('Insert record error: [%s], [%s], [%s]' % (\n error.__class__.__name__, error, traceback.format_exc()))\n\n def start(self):\n self.channel.basic_consume(self.callback, queue=self.queue_name,\n no_ack=True)\n self.channel.start_consuming()\n\n def stop(self):\n self.channel.close()\n",
"step-3": "<mask token>\n\n\nclass Consumer(object):\n \"\"\"队列消息消费者\"\"\"\n\n def __init__(self):\n self.db_processor = DbProcessor()\n credentials = pika.PlainCredentials(config.RABBITMQ_USER, config.\n RABBITMQ_PASS)\n parameters = pika.ConnectionParameters(config.RABBITMQ_HOST, config\n .RABBITMQ_PORT_1, '/', credentials)\n connection = pika.BlockingConnection(parameters)\n self.channel = connection.channel()\n self.channel.exchange_declare(exchange=config.RABBITMQ_EXCHANGE,\n type='topic')\n result = self.channel.queue_declare(exclusive=True)\n self.queue_name = result.method.queue\n self.channel.queue_bind(exchange=config.RABBITMQ_EXCHANGE, queue=\n self.queue_name, routing_key=config.RABBITMQ_ROUT_KEY)\n\n def callback(self, ch, method, properties, body):\n if isinstance(body, str):\n body = json.loads(body)\n try_time = 0\n while try_time < MAX_TRY_TIME:\n try_time += 1\n try:\n self.db_processor.insert(body)\n break\n except pymongo.errors.ServerSelectionTimeoutError as error:\n logging.error('Insert record timeout: [%s], [%s], [%s]' % (\n error.__class__.__name__, error, traceback.format_exc()))\n except Exception as error:\n logging.error('Insert record error: [%s], [%s], [%s]' % (\n error.__class__.__name__, error, traceback.format_exc()))\n\n def start(self):\n self.channel.basic_consume(self.callback, queue=self.queue_name,\n no_ack=True)\n self.channel.start_consuming()\n\n def stop(self):\n self.channel.close()\n",
"step-4": "<mask token>\nMAX_TRY_TIME = 5\n\n\nclass Consumer(object):\n \"\"\"队列消息消费者\"\"\"\n\n def __init__(self):\n self.db_processor = DbProcessor()\n credentials = pika.PlainCredentials(config.RABBITMQ_USER, config.\n RABBITMQ_PASS)\n parameters = pika.ConnectionParameters(config.RABBITMQ_HOST, config\n .RABBITMQ_PORT_1, '/', credentials)\n connection = pika.BlockingConnection(parameters)\n self.channel = connection.channel()\n self.channel.exchange_declare(exchange=config.RABBITMQ_EXCHANGE,\n type='topic')\n result = self.channel.queue_declare(exclusive=True)\n self.queue_name = result.method.queue\n self.channel.queue_bind(exchange=config.RABBITMQ_EXCHANGE, queue=\n self.queue_name, routing_key=config.RABBITMQ_ROUT_KEY)\n\n def callback(self, ch, method, properties, body):\n if isinstance(body, str):\n body = json.loads(body)\n try_time = 0\n while try_time < MAX_TRY_TIME:\n try_time += 1\n try:\n self.db_processor.insert(body)\n break\n except pymongo.errors.ServerSelectionTimeoutError as error:\n logging.error('Insert record timeout: [%s], [%s], [%s]' % (\n error.__class__.__name__, error, traceback.format_exc()))\n except Exception as error:\n logging.error('Insert record error: [%s], [%s], [%s]' % (\n error.__class__.__name__, error, traceback.format_exc()))\n\n def start(self):\n self.channel.basic_consume(self.callback, queue=self.queue_name,\n no_ack=True)\n self.channel.start_consuming()\n\n def stop(self):\n self.channel.close()\n",
"step-5": "#! /usr/bin/env python\n# coding: utf-8\n\n'''\nAuthor: xiezhw3@163.com\n@contact: xiezhw3@163.com\n@version: $Id$\nLast modified: 2016-01-17\nFileName: consumer.py\nDescription: 从 rabbitmq 拿到消息并存储到数据库\n'''\n\nimport pika\nimport json\nimport logging\nimport pymongo\nimport traceback\n\nfrom conf import config\nfrom code.modules.db_processor.db_processor import DbProcessor\n\nMAX_TRY_TIME = 5\n\n\nclass Consumer(object):\n '''队列消息消费者'''\n def __init__(self):\n self.db_processor = DbProcessor()\n credentials = pika.PlainCredentials(config.RABBITMQ_USER,\n config.RABBITMQ_PASS)\n parameters = pika.ConnectionParameters(config.RABBITMQ_HOST,\n config.RABBITMQ_PORT_1,\n '/', credentials)\n connection = pika.BlockingConnection(parameters)\n self.channel = connection.channel()\n self.channel.exchange_declare(exchange=config.RABBITMQ_EXCHANGE,\n type='topic')\n\n result = self.channel.queue_declare(exclusive=True)\n self.queue_name = result.method.queue\n self.channel.queue_bind(exchange=config.RABBITMQ_EXCHANGE,\n queue=self.queue_name,\n routing_key=config.RABBITMQ_ROUT_KEY)\n\n def callback(self, ch, method, properties, body):\n if isinstance(body, str):\n body = json.loads(body)\n try_time = 0\n while try_time < MAX_TRY_TIME:\n try_time += 1\n try:\n self.db_processor.insert(body)\n break\n except pymongo.errors.ServerSelectionTimeoutError as error:\n logging.error(\"Insert record timeout: [%s], [%s], [%s]\" %\n (error.__class__.__name__,\n error,\n traceback.format_exc()))\n except Exception as error:\n logging.error(\"Insert record error: [%s], [%s], [%s]\" %\n (error.__class__.__name__,\n error,\n traceback.format_exc()))\n\n def start(self):\n self.channel.basic_consume(self.callback,\n queue=self.queue_name,\n no_ack=True)\n self.channel.start_consuming()\n\n def stop(self):\n self.channel.close()\n",
"step-ids": [
3,
5,
6,
7,
9
]
}
|
[
3,
5,
6,
7,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print("""
Your current directory is: """ + curr_path +
"""
It contains the following files and directories:
""" + str(os.
listdir('.')))
<|reserved_special_token_0|>
os.mkdir(project)
os.chdir(project)
<|reserved_special_token_0|>
print("""
-- Your directories are ready to keep your project organized. --""")
print(
"""
The current main project diretory has the following subdirectories:
"""
, os.listdir('.'))
print("""
The diretory data has the following subdirectories:
""", os.
listdir('results'))
print("""
The diretory analysis has the following subdirectories:
""", os.
listdir('analysis'))
print("""
The diretory results has the following subdirectories:
""", os.
listdir('results'))
print("""
---------- Enjoy your research! :) ----------""")
<|reserved_special_token_1|>
<|reserved_special_token_0|>
curr_path = os.getcwd()
print("""
Your current directory is: """ + curr_path +
"""
It contains the following files and directories:
""" + str(os.
listdir('.')))
project_path = input(str(
"""
Path of the directory where you want to start your project: """))
path_to = os.chdir(project_path)
response = input(str(
'Are you ready to start a new bioinformatics project? (Y or N): '))
project = input(str('Project name: ' if response == 'Y' else print(
"""
That's ok, you can try it later.""") + sys.exit()))
os.mkdir(project)
os.chdir(project)
data = os.mkdir('data')
raw_data = os.mkdir('data/raw_data')
processed_data = os.mkdir('data/processed_data')
genome_references = os.mkdir('data/genome_references')
programs = os.mkdir('programs')
analysis = os.mkdir('analysis')
data_pre_process = os.mkdir('analysis/data_pre_process')
assembly = os.mkdir('analysis/assembly')
annotations = os.mkdir('analysis/annotations')
alignements = os.mkdir('analysis/alignements')
quantification = os.mkdir('analysis/quantifications')
results = os.mkdir('results')
logs = os.mkdir('results/logs')
output = os.mkdir('results/output')
html = os.mkdir('results/html')
errors_out = os.mkdir('results/errors_out')
notebook = os.mkdir('notebooks')
scripts = os.mkdir('scripts')
print("""
-- Your directories are ready to keep your project organized. --""")
print(
"""
The current main project diretory has the following subdirectories:
"""
, os.listdir('.'))
print("""
The diretory data has the following subdirectories:
""", os.
listdir('results'))
print("""
The diretory analysis has the following subdirectories:
""", os.
listdir('analysis'))
print("""
The diretory results has the following subdirectories:
""", os.
listdir('results'))
print("""
---------- Enjoy your research! :) ----------""")
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
import sys
curr_path = os.getcwd()
print("""
Your current directory is: """ + curr_path +
"""
It contains the following files and directories:
""" + str(os.
listdir('.')))
project_path = input(str(
"""
Path of the directory where you want to start your project: """))
path_to = os.chdir(project_path)
response = input(str(
'Are you ready to start a new bioinformatics project? (Y or N): '))
project = input(str('Project name: ' if response == 'Y' else print(
"""
That's ok, you can try it later.""") + sys.exit()))
os.mkdir(project)
os.chdir(project)
data = os.mkdir('data')
raw_data = os.mkdir('data/raw_data')
processed_data = os.mkdir('data/processed_data')
genome_references = os.mkdir('data/genome_references')
programs = os.mkdir('programs')
analysis = os.mkdir('analysis')
data_pre_process = os.mkdir('analysis/data_pre_process')
assembly = os.mkdir('analysis/assembly')
annotations = os.mkdir('analysis/annotations')
alignements = os.mkdir('analysis/alignements')
quantification = os.mkdir('analysis/quantifications')
results = os.mkdir('results')
logs = os.mkdir('results/logs')
output = os.mkdir('results/output')
html = os.mkdir('results/html')
errors_out = os.mkdir('results/errors_out')
notebook = os.mkdir('notebooks')
scripts = os.mkdir('scripts')
print("""
-- Your directories are ready to keep your project organized. --""")
print(
"""
The current main project diretory has the following subdirectories:
"""
, os.listdir('.'))
print("""
The diretory data has the following subdirectories:
""", os.
listdir('results'))
print("""
The diretory analysis has the following subdirectories:
""", os.
listdir('analysis'))
print("""
The diretory results has the following subdirectories:
""", os.
listdir('results'))
print("""
---------- Enjoy your research! :) ----------""")
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Default organizer for bioinfoinformatics project directiories - RNA-Seq based model
"""
import os
import sys
#main path
curr_path = os.getcwd()
print("\nYour current directory is: " + curr_path + "\n\nIt contains the following files and directories:\n\n" + str(os.listdir("."))) # displays the current directory and list it subdirectories
project_path = input(str("\nPath of the directory where you want to start your project: "))
path_to = os.chdir(project_path) # change to the desired directory to start the project
response = input(str("Are you ready to start a new bioinformatics project? (Y or N): " ))
project = input(str("Project name: " if response == "Y" else print("\nThat's ok, you can try it later.") + sys.exit()))
os.mkdir(project) # cria um diretório para o trabalho de raiz de cana
os.chdir(project) # muda para o diretorio sugarcane_raiz (igual ao cd)
#print(os.getcwd()) # mostra o diretório atual
data = os.mkdir("data")
raw_data = os.mkdir("data/raw_data")
processed_data = os.mkdir("data/processed_data")
genome_references = os.mkdir("data/genome_references")
programs = os.mkdir("programs")
analysis = os.mkdir("analysis")
data_pre_process = os.mkdir("analysis/data_pre_process")
assembly = os.mkdir("analysis/assembly")
annotations = os.mkdir("analysis/annotations")
alignements = os.mkdir("analysis/alignements")
quantification = os.mkdir("analysis/quantifications")
results = os.mkdir("results")
logs = os.mkdir("results/logs")
output = os.mkdir("results/output")
html = os.mkdir("results/html")
errors_out = os.mkdir("results/errors_out")
notebook = os.mkdir("notebooks")
scripts = os.mkdir("scripts")
print("\n\n-- Your directories are ready to keep your project organized. --")
print("\n\nThe current main project diretory has the following subdirectories:\n", os.listdir("."))
print("\nThe diretory data has the following subdirectories:\n", os.listdir("results"))
print("\nThe diretory analysis has the following subdirectories:\n", os.listdir("analysis"))
print("\nThe diretory results has the following subdirectories:\n", os.listdir("results"))
print("\n\n ---------- Enjoy your research! :) ----------")
|
flexible
|
{
"blob_id": "0131657a7675904ee2743448f514a9f11e0dc0ad",
"index": 7561,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\"\"\"\nYour current directory is: \"\"\" + curr_path +\n \"\"\"\n\nIt contains the following files and directories:\n\n\"\"\" + str(os.\n listdir('.')))\n<mask token>\nos.mkdir(project)\nos.chdir(project)\n<mask token>\nprint(\"\"\"\n\n-- Your directories are ready to keep your project organized. --\"\"\")\nprint(\n \"\"\"\n\nThe current main project diretory has the following subdirectories:\n\"\"\"\n , os.listdir('.'))\nprint(\"\"\"\nThe diretory data has the following subdirectories:\n\"\"\", os.\n listdir('results'))\nprint(\"\"\"\nThe diretory analysis has the following subdirectories:\n\"\"\", os.\n listdir('analysis'))\nprint(\"\"\"\nThe diretory results has the following subdirectories:\n\"\"\", os.\n listdir('results'))\nprint(\"\"\"\n\n ---------- Enjoy your research! :) ----------\"\"\")\n",
"step-3": "<mask token>\ncurr_path = os.getcwd()\nprint(\"\"\"\nYour current directory is: \"\"\" + curr_path +\n \"\"\"\n\nIt contains the following files and directories:\n\n\"\"\" + str(os.\n listdir('.')))\nproject_path = input(str(\n \"\"\"\nPath of the directory where you want to start your project: \"\"\"))\npath_to = os.chdir(project_path)\nresponse = input(str(\n 'Are you ready to start a new bioinformatics project? (Y or N): '))\nproject = input(str('Project name: ' if response == 'Y' else print(\n \"\"\"\nThat's ok, you can try it later.\"\"\") + sys.exit()))\nos.mkdir(project)\nos.chdir(project)\ndata = os.mkdir('data')\nraw_data = os.mkdir('data/raw_data')\nprocessed_data = os.mkdir('data/processed_data')\ngenome_references = os.mkdir('data/genome_references')\nprograms = os.mkdir('programs')\nanalysis = os.mkdir('analysis')\ndata_pre_process = os.mkdir('analysis/data_pre_process')\nassembly = os.mkdir('analysis/assembly')\nannotations = os.mkdir('analysis/annotations')\nalignements = os.mkdir('analysis/alignements')\nquantification = os.mkdir('analysis/quantifications')\nresults = os.mkdir('results')\nlogs = os.mkdir('results/logs')\noutput = os.mkdir('results/output')\nhtml = os.mkdir('results/html')\nerrors_out = os.mkdir('results/errors_out')\nnotebook = os.mkdir('notebooks')\nscripts = os.mkdir('scripts')\nprint(\"\"\"\n\n-- Your directories are ready to keep your project organized. --\"\"\")\nprint(\n \"\"\"\n\nThe current main project diretory has the following subdirectories:\n\"\"\"\n , os.listdir('.'))\nprint(\"\"\"\nThe diretory data has the following subdirectories:\n\"\"\", os.\n listdir('results'))\nprint(\"\"\"\nThe diretory analysis has the following subdirectories:\n\"\"\", os.\n listdir('analysis'))\nprint(\"\"\"\nThe diretory results has the following subdirectories:\n\"\"\", os.\n listdir('results'))\nprint(\"\"\"\n\n ---------- Enjoy your research! :) ----------\"\"\")\n",
"step-4": "<mask token>\nimport os\nimport sys\ncurr_path = os.getcwd()\nprint(\"\"\"\nYour current directory is: \"\"\" + curr_path +\n \"\"\"\n\nIt contains the following files and directories:\n\n\"\"\" + str(os.\n listdir('.')))\nproject_path = input(str(\n \"\"\"\nPath of the directory where you want to start your project: \"\"\"))\npath_to = os.chdir(project_path)\nresponse = input(str(\n 'Are you ready to start a new bioinformatics project? (Y or N): '))\nproject = input(str('Project name: ' if response == 'Y' else print(\n \"\"\"\nThat's ok, you can try it later.\"\"\") + sys.exit()))\nos.mkdir(project)\nos.chdir(project)\ndata = os.mkdir('data')\nraw_data = os.mkdir('data/raw_data')\nprocessed_data = os.mkdir('data/processed_data')\ngenome_references = os.mkdir('data/genome_references')\nprograms = os.mkdir('programs')\nanalysis = os.mkdir('analysis')\ndata_pre_process = os.mkdir('analysis/data_pre_process')\nassembly = os.mkdir('analysis/assembly')\nannotations = os.mkdir('analysis/annotations')\nalignements = os.mkdir('analysis/alignements')\nquantification = os.mkdir('analysis/quantifications')\nresults = os.mkdir('results')\nlogs = os.mkdir('results/logs')\noutput = os.mkdir('results/output')\nhtml = os.mkdir('results/html')\nerrors_out = os.mkdir('results/errors_out')\nnotebook = os.mkdir('notebooks')\nscripts = os.mkdir('scripts')\nprint(\"\"\"\n\n-- Your directories are ready to keep your project organized. --\"\"\")\nprint(\n \"\"\"\n\nThe current main project diretory has the following subdirectories:\n\"\"\"\n , os.listdir('.'))\nprint(\"\"\"\nThe diretory data has the following subdirectories:\n\"\"\", os.\n listdir('results'))\nprint(\"\"\"\nThe diretory analysis has the following subdirectories:\n\"\"\", os.\n listdir('analysis'))\nprint(\"\"\"\nThe diretory results has the following subdirectories:\n\"\"\", os.\n listdir('results'))\nprint(\"\"\"\n\n ---------- Enjoy your research! :) ----------\"\"\")\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nDefault organizer for bioinfoinformatics project directiories - RNA-Seq based model\n\"\"\"\nimport os \nimport sys\n\n#main path\n\ncurr_path = os.getcwd()\nprint(\"\\nYour current directory is: \" + curr_path + \"\\n\\nIt contains the following files and directories:\\n\\n\" + str(os.listdir(\".\"))) # displays the current directory and list it subdirectories\n\nproject_path = input(str(\"\\nPath of the directory where you want to start your project: \"))\n\npath_to = os.chdir(project_path) # change to the desired directory to start the project\n\nresponse = input(str(\"Are you ready to start a new bioinformatics project? (Y or N): \" ))\nproject = input(str(\"Project name: \" if response == \"Y\" else print(\"\\nThat's ok, you can try it later.\") + sys.exit()))\n\nos.mkdir(project) # cria um diretório para o trabalho de raiz de cana\n\nos.chdir(project) # muda para o diretorio sugarcane_raiz (igual ao cd)\n\n#print(os.getcwd()) # mostra o diretório atual\n\ndata = os.mkdir(\"data\")\nraw_data = os.mkdir(\"data/raw_data\")\nprocessed_data = os.mkdir(\"data/processed_data\")\ngenome_references = os.mkdir(\"data/genome_references\")\nprograms = os.mkdir(\"programs\")\nanalysis = os.mkdir(\"analysis\")\ndata_pre_process = os.mkdir(\"analysis/data_pre_process\")\nassembly = os.mkdir(\"analysis/assembly\")\nannotations = os.mkdir(\"analysis/annotations\")\nalignements = os.mkdir(\"analysis/alignements\")\nquantification = os.mkdir(\"analysis/quantifications\")\nresults = os.mkdir(\"results\")\nlogs = os.mkdir(\"results/logs\")\noutput = os.mkdir(\"results/output\")\nhtml = os.mkdir(\"results/html\")\nerrors_out = os.mkdir(\"results/errors_out\")\nnotebook = os.mkdir(\"notebooks\")\nscripts = os.mkdir(\"scripts\")\n\nprint(\"\\n\\n-- Your directories are ready to keep your project organized. 
--\")\n\nprint(\"\\n\\nThe current main project diretory has the following subdirectories:\\n\", os.listdir(\".\"))\nprint(\"\\nThe diretory data has the following subdirectories:\\n\", os.listdir(\"results\"))\nprint(\"\\nThe diretory analysis has the following subdirectories:\\n\", os.listdir(\"analysis\"))\nprint(\"\\nThe diretory results has the following subdirectories:\\n\", os.listdir(\"results\"))\n\nprint(\"\\n\\n ---------- Enjoy your research! :) ----------\")\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ClassromConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ClassromConfig(AppConfig):
name = 'classrom'
<|reserved_special_token_1|>
from django.apps import AppConfig
class ClassromConfig(AppConfig):
name = 'classrom'
|
flexible
|
{
"blob_id": "a995305cb5589fa0cbb246ae3ca6337f4f2c3ca1",
"index": 8798,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ClassromConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ClassromConfig(AppConfig):\n name = 'classrom'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass ClassromConfig(AppConfig):\n name = 'classrom'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# SPDX-FileCopyrightText: 2023 spdx contributors
#
# SPDX-License-Identifier: Apache-2.0
from dataclasses import field
from beartype.typing import List, Optional
from spdx_tools.common.typing.dataclass_with_properties import dataclass_with_properties
from spdx_tools.common.typing.type_checks import check_types_and_set_values
from spdx_tools.spdx3.model import IntegrityMethod
@dataclass_with_properties
class ExternalMap:
    """Record pairing an external element id (anyURI) with optional integrity
    methods, a location hint and the defining document.

    NOTE(review): field semantics presumably follow the SPDX 3 ExternalMap
    model — confirm against the spec.
    """
    external_id: str # anyURI
    verified_using: List[IntegrityMethod] = field(default_factory=list)
    location_hint: Optional[str] = None # anyURI
    defining_document: Optional[str] = None
    def __init__(
        self,
        external_id: str,
        verified_using: List[IntegrityMethod] = None,
        location_hint: Optional[str] = None,
        defining_document: Optional[str] = None,
    ):
        # Avoid a shared mutable default: substitute a fresh list when omitted.
        verified_using = [] if verified_using is None else verified_using
        # Type-checks every argument in locals() and assigns it onto self.
        check_types_and_set_values(self, locals())
|
normal
|
{
"blob_id": "1c085ea8f9b21ea7bef94ad4ecbb1771a57f697a",
"index": 2208,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@dataclass_with_properties\nclass ExternalMap:\n external_id: str\n verified_using: List[IntegrityMethod] = field(default_factory=list)\n location_hint: Optional[str] = None\n defining_document: Optional[str] = None\n <mask token>\n",
"step-3": "<mask token>\n\n\n@dataclass_with_properties\nclass ExternalMap:\n external_id: str\n verified_using: List[IntegrityMethod] = field(default_factory=list)\n location_hint: Optional[str] = None\n defining_document: Optional[str] = None\n\n def __init__(self, external_id: str, verified_using: List[\n IntegrityMethod]=None, location_hint: Optional[str]=None,\n defining_document: Optional[str]=None):\n verified_using = [] if verified_using is None else verified_using\n check_types_and_set_values(self, locals())\n",
"step-4": "from dataclasses import field\nfrom beartype.typing import List, Optional\nfrom spdx_tools.common.typing.dataclass_with_properties import dataclass_with_properties\nfrom spdx_tools.common.typing.type_checks import check_types_and_set_values\nfrom spdx_tools.spdx3.model import IntegrityMethod\n\n\n@dataclass_with_properties\nclass ExternalMap:\n external_id: str\n verified_using: List[IntegrityMethod] = field(default_factory=list)\n location_hint: Optional[str] = None\n defining_document: Optional[str] = None\n\n def __init__(self, external_id: str, verified_using: List[\n IntegrityMethod]=None, location_hint: Optional[str]=None,\n defining_document: Optional[str]=None):\n verified_using = [] if verified_using is None else verified_using\n check_types_and_set_values(self, locals())\n",
"step-5": "# SPDX-FileCopyrightText: 2023 spdx contributors\n#\n# SPDX-License-Identifier: Apache-2.0\nfrom dataclasses import field\n\nfrom beartype.typing import List, Optional\n\nfrom spdx_tools.common.typing.dataclass_with_properties import dataclass_with_properties\nfrom spdx_tools.common.typing.type_checks import check_types_and_set_values\nfrom spdx_tools.spdx3.model import IntegrityMethod\n\n\n@dataclass_with_properties\nclass ExternalMap:\n external_id: str # anyURI\n verified_using: List[IntegrityMethod] = field(default_factory=list)\n location_hint: Optional[str] = None # anyURI\n defining_document: Optional[str] = None\n\n def __init__(\n self,\n external_id: str,\n verified_using: List[IntegrityMethod] = None,\n location_hint: Optional[str] = None,\n defining_document: Optional[str] = None,\n ):\n verified_using = [] if verified_using is None else verified_using\n check_types_and_set_values(self, locals())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#Answer to The Ship Teams - https://py.checkio.org/en/mission/the-ship-teams/
def two_teams(sailors):
    """Split sailors into two alphabetically sorted crews by age.

    Ship 1 takes everyone older than 40 or younger than 20; Ship 2 takes
    the rest (ages 20 through 40 inclusive).

    :param sailors: dict mapping sailor name -> age
    :return: list of two sorted name lists: [ship one, ship two]
    """
    # Unpack (name, age) pairs directly instead of indexing the items tuples.
    first_ship = [name for name, age in sailors.items() if age > 40 or age < 20]
    second_ship = [name for name, age in sailors.items() if 20 <= age <= 40]
    return [sorted(first_ship), sorted(second_ship)]
if __name__ == '__main__':
    # Self-check data: (crew roster, expected two-ship split).
    checks = [
        ({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19},
         [['Abrahams', 'Coleman'], ['Smith', 'Wesson']]),
        ({'Fernandes': 18, 'Johnson': 22, 'Kale': 41, 'McCortney': 54},
         [['Fernandes', 'Kale', 'McCortney'], ['Johnson']]),
    ]
    print("Example:")
    for roster, _ in checks:
        print(two_teams(roster))
    # These checks are for self-verification only, not for auto-testing.
    for roster, expected in checks:
        assert two_teams(roster) == expected
    print("Coding complete? Click 'Check' to earn cool rewards!")
|
normal
|
{
"blob_id": "de634c95fddf4591cb15cd0eb20e798043075798",
"index": 2464,
"step-1": "<mask token>\n",
"step-2": "def two_teams(sailors):\n result = []\n temp = [[], []]\n for i in sailors.items():\n if i[1] > 40 or i[1] < 20:\n temp[0].append(i[0])\n else:\n temp[1].append(i[0])\n result.append(sorted(temp[0]))\n result.append(sorted(temp[1]))\n return result\n\n\n<mask token>\n",
"step-3": "def two_teams(sailors):\n result = []\n temp = [[], []]\n for i in sailors.items():\n if i[1] > 40 or i[1] < 20:\n temp[0].append(i[0])\n else:\n temp[1].append(i[0])\n result.append(sorted(temp[0]))\n result.append(sorted(temp[1]))\n return result\n\n\nif __name__ == '__main__':\n print('Example:')\n print(two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19})\n )\n print(two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41,\n 'McCortney': 54}))\n assert two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19}\n ) == [['Abrahams', 'Coleman'], ['Smith', 'Wesson']]\n assert two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41,\n 'McCortney': 54}) == [['Fernandes', 'Kale', 'McCortney'], ['Johnson']]\n print(\"Coding complete? Click 'Check' to earn cool rewards!\")\n",
"step-4": "#Answer to The Ship Teams - https://py.checkio.org/en/mission/the-ship-teams/\n\ndef two_teams(sailors):\n result = [] #To store the result\n temp = [[],[]] #To store the intermediatary values\n for i in sailors.items(): #To get the values of dictionary as Tuple\n if i[1] > 40 or i[1] < 20: #To get the people to be added to the First Ship\n temp[0].append(i[0]) #Adding each person name to first Temp List\n else: #To get the people to be added to the Second Ship\n temp[1].append(i[0]) #Adding each person name to second Temp List\n result.append(sorted(temp[0])) #Adding all the names of the Ship 1 to resultant\n result.append(sorted(temp[1])) #Adding all the names of the Ship 2 to resultant\n return result #Return the result\n\nif __name__ == '__main__':\n print(\"Example:\")\n print(two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19}))\n print(two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41, 'McCortney': 54}))\n\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert two_teams({\n 'Smith': 34, \n 'Wesson': 22, \n 'Coleman': 45, \n 'Abrahams': 19}) == [\n ['Abrahams', 'Coleman'], \n ['Smith', 'Wesson']\n ]\n\n assert two_teams({\n 'Fernandes': 18,\n 'Johnson': 22,\n 'Kale': 41,\n 'McCortney': 54}) == [\n ['Fernandes', 'Kale', 'McCortney'], \n ['Johnson']\n ]\n print(\"Coding complete? Click 'Check' to earn cool rewards!\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cv2.imshow('Original', image)
cv2.waitKey(0)
<|reserved_special_token_0|>
cv2.imshow('Rotated by 45 degrees', rotated)
cv2.waitKey(0)
<|reserved_special_token_0|>
cv2.imshow('Rotated by -90 degrees', rotated)
cv2.waitKey(0)
<|reserved_special_token_0|>
cv2.imshow('Rotated by 180', rotated)
cv2.waitKey(0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
image = cv2.imread('D:\\Github\\python-opencv\\images\\trex.png')
cv2.imshow('Original', image)
cv2.waitKey(0)
h, w = image.shape[:2]
center = w / 2, h / 2
M = cv2.getRotationMatrix2D(center, 45, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow('Rotated by 45 degrees', rotated)
cv2.waitKey(0)
M = cv2.getRotationMatrix2D(center, -90, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow('Rotated by -90 degrees', rotated)
cv2.waitKey(0)
rotated = imutils.rotate(image, 180)
cv2.imshow('Rotated by 180', rotated)
cv2.waitKey(0)
<|reserved_special_token_1|>
import numpy as np
import imutils
import cv2
image = cv2.imread('D:\\Github\\python-opencv\\images\\trex.png')
cv2.imshow('Original', image)
cv2.waitKey(0)
h, w = image.shape[:2]
center = w / 2, h / 2
M = cv2.getRotationMatrix2D(center, 45, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow('Rotated by 45 degrees', rotated)
cv2.waitKey(0)
M = cv2.getRotationMatrix2D(center, -90, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow('Rotated by -90 degrees', rotated)
cv2.waitKey(0)
rotated = imutils.rotate(image, 180)
cv2.imshow('Rotated by 180', rotated)
cv2.waitKey(0)
<|reserved_special_token_1|>
import numpy as np
import imutils
import cv2
# Demonstrates image rotation with OpenCV: build an affine rotation matrix
# about the image centre with getRotationMatrix2D, then apply it with
# warpAffine; imutils.rotate wraps both steps.
image = cv2.imread("D:\\Github\\python-opencv\\images\\trex.png")
cv2.imshow("Original", image)
cv2.waitKey(0)  # wait for any key before showing the next window

(h, w) = image.shape[:2] # get height and width of the image
center = (w/2, h/2) # which point to rotate around

# Positive angles rotate counter-clockwise; scale 1.0 keeps the size.
M = cv2.getRotationMatrix2D(center, 45, 1.0) # rotation matrix
rotated = cv2.warpAffine(image, M, (w, h)) # apply the rotation
cv2. imshow("Rotated by 45 degrees", rotated)
cv2.waitKey(0)

M = cv2.getRotationMatrix2D(center, -90, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow("Rotated by -90 degrees", rotated)
cv2.waitKey(0)

# imutils.rotate builds the matrix and warps in one call.
rotated = imutils.rotate(image, 180)
cv2.imshow("Rotated by 180", rotated)
cv2.waitKey(0)
|
flexible
|
{
"blob_id": "4462fec6e0edc25530c93ffeeae2372c86fef2cc",
"index": 528,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.imshow('Original', image)\ncv2.waitKey(0)\n<mask token>\ncv2.imshow('Rotated by 45 degrees', rotated)\ncv2.waitKey(0)\n<mask token>\ncv2.imshow('Rotated by -90 degrees', rotated)\ncv2.waitKey(0)\n<mask token>\ncv2.imshow('Rotated by 180', rotated)\ncv2.waitKey(0)\n",
"step-3": "<mask token>\nimage = cv2.imread('D:\\\\Github\\\\python-opencv\\\\images\\\\trex.png')\ncv2.imshow('Original', image)\ncv2.waitKey(0)\nh, w = image.shape[:2]\ncenter = w / 2, h / 2\nM = cv2.getRotationMatrix2D(center, 45, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by 45 degrees', rotated)\ncv2.waitKey(0)\nM = cv2.getRotationMatrix2D(center, -90, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by -90 degrees', rotated)\ncv2.waitKey(0)\nrotated = imutils.rotate(image, 180)\ncv2.imshow('Rotated by 180', rotated)\ncv2.waitKey(0)\n",
"step-4": "import numpy as np\nimport imutils\nimport cv2\nimage = cv2.imread('D:\\\\Github\\\\python-opencv\\\\images\\\\trex.png')\ncv2.imshow('Original', image)\ncv2.waitKey(0)\nh, w = image.shape[:2]\ncenter = w / 2, h / 2\nM = cv2.getRotationMatrix2D(center, 45, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by 45 degrees', rotated)\ncv2.waitKey(0)\nM = cv2.getRotationMatrix2D(center, -90, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by -90 degrees', rotated)\ncv2.waitKey(0)\nrotated = imutils.rotate(image, 180)\ncv2.imshow('Rotated by 180', rotated)\ncv2.waitKey(0)\n",
"step-5": "import numpy as np\nimport imutils\nimport cv2\n\nimage = cv2.imread(\"D:\\\\Github\\\\python-opencv\\\\images\\\\trex.png\")\ncv2.imshow(\"Original\", image)\ncv2.waitKey(0)\n\n(h, w) = image.shape[:2] # get height and width of the image\ncenter = (w/2, h/2) # which point to rotate around\n\nM = cv2.getRotationMatrix2D(center, 45, 1.0) # rotation matrix\nrotated = cv2.warpAffine(image, M, (w, h)) # apply the rotation\ncv2. imshow(\"Rotated by 45 degrees\", rotated)\ncv2.waitKey(0)\n\nM = cv2.getRotationMatrix2D(center, -90, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow(\"Rotated by -90 degrees\", rotated)\ncv2.waitKey(0)\n\nrotated = imutils.rotate(image, 180)\ncv2.imshow(\"Rotated by 180\", rotated)\ncv2.waitKey(0)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PluginSetupTests(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PluginSetupTests(unittest.TestCase):
def test_plugin_setup(self):
self.assertEqual(chemistree_plugin.name, 'chemistree')
<|reserved_special_token_1|>
import unittest
from q2_chemistree.plugin_setup import plugin as chemistree_plugin
class PluginSetupTests(unittest.TestCase):
def test_plugin_setup(self):
self.assertEqual(chemistree_plugin.name, 'chemistree')
<|reserved_special_token_1|>
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2018, q2-chemistree development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
from q2_chemistree.plugin_setup import plugin as chemistree_plugin
class PluginSetupTests(unittest.TestCase):
    """Sanity checks for the imported chemistree plugin object."""

    def test_plugin_setup(self):
        """The plugin must be registered under the name 'chemistree'."""
        self.assertEqual(chemistree_plugin.name, 'chemistree')
|
flexible
|
{
"blob_id": "4296dc5b79fd1d2c872eb1115beab52a0f067423",
"index": 4816,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass PluginSetupTests(unittest.TestCase):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass PluginSetupTests(unittest.TestCase):\n\n def test_plugin_setup(self):\n self.assertEqual(chemistree_plugin.name, 'chemistree')\n",
"step-4": "import unittest\nfrom q2_chemistree.plugin_setup import plugin as chemistree_plugin\n\n\nclass PluginSetupTests(unittest.TestCase):\n\n def test_plugin_setup(self):\n self.assertEqual(chemistree_plugin.name, 'chemistree')\n",
"step-5": "# ----------------------------------------------------------------------------\n# Copyright (c) 2016-2018, q2-chemistree development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport unittest\n\nfrom q2_chemistree.plugin_setup import plugin as chemistree_plugin\n\n\nclass PluginSetupTests(unittest.TestCase):\n\n def test_plugin_setup(self):\n self.assertEqual(chemistree_plugin.name, 'chemistree')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('users/', views.UserCreateAPIView.as_view(), name=
'user-list'), path('users/login/', CustomObtainAuthToken.as_view()),
path('users/<int:pk>/', views.ReadUserAPIView.as_view()), path(
'users/<int:pk>/profile/', views.ReadUpdateProfileAPIView.as_view()),
path('charities/', views.ListCharitiesAPIView.as_view()), path(
'categories/', views.ListCategoriesAPIView.as_view())]
<|reserved_special_token_1|>
from django.contrib import admin
from django.urls import path, include
from serverside.router import router
from rest_framework.authtoken import views as auth_views
from . import views
from .views import CustomObtainAuthToken
# API routes: user creation and token login, single-user read, profile
# read/update, and list endpoints for charities and categories.
urlpatterns = [path('users/', views.UserCreateAPIView.as_view(), name=
    'user-list'), path('users/login/', CustomObtainAuthToken.as_view()),
    path('users/<int:pk>/', views.ReadUserAPIView.as_view()), path(
    'users/<int:pk>/profile/', views.ReadUpdateProfileAPIView.as_view()),
    path('charities/', views.ListCharitiesAPIView.as_view()), path(
    'categories/', views.ListCategoriesAPIView.as_view())]
|
flexible
|
{
"blob_id": "49d76458b8adcf6eea9db2ef127609ff96e03ad1",
"index": 6270,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('users/', views.UserCreateAPIView.as_view(), name=\n 'user-list'), path('users/login/', CustomObtainAuthToken.as_view()),\n path('users/<int:pk>/', views.ReadUserAPIView.as_view()), path(\n 'users/<int:pk>/profile/', views.ReadUpdateProfileAPIView.as_view()),\n path('charities/', views.ListCharitiesAPIView.as_view()), path(\n 'categories/', views.ListCategoriesAPIView.as_view())]\n",
"step-3": "from django.contrib import admin\nfrom django.urls import path, include\nfrom serverside.router import router\nfrom rest_framework.authtoken import views as auth_views\nfrom . import views\nfrom .views import CustomObtainAuthToken\nurlpatterns = [path('users/', views.UserCreateAPIView.as_view(), name=\n 'user-list'), path('users/login/', CustomObtainAuthToken.as_view()),\n path('users/<int:pk>/', views.ReadUserAPIView.as_view()), path(\n 'users/<int:pk>/profile/', views.ReadUpdateProfileAPIView.as_view()),\n path('charities/', views.ListCharitiesAPIView.as_view()), path(\n 'categories/', views.ListCategoriesAPIView.as_view())]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def parse(filename):
t1, t2 = open(filename).read().strip().split('\n\n')
return tuple(map(lambda x: list(map(int, x.split('\n')[1:])), [t1, t2]))
def score(deck):
res = 0
for i in range(len(deck)):
res += deck[i] * (len(deck) - i)
return res
<|reserved_special_token_0|>
def combat(deck1, deck2):
db = set()
while len(deck1) > 0 and len(deck2) > 0:
key = tuple(deck1), tuple(deck2)
if key in db:
return 'p1', score(deck1)
db.add(key)
p1, p2 = deck1[0], deck2[0]
if can_recurse(deck1, deck2):
winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])
else:
winner = 'p1' if p1 > p2 else 'p2'
if winner == 'p1':
deck1 = deck1[1:] + [p1, p2]
deck2 = deck2[1:]
else:
deck1 = deck1[1:]
deck2 = deck2[1:] + [p2, p1]
if len(deck1) > 0:
return 'p1', score(deck1)
return 'p2', score(deck2)
<|reserved_special_token_0|>
def main():
print(solution1(*parse('sample.txt')))
print(solution1(*parse('input.txt')))
print(solution2(*parse('sample.txt')))
print(solution2(*parse('input.txt')))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def parse(filename):
t1, t2 = open(filename).read().strip().split('\n\n')
return tuple(map(lambda x: list(map(int, x.split('\n')[1:])), [t1, t2]))
def score(deck):
res = 0
for i in range(len(deck)):
res += deck[i] * (len(deck) - i)
return res
<|reserved_special_token_0|>
def combat(deck1, deck2):
db = set()
while len(deck1) > 0 and len(deck2) > 0:
key = tuple(deck1), tuple(deck2)
if key in db:
return 'p1', score(deck1)
db.add(key)
p1, p2 = deck1[0], deck2[0]
if can_recurse(deck1, deck2):
winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])
else:
winner = 'p1' if p1 > p2 else 'p2'
if winner == 'p1':
deck1 = deck1[1:] + [p1, p2]
deck2 = deck2[1:]
else:
deck1 = deck1[1:]
deck2 = deck2[1:] + [p2, p1]
if len(deck1) > 0:
return 'p1', score(deck1)
return 'p2', score(deck2)
def solution2(deck1, deck2):
return combat(deck1, deck2)[1]
def main():
print(solution1(*parse('sample.txt')))
print(solution1(*parse('input.txt')))
print(solution2(*parse('sample.txt')))
print(solution2(*parse('input.txt')))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def parse(filename):
t1, t2 = open(filename).read().strip().split('\n\n')
return tuple(map(lambda x: list(map(int, x.split('\n')[1:])), [t1, t2]))
def score(deck):
res = 0
for i in range(len(deck)):
res += deck[i] * (len(deck) - i)
return res
<|reserved_special_token_0|>
def can_recurse(deck1, deck2):
p1, p2 = deck1[0], deck2[0]
return p1 <= len(deck1) - 1 and p2 <= len(deck2) - 1
def combat(deck1, deck2):
db = set()
while len(deck1) > 0 and len(deck2) > 0:
key = tuple(deck1), tuple(deck2)
if key in db:
return 'p1', score(deck1)
db.add(key)
p1, p2 = deck1[0], deck2[0]
if can_recurse(deck1, deck2):
winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])
else:
winner = 'p1' if p1 > p2 else 'p2'
if winner == 'p1':
deck1 = deck1[1:] + [p1, p2]
deck2 = deck2[1:]
else:
deck1 = deck1[1:]
deck2 = deck2[1:] + [p2, p1]
if len(deck1) > 0:
return 'p1', score(deck1)
return 'p2', score(deck2)
def solution2(deck1, deck2):
return combat(deck1, deck2)[1]
def main():
print(solution1(*parse('sample.txt')))
print(solution1(*parse('input.txt')))
print(solution2(*parse('sample.txt')))
print(solution2(*parse('input.txt')))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def parse(filename):
t1, t2 = open(filename).read().strip().split('\n\n')
return tuple(map(lambda x: list(map(int, x.split('\n')[1:])), [t1, t2]))
def score(deck):
res = 0
for i in range(len(deck)):
res += deck[i] * (len(deck) - i)
return res
def solution1(deck1, deck2):
while len(deck1) > 0 and len(deck2) > 0:
p1, p2 = deck1[0], deck2[0]
if p1 > p2:
deck1 = deck1[1:] + [p1, p2]
deck2 = deck2[1:]
else:
deck1 = deck1[1:]
deck2 = deck2[1:] + [p2, p1]
if len(deck1) > 0:
return score(deck1)
return score(deck2)
def can_recurse(deck1, deck2):
p1, p2 = deck1[0], deck2[0]
return p1 <= len(deck1) - 1 and p2 <= len(deck2) - 1
def combat(deck1, deck2):
db = set()
while len(deck1) > 0 and len(deck2) > 0:
key = tuple(deck1), tuple(deck2)
if key in db:
return 'p1', score(deck1)
db.add(key)
p1, p2 = deck1[0], deck2[0]
if can_recurse(deck1, deck2):
winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])
else:
winner = 'p1' if p1 > p2 else 'p2'
if winner == 'p1':
deck1 = deck1[1:] + [p1, p2]
deck2 = deck2[1:]
else:
deck1 = deck1[1:]
deck2 = deck2[1:] + [p2, p1]
if len(deck1) > 0:
return 'p1', score(deck1)
return 'p2', score(deck2)
def solution2(deck1, deck2):
return combat(deck1, deck2)[1]
def main():
print(solution1(*parse('sample.txt')))
print(solution1(*parse('input.txt')))
print(solution2(*parse('sample.txt')))
print(solution2(*parse('input.txt')))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
def parse(filename):
	"""Read two card decks from *filename*.

	The file holds exactly two blocks separated by a blank line; each block
	is a header line (e.g. "Player 1:") followed by one card value per line.

	:return: tuple (deck1, deck2), each a list of ints with the header dropped
	"""
	# Use a context manager so the file handle is closed (the original leaked it).
	with open(filename) as handle:
		first, second = handle.read().strip().split("\n\n")
	return tuple([int(card) for card in block.split("\n")[1:]] for block in (first, second))
def score(deck):
	"""Return the Combat score of *deck*.

	The top card is worth its value times the deck size, the next times
	size - 1, and so on down to the bottom card times 1.
	"""
	size = len(deck)
	# enumerate avoids the unidiomatic range(len(...)) index loop.
	return sum(card * (size - position) for position, card in enumerate(deck))
def solution1(deck1, deck2):
	"""Play a full game of regular Combat and return the winner's score.

	Each round both players reveal their top card; the higher card's owner
	takes both (winner's card placed first at the bottom). The game ends
	when one deck empties.

	:param deck1: player 1's deck, top card first (list of ints)
	:param deck2: player 2's deck, top card first (list of ints)
	:return: score of the winning deck
	"""
	from collections import deque  # O(1) pops from the front of each deck
	d1, d2 = deque(deck1), deque(deck2)
	while d1 and d2:
		top_one, top_two = d1.popleft(), d2.popleft()
		if top_one > top_two:
			d1.extend((top_one, top_two))
		else:
			d2.extend((top_two, top_one))
	return score(d1 if d1 else d2)
def can_recurse(deck1, deck2):
	"""True when both players hold at least as many remaining cards as the
	value of their own top card, i.e. a recursive sub-game can be dealt."""
	top_one, top_two = deck1[0], deck2[0]
	return len(deck1) > top_one and len(deck2) > top_two
def combat(deck1, deck2):
	"""Play Recursive Combat and return (winner, winning score).

	:param deck1: player 1's deck, top card first (list of ints)
	:param deck2: player 2's deck, top card first (list of ints)
	:return: tuple ("p1" or "p2", score of the winning deck)
	"""
	# Deck configurations already seen in THIS game; a repeat means player 1
	# wins immediately (the infinite-game prevention rule).
	db = set()
	while len(deck1) > 0 and len(deck2) > 0:
		key = (tuple(deck1), tuple(deck2))
		if key in db:
			return "p1", score(deck1)
		db.add(key)
		p1, p2 = deck1[0], deck2[0]
		if can_recurse(deck1, deck2):
			# Both players have enough cards left: the round is decided by a
			# sub-game dealt from the next p1 / p2 cards of each deck.
			winner, _ = combat(deck1[1:p1+1], deck2[1:p2+1])
		else:
			# Normal round: the higher top card wins.
			winner = "p1" if p1 > p2 else "p2"
		# Round winner places both cards at the bottom, own card first.
		if winner == "p1":
			deck1 = deck1[1:] + [p1, p2]
			deck2 = deck2[1:]
		else:
			deck1 = deck1[1:]
			deck2 = deck2[1:] + [p2, p1]
	if len(deck1) > 0:
		return "p1", score(deck1)
	return "p2", score(deck2)
def solution2(deck1, deck2):
	"""Score of the winning deck after a full game of Recursive Combat."""
	_winner, points = combat(deck1, deck2)
	return points
def main():
	"""Print part-1 then part-2 answers, each for the sample and real input."""
	for solve in (solution1, solution2):
		for path in ("sample.txt", "input.txt"):
			print(solve(*parse(path)))

if __name__ == "__main__":
	main()
|
flexible
|
{
"blob_id": "508d016161131481ace41f3d3bda005423125fe5",
"index": 5635,
"step-1": "def parse(filename):\n t1, t2 = open(filename).read().strip().split('\\n\\n')\n return tuple(map(lambda x: list(map(int, x.split('\\n')[1:])), [t1, t2]))\n\n\ndef score(deck):\n res = 0\n for i in range(len(deck)):\n res += deck[i] * (len(deck) - i)\n return res\n\n\n<mask token>\n\n\ndef combat(deck1, deck2):\n db = set()\n while len(deck1) > 0 and len(deck2) > 0:\n key = tuple(deck1), tuple(deck2)\n if key in db:\n return 'p1', score(deck1)\n db.add(key)\n p1, p2 = deck1[0], deck2[0]\n if can_recurse(deck1, deck2):\n winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])\n else:\n winner = 'p1' if p1 > p2 else 'p2'\n if winner == 'p1':\n deck1 = deck1[1:] + [p1, p2]\n deck2 = deck2[1:]\n else:\n deck1 = deck1[1:]\n deck2 = deck2[1:] + [p2, p1]\n if len(deck1) > 0:\n return 'p1', score(deck1)\n return 'p2', score(deck2)\n\n\n<mask token>\n\n\ndef main():\n print(solution1(*parse('sample.txt')))\n print(solution1(*parse('input.txt')))\n print(solution2(*parse('sample.txt')))\n print(solution2(*parse('input.txt')))\n\n\n<mask token>\n",
"step-2": "def parse(filename):\n t1, t2 = open(filename).read().strip().split('\\n\\n')\n return tuple(map(lambda x: list(map(int, x.split('\\n')[1:])), [t1, t2]))\n\n\ndef score(deck):\n res = 0\n for i in range(len(deck)):\n res += deck[i] * (len(deck) - i)\n return res\n\n\n<mask token>\n\n\ndef combat(deck1, deck2):\n db = set()\n while len(deck1) > 0 and len(deck2) > 0:\n key = tuple(deck1), tuple(deck2)\n if key in db:\n return 'p1', score(deck1)\n db.add(key)\n p1, p2 = deck1[0], deck2[0]\n if can_recurse(deck1, deck2):\n winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])\n else:\n winner = 'p1' if p1 > p2 else 'p2'\n if winner == 'p1':\n deck1 = deck1[1:] + [p1, p2]\n deck2 = deck2[1:]\n else:\n deck1 = deck1[1:]\n deck2 = deck2[1:] + [p2, p1]\n if len(deck1) > 0:\n return 'p1', score(deck1)\n return 'p2', score(deck2)\n\n\ndef solution2(deck1, deck2):\n return combat(deck1, deck2)[1]\n\n\ndef main():\n print(solution1(*parse('sample.txt')))\n print(solution1(*parse('input.txt')))\n print(solution2(*parse('sample.txt')))\n print(solution2(*parse('input.txt')))\n\n\n<mask token>\n",
"step-3": "def parse(filename):\n t1, t2 = open(filename).read().strip().split('\\n\\n')\n return tuple(map(lambda x: list(map(int, x.split('\\n')[1:])), [t1, t2]))\n\n\ndef score(deck):\n res = 0\n for i in range(len(deck)):\n res += deck[i] * (len(deck) - i)\n return res\n\n\n<mask token>\n\n\ndef can_recurse(deck1, deck2):\n p1, p2 = deck1[0], deck2[0]\n return p1 <= len(deck1) - 1 and p2 <= len(deck2) - 1\n\n\ndef combat(deck1, deck2):\n db = set()\n while len(deck1) > 0 and len(deck2) > 0:\n key = tuple(deck1), tuple(deck2)\n if key in db:\n return 'p1', score(deck1)\n db.add(key)\n p1, p2 = deck1[0], deck2[0]\n if can_recurse(deck1, deck2):\n winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])\n else:\n winner = 'p1' if p1 > p2 else 'p2'\n if winner == 'p1':\n deck1 = deck1[1:] + [p1, p2]\n deck2 = deck2[1:]\n else:\n deck1 = deck1[1:]\n deck2 = deck2[1:] + [p2, p1]\n if len(deck1) > 0:\n return 'p1', score(deck1)\n return 'p2', score(deck2)\n\n\ndef solution2(deck1, deck2):\n return combat(deck1, deck2)[1]\n\n\ndef main():\n print(solution1(*parse('sample.txt')))\n print(solution1(*parse('input.txt')))\n print(solution2(*parse('sample.txt')))\n print(solution2(*parse('input.txt')))\n\n\n<mask token>\n",
"step-4": "def parse(filename):\n t1, t2 = open(filename).read().strip().split('\\n\\n')\n return tuple(map(lambda x: list(map(int, x.split('\\n')[1:])), [t1, t2]))\n\n\ndef score(deck):\n res = 0\n for i in range(len(deck)):\n res += deck[i] * (len(deck) - i)\n return res\n\n\ndef solution1(deck1, deck2):\n while len(deck1) > 0 and len(deck2) > 0:\n p1, p2 = deck1[0], deck2[0]\n if p1 > p2:\n deck1 = deck1[1:] + [p1, p2]\n deck2 = deck2[1:]\n else:\n deck1 = deck1[1:]\n deck2 = deck2[1:] + [p2, p1]\n if len(deck1) > 0:\n return score(deck1)\n return score(deck2)\n\n\ndef can_recurse(deck1, deck2):\n p1, p2 = deck1[0], deck2[0]\n return p1 <= len(deck1) - 1 and p2 <= len(deck2) - 1\n\n\ndef combat(deck1, deck2):\n db = set()\n while len(deck1) > 0 and len(deck2) > 0:\n key = tuple(deck1), tuple(deck2)\n if key in db:\n return 'p1', score(deck1)\n db.add(key)\n p1, p2 = deck1[0], deck2[0]\n if can_recurse(deck1, deck2):\n winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])\n else:\n winner = 'p1' if p1 > p2 else 'p2'\n if winner == 'p1':\n deck1 = deck1[1:] + [p1, p2]\n deck2 = deck2[1:]\n else:\n deck1 = deck1[1:]\n deck2 = deck2[1:] + [p2, p1]\n if len(deck1) > 0:\n return 'p1', score(deck1)\n return 'p2', score(deck2)\n\n\ndef solution2(deck1, deck2):\n return combat(deck1, deck2)[1]\n\n\ndef main():\n print(solution1(*parse('sample.txt')))\n print(solution1(*parse('input.txt')))\n print(solution2(*parse('sample.txt')))\n print(solution2(*parse('input.txt')))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "def parse(filename):\n\tt1, t2 = open(filename).read().strip().split(\"\\n\\n\")\n\treturn tuple(map(lambda x: list(map(int, x.split(\"\\n\")[1:])), [t1, t2]))\n\ndef score(deck):\n\tres = 0\n\tfor i in range(len(deck)):\n\t\tres += deck[i] * (len(deck)-i)\n\treturn res\n\ndef solution1(deck1, deck2):\n\twhile len(deck1) > 0 and len(deck2) > 0:\n\t\tp1, p2 = deck1[0], deck2[0]\n\t\tif p1 > p2:\n\t\t\tdeck1 = deck1[1:] + [p1, p2]\n\t\t\tdeck2 = deck2[1:]\n\t\telse:\n\t\t\tdeck1 = deck1[1:]\n\t\t\tdeck2 = deck2[1:] + [p2, p1]\n\tif len(deck1) > 0:\n\t\treturn score(deck1)\n\treturn score(deck2)\n\ndef can_recurse(deck1, deck2):\n\tp1, p2 = deck1[0], deck2[0]\n\treturn p1 <= len(deck1) - 1 and p2 <= len(deck2) - 1\n\ndef combat(deck1, deck2):\n\tdb = set()\n\twhile len(deck1) > 0 and len(deck2) > 0:\n\t\tkey = (tuple(deck1), tuple(deck2))\n\t\tif key in db:\n\t\t\treturn \"p1\", score(deck1)\n\t\tdb.add(key)\n\n\t\tp1, p2 = deck1[0], deck2[0]\n\n\t\tif can_recurse(deck1, deck2):\n\t\t\twinner, _ = combat(deck1[1:p1+1], deck2[1:p2+1])\n\t\telse:\n\t\t\twinner = \"p1\" if p1 > p2 else \"p2\"\n\n\t\tif winner == \"p1\":\n\t\t\tdeck1 = deck1[1:] + [p1, p2]\n\t\t\tdeck2 = deck2[1:]\n\t\telse:\n\t\t\tdeck1 = deck1[1:]\n\t\t\tdeck2 = deck2[1:] + [p2, p1]\n\n\tif len(deck1) > 0:\n\t\treturn \"p1\", score(deck1)\n\treturn \"p2\", score(deck2)\n\ndef solution2(deck1, deck2):\n\treturn combat(deck1, deck2)[1]\n\ndef main():\n\tprint(solution1(*parse(\"sample.txt\")))\n\tprint(solution1(*parse(\"input.txt\")))\n\n\tprint(solution2(*parse(\"sample.txt\")))\n\tprint(solution2(*parse(\"input.txt\")))\n\nif __name__ == \"__main__\":\n\tmain()\n",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
#List methods allow you to modify lists. The following are some list methods for you to practice with. Feel free to google resources to help you with this assignment.
#append(element) adds a single element to the list
#1. 'Anonymous' is also deserving to be in the hacker legends list. Add him in to the hacker legends list and print your results.
hacker_legends = ['LulzSec', 'Gary McKinnon', 'Adrian Lamo', 'Jonathan James', 'Kevin Poulsen']
hacker_legends.append('Anonymous')
print(hacker_legends)
#insert(index, element) adds a new element at any position in your list.
#2. You just created a networking study list and forgot to add in 'SSH'. Please add that into the 3rd position in the networking list and print your results.
networking = ['packet', 'LAN', 'WAN', 'port', 'firewall', 'VPN']
networking.insert(3, 'SSH')
print(networking)
#remove(element) removes a single element from the list
#3. The cyber security analyst entered the wrong IP address in the list below. Please remove the non-float integer from the ip addy list and print your results.
ip_addy = [255.224, 192.168, 1331904083.25, 5102018, 10.255, 172.31]
ip_addy.remove(5102018)
print(ip_addy)
#pop(index) removes the element at the given index position
#4. The cyber traits list below is a list of traits that fit a career in cyber security. Everything is accurate, except for 'lazy'. Please remove 'lazy' from the list and print your results.
cyber_traits = ['detailed oriented', 'methodically', 'lazy', 'persistent', 'curious', 'instinctive']
cyber_traits.pop(2)
print(cyber_traits)
#extend(list) adds elements from another list
#5. Combine the new co list with the sec co list and print your results.
sec_co = ['IBM', 'Raytheon', 'Mimecast', 'Cisco']
new_co= ['Checkp Point Software', 'Palo Alto Networks', 'Symantec', 'Trend Micro']
sec_co.extend(new_co)
print(sec_co)
#index(element) searches an element in the list and returns its index
#6. There were some headline grabbing cyber attacks in 2017. In the cyber attacks list below, find the index position of 'WannaCry' and print your result.
cyber_attacks = ['Equifax Data Breach', 'Uber Data Breach', 'Yahoo!','WannaCry', 'Deep Root Analytics']
# Fix: the exercise asks for the index position, so use .index() rather than
# printing the element at a hard-coded position.
print(cyber_attacks.index('WannaCry'))
#count(element) counts how many times an element is in a list
#7. In the dns list below, find the number of occurrences for 98.105 and print your results.
dns_list = [98.105, 98.1115, 99.105, 98.111, 98.105, 98.106, 98.501]
print(dns_list.count(98.105))
#reverse() reverses the elements of a given list
#8. Decipher Mr. Robot's quote using the reverse method and print his message.
mr_robot = ['bigger', 'something', 'represents', 'it', 'mistake', 'a', 'just', 'never', 'is', 'bug', 'a']
mr_robot.reverse()
print(mr_robot)
#sort() sorts elements of a given list in a specific order (ascending or descending)
#9 Sort the following list of SSH Ids in ascending order
ssh_list = [1331903959.94555, 1331901011.84795, 1331903492.37203, 1331901032.03789, 1331903508.24007, 1331903476.8]
ssh_list.sort()
print(ssh_list)
#print the list in descending order
ssh_list.sort(reverse=True)
print(ssh_list)
#max() returns the largest element in the list
#10 Find the largest integer in the network list below:
network_list = [39104, 38694, 38702, 38787, 39860]
print(max(network_list))
#min() returns the smallest element in the list
#11 Find the smallest integer in the network list below:
network_list = [39104, 38694, 38702, 38787, 39860]
print(min(network_list))
#sum() calculates the sum of all the elements in the list
#12 Find the sum of the following occurrence list below:
occurences = [3, 2.5, 9, 7, 21, 6, 8]
print(sum(occurences))
|
normal
|
{
"blob_id": "53fd020946a2baddb1bb0463d2a56744de6e3822",
"index": 5506,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nhacker_legends.append('Anonymous')\nprint(hacker_legends)\n<mask token>\nnetworking.insert(3, 'SSH')\nprint(networking)\n<mask token>\nip_addy.remove(5102018)\nprint(ip_addy)\n<mask token>\ncyber_traits.pop(2)\nprint(cyber_traits)\n<mask token>\nsec_co.extend(new_co)\nprint(sec_co)\n<mask token>\nprint(cyber_attacks[3])\n<mask token>\nprint(dns_list.count(98.105))\n<mask token>\nmr_robot.reverse()\nprint(mr_robot)\n<mask token>\nssh_list.sort()\nprint(ssh_list)\nssh_list.sort(reverse=True)\nprint(ssh_list)\n<mask token>\nprint(max(network_list))\n<mask token>\nprint(min(network_list))\n<mask token>\nprint(sum(occurences))\n",
"step-3": "hacker_legends = ['LulzSec', 'Gary McKinnon', 'Adrian Lamo',\n 'Jonathan James', 'Kevin Poulsen']\nhacker_legends.append('Anonymous')\nprint(hacker_legends)\nnetworking = ['packet', 'LAN', 'WAN', 'port', 'firewall', 'VPN']\nnetworking.insert(3, 'SSH')\nprint(networking)\nip_addy = [255.224, 192.168, 1331904083.25, 5102018, 10.255, 172.31]\nip_addy.remove(5102018)\nprint(ip_addy)\ncyber_traits = ['detailed oriented', 'methodically', 'lazy', 'persistent',\n 'curious', 'instinctive']\ncyber_traits.pop(2)\nprint(cyber_traits)\nsec_co = ['IBM', 'Raytheon', 'Mimecast', 'Cisco']\nnew_co = ['Checkp Point Software', 'Palo Alto Networks', 'Symantec',\n 'Trend Micro']\nsec_co.extend(new_co)\nprint(sec_co)\ncyber_attacks = ['Equifax Data Breach', 'Uber Data Breach', 'Yahoo!',\n 'WannaCry', 'Deep Root Analytics']\nprint(cyber_attacks[3])\ndns_list = [98.105, 98.1115, 99.105, 98.111, 98.105, 98.106, 98.501]\nprint(dns_list.count(98.105))\nmr_robot = ['bigger', 'something', 'represents', 'it', 'mistake', 'a',\n 'just', 'never', 'is', 'bug', 'a']\nmr_robot.reverse()\nprint(mr_robot)\nssh_list = [1331903959.94555, 1331901011.84795, 1331903492.37203, \n 1331901032.03789, 1331903508.24007, 1331903476.8]\nssh_list.sort()\nprint(ssh_list)\nssh_list.sort(reverse=True)\nprint(ssh_list)\nnetwork_list = [39104, 38694, 38702, 38787, 39860]\nprint(max(network_list))\nnetwork_list = [39104, 38694, 38702, 38787, 39860]\nprint(min(network_list))\noccurences = [3, 2.5, 9, 7, 21, 6, 8]\nprint(sum(occurences))\n",
"step-4": "#List methods allow you to modify lists. The following are some list methods for you to practice with. Feel free to google resources to help you with this assignment.\n\n#append(element) adds a single element to the list\n#1. 'Anonymous' is also deserving to be in the hacker legends list. Add him in to the hacker legends list and print your results.\n\nhacker_legends = ['LulzSec', 'Gary McKinnon', 'Adrian Lamo', 'Jonathan James', 'Kevin Poulsen']\n\nhacker_legends.append('Anonymous')\nprint(hacker_legends)\n\n#insert (index, element) adds a new element at any position in your list.\n#2. You just created a networking study list and forgot to add in 'SSH'. Please add that into the 3rd position in the networking list and print your results.\n\nnetworking = ['packet', 'LAN', 'WAN', 'port', 'firewall', 'VPN']\n\nnetworking.insert(3, 'SSH')\nprint(networking)\n\n#remove(element) removes a single element from the list\n#3. The cyber security analyst entered the wrong IP address in the list below. Please remove the non-float integer from the ip addy list and print your results.\n\nip_addy = [255.224, 192.168, 1331904083.25, 5102018, 10.255, 172.31]\n\nip_addy.remove(5102018)\nprint(ip_addy)\n\n#pop(index) removes the element at the given index position\n#4. The cyber traits list below is a list of traits that fit a career in cyber security. Everything is accurate, except for 'lazy'. Please remove 'lazy' from the list and print your results.\n\ncyber_traits = ['detailed oriented', 'methodically', 'lazy', 'persistent', 'curious', 'instinctive']\n\ncyber_traits.pop(2)\nprint(cyber_traits)\n\n#extend(list) adds elements from another list \n#5. Combine the new co list with the sec co list and print your results.\n\nsec_co = ['IBM', 'Raytheon', 'Mimecast', 'Cisco']\nnew_co= ['Checkp Point Software', 'Palo Alto Networks', 'Symantec', 'Trend Micro']\n\nsec_co.extend(new_co)\nprint(sec_co)\n\n#index(element) searches an element in the list and returns its index\n#6. 
There were some headline grabbing cyber attacks in 2017. In the cyber attacks list below, find the index position of 'WannaCry' and print your result.\n\ncyber_attacks = ['Equifax Data Breach', 'Uber Data Breach', 'Yahoo!','WannaCry', 'Deep Root Analytics']\n\nprint(cyber_attacks[3])\n\n#count(element) counts how many times an element is in a list\n#7. In the dns list below, find the number of ocurrence for 98.105 and print your results.\n\ndns_list = [98.105, 98.1115, 99.105, 98.111, 98.105, 98.106, 98.501]\n\nprint(dns_list.count(98.105))\n\n#reverse() reverses the elements of a given list\n#8. Decipher Mr. Robot's quote using the reverse method and print his message.\n\nmr_robot = ['bigger', 'something', 'represents', 'it', 'mistake', 'a', 'just', 'never', 'is', 'bug', 'a']\n\nmr_robot.reverse()\nprint(mr_robot)\n\n#sort () sorts elements of a given list in a specific order (ascending or descending)\n#9 Sort the following list of SSH Ids in ascending order\n\nssh_list = [1331903959.94555, 1331901011.84795, 1331903492.37203, 1331901032.03789, 1331903508.24007, 1331903476.8]\n\nssh_list.sort()\nprint(ssh_list)\n\n#print the list in descending order\nssh_list.sort(reverse=True)\nprint(ssh_list)\n\n#max() returns the largest element in the list\n#10 Find the largest integer in the network list below:\n\nnetwork_list = [39104, 38694, 38702, 38787, 39860]\n\nprint(max(network_list))\n\n#min() returns the smallest element in the list\n#11 Find the smallest integet in the network list below:\n\nnetwork_list = [39104, 38694, 38702, 38787, 39860]\n\nprint(min(network_list))\n\n#sum() calculates the sum of the all the elements in the list\n#12 Find the sum of the following occurence list below:\n\noccurences = [3, 2.5, 9, 7, 21, 6, 8]\nprint(sum(occurences))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for ti in range(tn):
rn, cn = [int(x) for x in input().split()]
evenRow = '-'.join(['+'] * (cn + 1))
oddRow = '.'.join(['|'] * (cn + 1))
artrn = rn * 2 + 1
print(f'Case #{ti + 1}:')
for ri in range(artrn):
defaultRow = evenRow if ri % 2 == 0 else oddRow
if ri // 2 == 0:
print('..' + defaultRow[2:])
else:
print(defaultRow)
<|reserved_special_token_1|>
tn = int(input())
for ti in range(tn):
rn, cn = [int(x) for x in input().split()]
evenRow = '-'.join(['+'] * (cn + 1))
oddRow = '.'.join(['|'] * (cn + 1))
artrn = rn * 2 + 1
print(f'Case #{ti + 1}:')
for ri in range(artrn):
defaultRow = evenRow if ri % 2 == 0 else oddRow
if ri // 2 == 0:
print('..' + defaultRow[2:])
else:
print(defaultRow)
<|reserved_special_token_1|>
def render(rn, cn):
    """Return the ASCII-art lines for an ``rn`` x ``cn`` grid of cells.

    Even art rows are '+' corners joined by '-'; odd art rows are '|' walls
    joined by '.'. The top-left cell is "punched out", so the first two art
    rows replace their first two characters with '..'.

    Parameters
    ----------
    rn, cn : int
        number of cell rows and cell columns (both >= 1)

    Returns
    -------
    list of str
        the 2*rn + 1 lines of the drawing, top to bottom
    """
    even_row = '-'.join(['+'] * (cn + 1))
    odd_row = '.'.join(['|'] * (cn + 1))
    lines = []
    for ri in range(rn * 2 + 1):
        row = even_row if ri % 2 == 0 else odd_row
        # ri // 2 == 0 covers the first two art rows, which lose the corner
        lines.append('..' + row[2:] if ri // 2 == 0 else row)
    return lines


def main():
    """Read test cases from stdin and print each rendered grid."""
    tn = int(input())
    for ti in range(tn):
        rn, cn = [int(x) for x in input().split()]
        print(f'Case #{ti + 1}:')
        for line in render(rn, cn):
            print(line)


if __name__ == "__main__":
    main()
|
flexible
|
{
"blob_id": "1972e3733918da654cd156a500432a35a239aed4",
"index": 1841,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor ti in range(tn):\n rn, cn = [int(x) for x in input().split()]\n evenRow = '-'.join(['+'] * (cn + 1))\n oddRow = '.'.join(['|'] * (cn + 1))\n artrn = rn * 2 + 1\n print(f'Case #{ti + 1}:')\n for ri in range(artrn):\n defaultRow = evenRow if ri % 2 == 0 else oddRow\n if ri // 2 == 0:\n print('..' + defaultRow[2:])\n else:\n print(defaultRow)\n",
"step-3": "tn = int(input())\nfor ti in range(tn):\n rn, cn = [int(x) for x in input().split()]\n evenRow = '-'.join(['+'] * (cn + 1))\n oddRow = '.'.join(['|'] * (cn + 1))\n artrn = rn * 2 + 1\n print(f'Case #{ti + 1}:')\n for ri in range(artrn):\n defaultRow = evenRow if ri % 2 == 0 else oddRow\n if ri // 2 == 0:\n print('..' + defaultRow[2:])\n else:\n print(defaultRow)\n",
"step-4": "tn=int(input())\nfor ti in range(tn):\n #ans = work()\n rn,cn = [int(x) for x in input().split()]\n evenRow='-'.join(['+']*(cn+1))\n oddRow='.'.join(['|']*(cn+1))\n artrn = rn*2+1\n print(f'Case #{ti+1}:')\n for ri in range(artrn):\n defaultRow = evenRow if ri%2==0 else oddRow\n if ri//2==0:\n print('..'+defaultRow[2:])\n else:\n print(defaultRow)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from graphviz import Digraph

# (parent, child) pairs describing Python's builtin exception hierarchy,
# listed in the order the edges should be added to the graph.
_EDGES = [
    ("BaseException", "SystemExit"),
    ("BaseException", "KeyboardInterrupt"),
    ("BaseException", "GeneratorExit"),
    ("BaseException", "Exception"),
    ("Exception", "StopIteration"),
    ("Exception", "StopAsyncIteration"),
    ("Exception", "ArithmeticError"),
    ("ArithmeticError", "FloatingPointError"),
    ("ArithmeticError", "OverflowError"),
    ("ArithmeticError", "ZeroDivisionError"),
    ("Exception", "AssertionError"),
    ("Exception", "AttributeError"),
    ("Exception", "BufferError"),
    ("Exception", "EOFError"),
    ("Exception", "ImportError"),
    ("ImportError", "ModuleNotFoundError"),
    ("Exception", "LookupError"),
    ("LookupError", "IndexError"),
    ("LookupError", "KeyError"),
    ("Exception", "MemoryError"),
    ("Exception", "NameError"),
    ("NameError", "UnboundLocalError"),
    ("Exception", "OSError"),
    ("OSError", "BlockingIOError"),
    ("OSError", "ChildProcessError"),
    ("OSError", "ConnectionError"),
    ("ConnectionError", "BrokenPipeError"),
    ("ConnectionError", "ConnectionAbortedError"),
    ("ConnectionError", "ConnectionRefusedError"),
    ("ConnectionError", "ConnectionResetError"),
    ("OSError", "FileExistsError"),
    ("OSError", "FileNotFoundError"),
    ("OSError", "InterruptedError"),
    ("OSError", "IsADirectoryError"),
    ("OSError", "NotADirectoryError"),
    ("OSError", "PermissionError"),
    ("OSError", "ProcessLookupError"),
    ("OSError", "TimeoutError"),
    ("Exception", "ReferenceError"),
    ("Exception", "RuntimeError"),
    ("RuntimeError", "NotImplementedError"),
    ("RuntimeError", "RecursionError"),
    ("Exception", "SyntaxError"),
    ("SyntaxError", "IndentationError"),
    ("SyntaxError", "TabError"),
    ("Exception", "SystemError"),
    ("Exception", "TypeError"),
    ("Exception", "ValueError"),
    ("ValueError", "UnicodeError"),
    ("UnicodeError", "UnicodeDecodeError"),
    ("UnicodeError", "UnicodeEncodeError"),
    ("UnicodeError", "UnicodeTranslateError"),
]

# Build the digraph and write its DOT source to disk.
dot = Digraph()
for parent, child in _EDGES:
    dot.edge(parent, child)

dot_source = dot.source

with open("exceptions.dot", "w") as dot_file:
    dot_file.write(dot_source)
|
normal
|
{
"blob_id": "a7db627c49b53cd3a073d866a0373336a46b4053",
"index": 1088,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndot.edge('BaseException', 'SystemExit')\ndot.edge('BaseException', 'KeyboardInterrupt')\ndot.edge('BaseException', 'GeneratorExit')\ndot.edge('BaseException', 'Exception')\ndot.edge('Exception', 'StopIteration')\ndot.edge('Exception', 'StopAsyncIteration')\ndot.edge('Exception', 'ArithmeticError')\ndot.edge('ArithmeticError', 'FloatingPointError')\ndot.edge('ArithmeticError', 'OverflowError')\ndot.edge('ArithmeticError', 'ZeroDivisionError')\ndot.edge('Exception', 'AssertionError')\ndot.edge('Exception', 'AttributeError')\ndot.edge('Exception', 'BufferError')\ndot.edge('Exception', 'EOFError')\ndot.edge('Exception', 'ImportError')\ndot.edge('ImportError', 'ModuleNotFoundError')\ndot.edge('Exception', 'LookupError')\ndot.edge('LookupError', 'IndexError')\ndot.edge('LookupError', 'KeyError')\ndot.edge('Exception', 'MemoryError')\ndot.edge('Exception', 'NameError')\ndot.edge('NameError', 'UnboundLocalError')\ndot.edge('Exception', 'OSError')\ndot.edge('OSError', 'BlockingIOError')\ndot.edge('OSError', 'ChildProcessError')\ndot.edge('OSError', 'ConnectionError')\ndot.edge('ConnectionError', 'BrokenPipeError')\ndot.edge('ConnectionError', 'ConnectionAbortedError')\ndot.edge('ConnectionError', 'ConnectionRefusedError')\ndot.edge('ConnectionError', 'ConnectionResetError')\ndot.edge('OSError', 'FileExistsError')\ndot.edge('OSError', 'FileNotFoundError')\ndot.edge('OSError', 'InterruptedError')\ndot.edge('OSError', 'IsADirectoryError')\ndot.edge('OSError', 'NotADirectoryError')\ndot.edge('OSError', 'PermissionError')\ndot.edge('OSError', 'ProcessLookupError')\ndot.edge('OSError', 'TimeoutError')\ndot.edge('Exception', 'ReferenceError')\ndot.edge('Exception', 'RuntimeError')\ndot.edge('RuntimeError', 'NotImplementedError')\ndot.edge('RuntimeError', 'RecursionError')\ndot.edge('Exception', 'SyntaxError')\ndot.edge('SyntaxError', 'IndentationError')\ndot.edge('SyntaxError', 'TabError')\ndot.edge('Exception', 'SystemError')\ndot.edge('Exception', 
'TypeError')\ndot.edge('Exception', 'ValueError')\ndot.edge('ValueError', 'UnicodeError')\ndot.edge('UnicodeError', 'UnicodeDecodeError')\ndot.edge('UnicodeError', 'UnicodeEncodeError')\ndot.edge('UnicodeError', 'UnicodeTranslateError')\n<mask token>\nwith open('exceptions.dot', 'w') as dot_file:\n dot_file.write(dot_source)\n",
"step-3": "<mask token>\ndot = Digraph()\ndot.edge('BaseException', 'SystemExit')\ndot.edge('BaseException', 'KeyboardInterrupt')\ndot.edge('BaseException', 'GeneratorExit')\ndot.edge('BaseException', 'Exception')\ndot.edge('Exception', 'StopIteration')\ndot.edge('Exception', 'StopAsyncIteration')\ndot.edge('Exception', 'ArithmeticError')\ndot.edge('ArithmeticError', 'FloatingPointError')\ndot.edge('ArithmeticError', 'OverflowError')\ndot.edge('ArithmeticError', 'ZeroDivisionError')\ndot.edge('Exception', 'AssertionError')\ndot.edge('Exception', 'AttributeError')\ndot.edge('Exception', 'BufferError')\ndot.edge('Exception', 'EOFError')\ndot.edge('Exception', 'ImportError')\ndot.edge('ImportError', 'ModuleNotFoundError')\ndot.edge('Exception', 'LookupError')\ndot.edge('LookupError', 'IndexError')\ndot.edge('LookupError', 'KeyError')\ndot.edge('Exception', 'MemoryError')\ndot.edge('Exception', 'NameError')\ndot.edge('NameError', 'UnboundLocalError')\ndot.edge('Exception', 'OSError')\ndot.edge('OSError', 'BlockingIOError')\ndot.edge('OSError', 'ChildProcessError')\ndot.edge('OSError', 'ConnectionError')\ndot.edge('ConnectionError', 'BrokenPipeError')\ndot.edge('ConnectionError', 'ConnectionAbortedError')\ndot.edge('ConnectionError', 'ConnectionRefusedError')\ndot.edge('ConnectionError', 'ConnectionResetError')\ndot.edge('OSError', 'FileExistsError')\ndot.edge('OSError', 'FileNotFoundError')\ndot.edge('OSError', 'InterruptedError')\ndot.edge('OSError', 'IsADirectoryError')\ndot.edge('OSError', 'NotADirectoryError')\ndot.edge('OSError', 'PermissionError')\ndot.edge('OSError', 'ProcessLookupError')\ndot.edge('OSError', 'TimeoutError')\ndot.edge('Exception', 'ReferenceError')\ndot.edge('Exception', 'RuntimeError')\ndot.edge('RuntimeError', 'NotImplementedError')\ndot.edge('RuntimeError', 'RecursionError')\ndot.edge('Exception', 'SyntaxError')\ndot.edge('SyntaxError', 'IndentationError')\ndot.edge('SyntaxError', 'TabError')\ndot.edge('Exception', 
'SystemError')\ndot.edge('Exception', 'TypeError')\ndot.edge('Exception', 'ValueError')\ndot.edge('ValueError', 'UnicodeError')\ndot.edge('UnicodeError', 'UnicodeDecodeError')\ndot.edge('UnicodeError', 'UnicodeEncodeError')\ndot.edge('UnicodeError', 'UnicodeTranslateError')\ndot_source = dot.source\nwith open('exceptions.dot', 'w') as dot_file:\n dot_file.write(dot_source)\n",
"step-4": "from graphviz import Digraph\ndot = Digraph()\ndot.edge('BaseException', 'SystemExit')\ndot.edge('BaseException', 'KeyboardInterrupt')\ndot.edge('BaseException', 'GeneratorExit')\ndot.edge('BaseException', 'Exception')\ndot.edge('Exception', 'StopIteration')\ndot.edge('Exception', 'StopAsyncIteration')\ndot.edge('Exception', 'ArithmeticError')\ndot.edge('ArithmeticError', 'FloatingPointError')\ndot.edge('ArithmeticError', 'OverflowError')\ndot.edge('ArithmeticError', 'ZeroDivisionError')\ndot.edge('Exception', 'AssertionError')\ndot.edge('Exception', 'AttributeError')\ndot.edge('Exception', 'BufferError')\ndot.edge('Exception', 'EOFError')\ndot.edge('Exception', 'ImportError')\ndot.edge('ImportError', 'ModuleNotFoundError')\ndot.edge('Exception', 'LookupError')\ndot.edge('LookupError', 'IndexError')\ndot.edge('LookupError', 'KeyError')\ndot.edge('Exception', 'MemoryError')\ndot.edge('Exception', 'NameError')\ndot.edge('NameError', 'UnboundLocalError')\ndot.edge('Exception', 'OSError')\ndot.edge('OSError', 'BlockingIOError')\ndot.edge('OSError', 'ChildProcessError')\ndot.edge('OSError', 'ConnectionError')\ndot.edge('ConnectionError', 'BrokenPipeError')\ndot.edge('ConnectionError', 'ConnectionAbortedError')\ndot.edge('ConnectionError', 'ConnectionRefusedError')\ndot.edge('ConnectionError', 'ConnectionResetError')\ndot.edge('OSError', 'FileExistsError')\ndot.edge('OSError', 'FileNotFoundError')\ndot.edge('OSError', 'InterruptedError')\ndot.edge('OSError', 'IsADirectoryError')\ndot.edge('OSError', 'NotADirectoryError')\ndot.edge('OSError', 'PermissionError')\ndot.edge('OSError', 'ProcessLookupError')\ndot.edge('OSError', 'TimeoutError')\ndot.edge('Exception', 'ReferenceError')\ndot.edge('Exception', 'RuntimeError')\ndot.edge('RuntimeError', 'NotImplementedError')\ndot.edge('RuntimeError', 'RecursionError')\ndot.edge('Exception', 'SyntaxError')\ndot.edge('SyntaxError', 'IndentationError')\ndot.edge('SyntaxError', 'TabError')\ndot.edge('Exception', 
'SystemError')\ndot.edge('Exception', 'TypeError')\ndot.edge('Exception', 'ValueError')\ndot.edge('ValueError', 'UnicodeError')\ndot.edge('UnicodeError', 'UnicodeDecodeError')\ndot.edge('UnicodeError', 'UnicodeEncodeError')\ndot.edge('UnicodeError', 'UnicodeTranslateError')\ndot_source = dot.source\nwith open('exceptions.dot', 'w') as dot_file:\n dot_file.write(dot_source)\n",
"step-5": "from graphviz import Digraph\n\ndot = Digraph()\n\ndot.edge(\"BaseException\", \"SystemExit\")\ndot.edge(\"BaseException\", \"KeyboardInterrupt\")\ndot.edge(\"BaseException\", \"GeneratorExit\")\ndot.edge(\"BaseException\", \"Exception\")\ndot.edge(\"Exception\", \"StopIteration\")\ndot.edge(\"Exception\", \"StopAsyncIteration\")\ndot.edge(\"Exception\", \"ArithmeticError\")\ndot.edge(\"ArithmeticError\", \"FloatingPointError\")\ndot.edge(\"ArithmeticError\", \"OverflowError\")\ndot.edge(\"ArithmeticError\", \"ZeroDivisionError\")\ndot.edge(\"Exception\", \"AssertionError\")\ndot.edge(\"Exception\", \"AttributeError\")\ndot.edge(\"Exception\", \"BufferError\")\ndot.edge(\"Exception\", \"EOFError\")\ndot.edge(\"Exception\", \"ImportError\")\ndot.edge(\"ImportError\", \"ModuleNotFoundError\")\ndot.edge(\"Exception\", \"LookupError\")\ndot.edge(\"LookupError\", \"IndexError\")\ndot.edge(\"LookupError\", \"KeyError\")\ndot.edge(\"Exception\", \"MemoryError\")\ndot.edge(\"Exception\", \"NameError\")\ndot.edge(\"NameError\", \"UnboundLocalError\")\ndot.edge(\"Exception\", \"OSError\")\ndot.edge(\"OSError\", \"BlockingIOError\")\ndot.edge(\"OSError\", \"ChildProcessError\")\ndot.edge(\"OSError\", \"ConnectionError\")\ndot.edge(\"ConnectionError\", \"BrokenPipeError\")\ndot.edge(\"ConnectionError\", \"ConnectionAbortedError\")\ndot.edge(\"ConnectionError\", \"ConnectionRefusedError\")\ndot.edge(\"ConnectionError\", \"ConnectionResetError\")\ndot.edge(\"OSError\", \"FileExistsError\")\ndot.edge(\"OSError\", \"FileNotFoundError\")\ndot.edge(\"OSError\", \"InterruptedError\")\ndot.edge(\"OSError\", \"IsADirectoryError\")\ndot.edge(\"OSError\", \"NotADirectoryError\")\ndot.edge(\"OSError\", \"PermissionError\")\ndot.edge(\"OSError\", \"ProcessLookupError\")\ndot.edge(\"OSError\", \"TimeoutError\")\ndot.edge(\"Exception\", \"ReferenceError\")\ndot.edge(\"Exception\", \"RuntimeError\")\ndot.edge(\"RuntimeError\", \"NotImplementedError\")\ndot.edge(\"RuntimeError\", 
\"RecursionError\")\ndot.edge(\"Exception\", \"SyntaxError\")\ndot.edge(\"SyntaxError\", \"IndentationError\")\ndot.edge(\"SyntaxError\", \"TabError\")\ndot.edge(\"Exception\", \"SystemError\")\ndot.edge(\"Exception\", \"TypeError\")\ndot.edge(\"Exception\", \"ValueError\")\ndot.edge(\"ValueError\", \"UnicodeError\")\ndot.edge(\"UnicodeError\", \"UnicodeDecodeError\")\ndot.edge(\"UnicodeError\", \"UnicodeEncodeError\")\ndot.edge(\"UnicodeError\", \"UnicodeTranslateError\")\n\ndot_source = dot.source\n\nwith open(\"exceptions.dot\", \"w\") as dot_file:\n dot_file.write(dot_source)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from functools import partial
import numpy as np
import scipy.stats as sps
# SPMs HRF
def spm_hrf_compat(t,
peak_delay=6,
under_delay=16,
peak_disp=1,
under_disp=1,
p_u_ratio = 6,
normalize=True,
):
""" SPM HRF function from sum of two gamma PDFs
This function is designed to be partially compatible with SPMs `spm_hrf.m`
function.
The SPN HRF is a *peak* gamma PDF (with location `peak_delay` and dispersion
`peak_disp`), minus an *undershoot* gamma PDF (with location `under_delay`
and dispersion `under_disp`, and divided by the `p_u_ratio`).
Parameters
----------
t : array-like
vector of times at which to sample HRF
peak_delay : float, optional
delay of peak
peak_disp : float, optional
width (dispersion) of peak
under_delay : float, optional
delay of undershoot
under_disp : float, optional
width (dispersion) of undershoot
p_u_ratio : float, optional
peak to undershoot ratio. Undershoot divided by this value before
subtracting from peak.
normalize : {True, False}, optional
If True, divide HRF values by their sum before returning. SPM does this
by default.
Returns
-------
hrf : array
vector length ``len(t)`` of samples from HRF at times `t`
Notes
-----
See ``spm_hrf.m`` in the SPM distribution.
"""
if len([v for v in [peak_delay, peak_disp, under_delay, under_disp]
if v <= 0]):
raise ValueError("delays and dispersions must be > 0")
# gamma.pdf only defined for t > 0
hrf = np.zeros(t.shape, dtype=np.float)
pos_t = t[t > 0]
peak = sps.gamma.pdf(pos_t,
peak_delay / peak_disp,
loc=0,
scale=peak_disp)
undershoot = sps.gamma.pdf(pos_t,
under_delay / under_disp,
loc=0,
scale=under_disp)
hrf[t > 0] = peak - undershoot / p_u_ratio
if not normalize:
return hrf
return hrf / np.max(hrf)
def _get_num_int(lf, dt=0.02, t=50):
# numerical integral of numerical function
tt = np.arange(dt,t+dt,dt)
return lf(tt).sum() * dt
# Numerical area under the normalized canonical HRF, kept as a module-level
# reference value.
_spm_can_int = _get_num_int(partial(spm_hrf_compat, normalize=True))
def spmt(t):
    """Sample the SPM canonical HRF at time values `t`.

    Thin wrapper around ``spm_hrf_compat`` using SPM's default shape
    parameters:

        delay of response (relative to onset)    6
        delay of undershoot (relative to onset) 16
        dispersion of response                   1
        dispersion of undershoot                 1
        ratio of response to undershoot          6
        onset (seconds)                          0
        length of kernel (seconds)              32
    """
    return spm_hrf_compat(t, normalize=True)
def dspmt(t):
    """Sample the SPM canonical HRF temporal derivative at time values `t`.

    Computed as the finite difference between the canonical HRF sampled at
    `t` and at `t - 1`, matching SPM's temporal-derivative regressor.
    """
    times = np.asarray(t)
    return spmt(times) - spmt(times - 1)
# Canonical HRF with the peak dispersion nudged from 1 to 1.01; used as the
# perturbed curve in ddspmt's finite-difference dispersion derivative.
_spm_dd_func = partial(spm_hrf_compat, normalize=True, peak_disp=1.01)
def ddspmt(t):
    """Sample the SPM canonical HRF dispersion derivative at times *t*.

    Finite difference between the canonical HRF and a second HRF whose peak
    dispersion is increased by 0.01, scaled by that 0.01 step.
    """
    difference = spmt(t) - _spm_dd_func(t)
    return difference / 0.01
|
normal
|
{
"blob_id": "596ee5568a32c3044e797375fbc705e2091f35c2",
"index": 4340,
"step-1": "<mask token>\n\n\ndef spm_hrf_compat(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp\n =1, p_u_ratio=6, normalize=True):\n \"\"\" SPM HRF function from sum of two gamma PDFs\n\n This function is designed to be partially compatible with SPMs `spm_hrf.m`\n function.\n\n The SPN HRF is a *peak* gamma PDF (with location `peak_delay` and dispersion\n `peak_disp`), minus an *undershoot* gamma PDF (with location `under_delay`\n and dispersion `under_disp`, and divided by the `p_u_ratio`).\n\n Parameters\n ----------\n t : array-like\n vector of times at which to sample HRF\n peak_delay : float, optional\n delay of peak\n peak_disp : float, optional\n width (dispersion) of peak\n under_delay : float, optional\n delay of undershoot\n under_disp : float, optional\n width (dispersion) of undershoot\n p_u_ratio : float, optional\n peak to undershoot ratio. Undershoot divided by this value before\n subtracting from peak.\n normalize : {True, False}, optional\n If True, divide HRF values by their sum before returning. SPM does this\n by default.\n\n Returns\n -------\n hrf : array\n vector length ``len(t)`` of samples from HRF at times `t`\n\n Notes\n -----\n See ``spm_hrf.m`` in the SPM distribution.\n \"\"\"\n if len([v for v in [peak_delay, peak_disp, under_delay, under_disp] if \n v <= 0]):\n raise ValueError('delays and dispersions must be > 0')\n hrf = np.zeros(t.shape, dtype=np.float)\n pos_t = t[t > 0]\n peak = sps.gamma.pdf(pos_t, peak_delay / peak_disp, loc=0, scale=peak_disp)\n undershoot = sps.gamma.pdf(pos_t, under_delay / under_disp, loc=0,\n scale=under_disp)\n hrf[t > 0] = peak - undershoot / p_u_ratio\n if not normalize:\n return hrf\n return hrf / np.max(hrf)\n\n\ndef _get_num_int(lf, dt=0.02, t=50):\n tt = np.arange(dt, t + dt, dt)\n return lf(tt).sum() * dt\n\n\n<mask token>\n\n\ndef spmt(t):\n \"\"\" SPM canonical HRF, HRF values for time values `t`\n\n This is the canonical HRF function as used in SPM. 
It\n has the following defaults:\n defaults\n (seconds)\n delay of response (relative to onset) 6\n delay of undershoot (relative to onset) 16\n dispersion of response 1\n dispersion of undershoot 1\n ratio of response to undershoot 6\n onset (seconds) 0\n length of kernel (seconds) 32\n \"\"\"\n return spm_hrf_compat(t, normalize=True)\n\n\ndef dspmt(t):\n \"\"\" SPM canonical HRF derivative, HRF derivative values for time values `t`\n\n This is the canonical HRF derivative function as used in SPM.\n\n It is the numerical difference of the HRF sampled at time `t` minus the\n values sampled at time `t` -1\n \"\"\"\n t = np.asarray(t)\n return spmt(t) - spmt(t - 1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef spm_hrf_compat(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp\n =1, p_u_ratio=6, normalize=True):\n \"\"\" SPM HRF function from sum of two gamma PDFs\n\n This function is designed to be partially compatible with SPMs `spm_hrf.m`\n function.\n\n The SPN HRF is a *peak* gamma PDF (with location `peak_delay` and dispersion\n `peak_disp`), minus an *undershoot* gamma PDF (with location `under_delay`\n and dispersion `under_disp`, and divided by the `p_u_ratio`).\n\n Parameters\n ----------\n t : array-like\n vector of times at which to sample HRF\n peak_delay : float, optional\n delay of peak\n peak_disp : float, optional\n width (dispersion) of peak\n under_delay : float, optional\n delay of undershoot\n under_disp : float, optional\n width (dispersion) of undershoot\n p_u_ratio : float, optional\n peak to undershoot ratio. Undershoot divided by this value before\n subtracting from peak.\n normalize : {True, False}, optional\n If True, divide HRF values by their sum before returning. SPM does this\n by default.\n\n Returns\n -------\n hrf : array\n vector length ``len(t)`` of samples from HRF at times `t`\n\n Notes\n -----\n See ``spm_hrf.m`` in the SPM distribution.\n \"\"\"\n if len([v for v in [peak_delay, peak_disp, under_delay, under_disp] if \n v <= 0]):\n raise ValueError('delays and dispersions must be > 0')\n hrf = np.zeros(t.shape, dtype=np.float)\n pos_t = t[t > 0]\n peak = sps.gamma.pdf(pos_t, peak_delay / peak_disp, loc=0, scale=peak_disp)\n undershoot = sps.gamma.pdf(pos_t, under_delay / under_disp, loc=0,\n scale=under_disp)\n hrf[t > 0] = peak - undershoot / p_u_ratio\n if not normalize:\n return hrf\n return hrf / np.max(hrf)\n\n\ndef _get_num_int(lf, dt=0.02, t=50):\n tt = np.arange(dt, t + dt, dt)\n return lf(tt).sum() * dt\n\n\n<mask token>\n\n\ndef spmt(t):\n \"\"\" SPM canonical HRF, HRF values for time values `t`\n\n This is the canonical HRF function as used in SPM. 
It\n has the following defaults:\n defaults\n (seconds)\n delay of response (relative to onset) 6\n delay of undershoot (relative to onset) 16\n dispersion of response 1\n dispersion of undershoot 1\n ratio of response to undershoot 6\n onset (seconds) 0\n length of kernel (seconds) 32\n \"\"\"\n return spm_hrf_compat(t, normalize=True)\n\n\ndef dspmt(t):\n \"\"\" SPM canonical HRF derivative, HRF derivative values for time values `t`\n\n This is the canonical HRF derivative function as used in SPM.\n\n It is the numerical difference of the HRF sampled at time `t` minus the\n values sampled at time `t` -1\n \"\"\"\n t = np.asarray(t)\n return spmt(t) - spmt(t - 1)\n\n\n<mask token>\n\n\ndef ddspmt(t):\n \"\"\" SPM canonical HRF dispersion derivative, values for time values `t`\n\n This is the canonical HRF dispersion derivative function as used in SPM.\n\n It is the numerical difference between the HRF sampled at time `t`, and\n values at `t` for another HRF shape with a small change in the peak\n dispersion parameter (``peak_disp`` in func:`spm_hrf_compat`).\n \"\"\"\n return (spmt(t) - _spm_dd_func(t)) / 0.01\n",
"step-3": "<mask token>\n\n\ndef spm_hrf_compat(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp\n =1, p_u_ratio=6, normalize=True):\n \"\"\" SPM HRF function from sum of two gamma PDFs\n\n This function is designed to be partially compatible with SPMs `spm_hrf.m`\n function.\n\n The SPN HRF is a *peak* gamma PDF (with location `peak_delay` and dispersion\n `peak_disp`), minus an *undershoot* gamma PDF (with location `under_delay`\n and dispersion `under_disp`, and divided by the `p_u_ratio`).\n\n Parameters\n ----------\n t : array-like\n vector of times at which to sample HRF\n peak_delay : float, optional\n delay of peak\n peak_disp : float, optional\n width (dispersion) of peak\n under_delay : float, optional\n delay of undershoot\n under_disp : float, optional\n width (dispersion) of undershoot\n p_u_ratio : float, optional\n peak to undershoot ratio. Undershoot divided by this value before\n subtracting from peak.\n normalize : {True, False}, optional\n If True, divide HRF values by their sum before returning. 
SPM does this\n by default.\n\n Returns\n -------\n hrf : array\n vector length ``len(t)`` of samples from HRF at times `t`\n\n Notes\n -----\n See ``spm_hrf.m`` in the SPM distribution.\n \"\"\"\n if len([v for v in [peak_delay, peak_disp, under_delay, under_disp] if \n v <= 0]):\n raise ValueError('delays and dispersions must be > 0')\n hrf = np.zeros(t.shape, dtype=np.float)\n pos_t = t[t > 0]\n peak = sps.gamma.pdf(pos_t, peak_delay / peak_disp, loc=0, scale=peak_disp)\n undershoot = sps.gamma.pdf(pos_t, under_delay / under_disp, loc=0,\n scale=under_disp)\n hrf[t > 0] = peak - undershoot / p_u_ratio\n if not normalize:\n return hrf\n return hrf / np.max(hrf)\n\n\ndef _get_num_int(lf, dt=0.02, t=50):\n tt = np.arange(dt, t + dt, dt)\n return lf(tt).sum() * dt\n\n\n_spm_can_int = _get_num_int(partial(spm_hrf_compat, normalize=True))\n\n\ndef spmt(t):\n \"\"\" SPM canonical HRF, HRF values for time values `t`\n\n This is the canonical HRF function as used in SPM. It\n has the following defaults:\n defaults\n (seconds)\n delay of response (relative to onset) 6\n delay of undershoot (relative to onset) 16\n dispersion of response 1\n dispersion of undershoot 1\n ratio of response to undershoot 6\n onset (seconds) 0\n length of kernel (seconds) 32\n \"\"\"\n return spm_hrf_compat(t, normalize=True)\n\n\ndef dspmt(t):\n \"\"\" SPM canonical HRF derivative, HRF derivative values for time values `t`\n\n This is the canonical HRF derivative function as used in SPM.\n\n It is the numerical difference of the HRF sampled at time `t` minus the\n values sampled at time `t` -1\n \"\"\"\n t = np.asarray(t)\n return spmt(t) - spmt(t - 1)\n\n\n_spm_dd_func = partial(spm_hrf_compat, normalize=True, peak_disp=1.01)\n\n\ndef ddspmt(t):\n \"\"\" SPM canonical HRF dispersion derivative, values for time values `t`\n\n This is the canonical HRF dispersion derivative function as used in SPM.\n\n It is the numerical difference between the HRF sampled at time `t`, and\n values at `t` for 
another HRF shape with a small change in the peak\n dispersion parameter (``peak_disp`` in func:`spm_hrf_compat`).\n \"\"\"\n return (spmt(t) - _spm_dd_func(t)) / 0.01\n",
"step-4": "from functools import partial\nimport numpy as np\nimport scipy.stats as sps\n\n\ndef spm_hrf_compat(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp\n =1, p_u_ratio=6, normalize=True):\n \"\"\" SPM HRF function from sum of two gamma PDFs\n\n This function is designed to be partially compatible with SPMs `spm_hrf.m`\n function.\n\n The SPN HRF is a *peak* gamma PDF (with location `peak_delay` and dispersion\n `peak_disp`), minus an *undershoot* gamma PDF (with location `under_delay`\n and dispersion `under_disp`, and divided by the `p_u_ratio`).\n\n Parameters\n ----------\n t : array-like\n vector of times at which to sample HRF\n peak_delay : float, optional\n delay of peak\n peak_disp : float, optional\n width (dispersion) of peak\n under_delay : float, optional\n delay of undershoot\n under_disp : float, optional\n width (dispersion) of undershoot\n p_u_ratio : float, optional\n peak to undershoot ratio. Undershoot divided by this value before\n subtracting from peak.\n normalize : {True, False}, optional\n If True, divide HRF values by their sum before returning. 
SPM does this\n by default.\n\n Returns\n -------\n hrf : array\n vector length ``len(t)`` of samples from HRF at times `t`\n\n Notes\n -----\n See ``spm_hrf.m`` in the SPM distribution.\n \"\"\"\n if len([v for v in [peak_delay, peak_disp, under_delay, under_disp] if \n v <= 0]):\n raise ValueError('delays and dispersions must be > 0')\n hrf = np.zeros(t.shape, dtype=np.float)\n pos_t = t[t > 0]\n peak = sps.gamma.pdf(pos_t, peak_delay / peak_disp, loc=0, scale=peak_disp)\n undershoot = sps.gamma.pdf(pos_t, under_delay / under_disp, loc=0,\n scale=under_disp)\n hrf[t > 0] = peak - undershoot / p_u_ratio\n if not normalize:\n return hrf\n return hrf / np.max(hrf)\n\n\ndef _get_num_int(lf, dt=0.02, t=50):\n tt = np.arange(dt, t + dt, dt)\n return lf(tt).sum() * dt\n\n\n_spm_can_int = _get_num_int(partial(spm_hrf_compat, normalize=True))\n\n\ndef spmt(t):\n \"\"\" SPM canonical HRF, HRF values for time values `t`\n\n This is the canonical HRF function as used in SPM. It\n has the following defaults:\n defaults\n (seconds)\n delay of response (relative to onset) 6\n delay of undershoot (relative to onset) 16\n dispersion of response 1\n dispersion of undershoot 1\n ratio of response to undershoot 6\n onset (seconds) 0\n length of kernel (seconds) 32\n \"\"\"\n return spm_hrf_compat(t, normalize=True)\n\n\ndef dspmt(t):\n \"\"\" SPM canonical HRF derivative, HRF derivative values for time values `t`\n\n This is the canonical HRF derivative function as used in SPM.\n\n It is the numerical difference of the HRF sampled at time `t` minus the\n values sampled at time `t` -1\n \"\"\"\n t = np.asarray(t)\n return spmt(t) - spmt(t - 1)\n\n\n_spm_dd_func = partial(spm_hrf_compat, normalize=True, peak_disp=1.01)\n\n\ndef ddspmt(t):\n \"\"\" SPM canonical HRF dispersion derivative, values for time values `t`\n\n This is the canonical HRF dispersion derivative function as used in SPM.\n\n It is the numerical difference between the HRF sampled at time `t`, and\n values at `t` for 
another HRF shape with a small change in the peak\n dispersion parameter (``peak_disp`` in func:`spm_hrf_compat`).\n \"\"\"\n return (spmt(t) - _spm_dd_func(t)) / 0.01\n",
"step-5": "from functools import partial\nimport numpy as np\nimport scipy.stats as sps\n\n# SPMs HRF\ndef spm_hrf_compat(t,\n peak_delay=6,\n under_delay=16,\n peak_disp=1,\n under_disp=1,\n p_u_ratio = 6,\n normalize=True,\n ):\n \"\"\" SPM HRF function from sum of two gamma PDFs\n\n This function is designed to be partially compatible with SPMs `spm_hrf.m`\n function.\n\n The SPN HRF is a *peak* gamma PDF (with location `peak_delay` and dispersion\n `peak_disp`), minus an *undershoot* gamma PDF (with location `under_delay`\n and dispersion `under_disp`, and divided by the `p_u_ratio`).\n\n Parameters\n ----------\n t : array-like\n vector of times at which to sample HRF\n peak_delay : float, optional\n delay of peak\n peak_disp : float, optional\n width (dispersion) of peak\n under_delay : float, optional\n delay of undershoot\n under_disp : float, optional\n width (dispersion) of undershoot\n p_u_ratio : float, optional\n peak to undershoot ratio. Undershoot divided by this value before\n subtracting from peak.\n normalize : {True, False}, optional\n If True, divide HRF values by their sum before returning. 
SPM does this\n by default.\n\n Returns\n -------\n hrf : array\n vector length ``len(t)`` of samples from HRF at times `t`\n\n Notes\n -----\n See ``spm_hrf.m`` in the SPM distribution.\n \"\"\"\n if len([v for v in [peak_delay, peak_disp, under_delay, under_disp]\n if v <= 0]):\n raise ValueError(\"delays and dispersions must be > 0\")\n # gamma.pdf only defined for t > 0\n hrf = np.zeros(t.shape, dtype=np.float)\n pos_t = t[t > 0]\n peak = sps.gamma.pdf(pos_t,\n peak_delay / peak_disp,\n loc=0,\n scale=peak_disp)\n undershoot = sps.gamma.pdf(pos_t,\n under_delay / under_disp,\n loc=0,\n scale=under_disp)\n hrf[t > 0] = peak - undershoot / p_u_ratio\n if not normalize:\n return hrf\n return hrf / np.max(hrf)\n\ndef _get_num_int(lf, dt=0.02, t=50):\n # numerical integral of numerical function\n tt = np.arange(dt,t+dt,dt)\n return lf(tt).sum() * dt\n\n_spm_can_int = _get_num_int(partial(spm_hrf_compat, normalize=True))\n\n\ndef spmt(t):\n \"\"\" SPM canonical HRF, HRF values for time values `t`\n\n This is the canonical HRF function as used in SPM. 
It\n has the following defaults:\n defaults\n (seconds)\n delay of response (relative to onset) 6\n delay of undershoot (relative to onset) 16\n dispersion of response 1\n dispersion of undershoot 1\n ratio of response to undershoot 6\n onset (seconds) 0\n length of kernel (seconds) 32\n \"\"\"\n return spm_hrf_compat(t, normalize=True)\n\n\ndef dspmt(t):\n \"\"\" SPM canonical HRF derivative, HRF derivative values for time values `t`\n\n This is the canonical HRF derivative function as used in SPM.\n\n It is the numerical difference of the HRF sampled at time `t` minus the\n values sampled at time `t` -1\n \"\"\"\n t = np.asarray(t)\n return spmt(t) - spmt(t - 1)\n\n\n_spm_dd_func = partial(spm_hrf_compat, normalize=True, peak_disp=1.01)\n\ndef ddspmt(t):\n \"\"\" SPM canonical HRF dispersion derivative, values for time values `t`\n\n This is the canonical HRF dispersion derivative function as used in SPM.\n\n It is the numerical difference between the HRF sampled at time `t`, and\n values at `t` for another HRF shape with a small change in the peak\n dispersion parameter (``peak_disp`` in func:`spm_hrf_compat`).\n \"\"\"\n return (spmt(t) - _spm_dd_func(t)) / 0.01",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import unittest
import gym
import torch
from all.environments import DuplicateEnvironment, GymEnvironment
def make_vec_env(num_envs=3):
    """Build a list of *num_envs* independent CartPole-v0 environments."""
    return [GymEnvironment('CartPole-v0') for _ in range(num_envs)]
class DuplicateEnvironmentTest(unittest.TestCase):
    """Exercises DuplicateEnvironment wrapping several CartPole-v0 copies."""

    def test_env_name(self):
        # The duplicated environment reports the wrapped envs' name.
        self.assertEqual(DuplicateEnvironment(make_vec_env()).name, 'CartPole-v0')

    def test_num_envs(self):
        n = 5
        vec = DuplicateEnvironment(make_vec_env(n))
        self.assertEqual(vec.num_envs, n)
        self.assertEqual((n,), vec.reset().shape)

    def test_reset(self):
        n = 5
        vec = DuplicateEnvironment(make_vec_env(n))
        state = vec.reset()
        self.assertEqual(state.observation.shape, (n, 4))
        # Fresh episodes: zero reward, not done, full mask.
        self.assertTrue((state.reward == torch.zeros(n)).all())
        self.assertTrue((state.done == torch.zeros(n)).all())
        self.assertTrue((state.mask == torch.ones(n)).all())

    def test_step(self):
        n = 5
        vec = DuplicateEnvironment(make_vec_env(n))
        vec.reset()
        state = vec.step(torch.ones(n, dtype=torch.int32))
        self.assertEqual(state.observation.shape, (n, 4))
        # CartPole pays +1 reward per surviving step.
        self.assertTrue((state.reward == torch.ones(n)).all())
        self.assertTrue((state.done == torch.zeros(n)).all())
        self.assertTrue((state.mask == torch.ones(n)).all())

    def test_step_until_done(self):
        n = 3
        vec = DuplicateEnvironment(make_vec_env(n))
        vec.seed(5)
        vec.reset()
        state = None
        for _ in range(100):
            state = vec.step(torch.ones(n, dtype=torch.int32))
            if state.done[0]:
                break
        self.assertEqual(state[0].observation.shape, (4,))
        self.assertEqual(state[0].reward, 1.)
        self.assertTrue(state[0].done)
        self.assertEqual(state[0].mask, 0)
|
normal
|
{
"blob_id": "e01eced7c43aae354047fbf29028c601d1daae50",
"index": 9636,
"step-1": "<mask token>\n\n\nclass DuplicateEnvironmentTest(unittest.TestCase):\n <mask token>\n\n def test_num_envs(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n self.assertEqual(env.num_envs, num_envs)\n self.assertEqual((num_envs,), env.reset().shape)\n\n def test_reset(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n state = env.reset()\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.zeros(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n\n def test_step(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.reset()\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.ones(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DuplicateEnvironmentTest(unittest.TestCase):\n\n def test_env_name(self):\n env = DuplicateEnvironment(make_vec_env())\n self.assertEqual(env.name, 'CartPole-v0')\n\n def test_num_envs(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n self.assertEqual(env.num_envs, num_envs)\n self.assertEqual((num_envs,), env.reset().shape)\n\n def test_reset(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n state = env.reset()\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.zeros(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n\n def test_step(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.reset()\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.ones(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass DuplicateEnvironmentTest(unittest.TestCase):\n\n def test_env_name(self):\n env = DuplicateEnvironment(make_vec_env())\n self.assertEqual(env.name, 'CartPole-v0')\n\n def test_num_envs(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n self.assertEqual(env.num_envs, num_envs)\n self.assertEqual((num_envs,), env.reset().shape)\n\n def test_reset(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n state = env.reset()\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.zeros(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n\n def test_step(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.reset()\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.ones(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n\n def test_step_until_done(self):\n num_envs = 3\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.seed(5)\n env.reset()\n for _ in range(100):\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n if state.done[0]:\n break\n self.assertEqual(state[0].observation.shape, (4,))\n self.assertEqual(state[0].reward, 1.0)\n self.assertTrue(state[0].done)\n self.assertEqual(state[0].mask, 0)\n",
"step-4": "<mask token>\n\n\ndef make_vec_env(num_envs=3):\n env = [GymEnvironment('CartPole-v0') for i in range(num_envs)]\n return env\n\n\nclass DuplicateEnvironmentTest(unittest.TestCase):\n\n def test_env_name(self):\n env = DuplicateEnvironment(make_vec_env())\n self.assertEqual(env.name, 'CartPole-v0')\n\n def test_num_envs(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n self.assertEqual(env.num_envs, num_envs)\n self.assertEqual((num_envs,), env.reset().shape)\n\n def test_reset(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n state = env.reset()\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.zeros(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n\n def test_step(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.reset()\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.ones(num_envs)).all())\n self.assertTrue((state.done == torch.zeros(num_envs)).all())\n self.assertTrue((state.mask == torch.ones(num_envs)).all())\n\n def test_step_until_done(self):\n num_envs = 3\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.seed(5)\n env.reset()\n for _ in range(100):\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n if state.done[0]:\n break\n self.assertEqual(state[0].observation.shape, (4,))\n self.assertEqual(state[0].reward, 1.0)\n self.assertTrue(state[0].done)\n self.assertEqual(state[0].mask, 0)\n",
"step-5": "import unittest\nimport gym\nimport torch\nfrom all.environments import DuplicateEnvironment, GymEnvironment\n\n\ndef make_vec_env(num_envs=3):\n env = [GymEnvironment('CartPole-v0') for i in range(num_envs)]\n return env\n\n\nclass DuplicateEnvironmentTest(unittest.TestCase):\n def test_env_name(self):\n env = DuplicateEnvironment(make_vec_env())\n self.assertEqual(env.name, 'CartPole-v0')\n\n def test_num_envs(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n self.assertEqual(env.num_envs, num_envs)\n self.assertEqual((num_envs,), env.reset().shape)\n\n def test_reset(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n state = env.reset()\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.zeros(num_envs, )).all())\n self.assertTrue((state.done == torch.zeros(num_envs, )).all())\n self.assertTrue((state.mask == torch.ones(num_envs, )).all())\n\n def test_step(self):\n num_envs = 5\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.reset()\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n self.assertEqual(state.observation.shape, (num_envs, 4))\n self.assertTrue((state.reward == torch.ones(num_envs, )).all())\n self.assertTrue((state.done == torch.zeros(num_envs, )).all())\n self.assertTrue((state.mask == torch.ones(num_envs, )).all())\n\n def test_step_until_done(self):\n num_envs = 3\n env = DuplicateEnvironment(make_vec_env(num_envs))\n env.seed(5)\n env.reset()\n for _ in range(100):\n state = env.step(torch.ones(num_envs, dtype=torch.int32))\n if state.done[0]:\n break\n self.assertEqual(state[0].observation.shape, (4,))\n self.assertEqual(state[0].reward, 1.)\n self.assertTrue(state[0].done)\n self.assertEqual(state[0].mask, 0)\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
# Sorting a list of numbers: print a descending copy, then sort ascending.
ls = [1, 34, 23, 56, 34, 67, 87, 54, 62, 31, 66]
print(sorted(ls, reverse=True))  # descending copy, ls untouched
ls.sort()                        # ascending, in place
print(ls)
# Sorting a list of strings in a few different scenarios.
ls_l = ["aaa", "ertdf", "ieurtff", "fnjr", "resdjx", "jfh", "r", "fd"]
# 1 - by length: print shortest-first without mutating the list...
print(sorted(ls_l, key=len))
# ...then sort in place with the longest strings first.
ls_l.sort(key=len, reverse=True)
print(ls_l)
# 2 - Sort alphabetically by each string's first character.
def FirstLetter(string):
    """Sort key: the first character of *string*."""
    return string[0]


ls_l.sort(key=FirstLetter)
print(ls_l)
ls2 = [[0, 1, 'f'], [4, 2, 't'], [9, 4, 'afsd']]


def secondItem(ls):
    """Sort key: the element at index 2 (despite the name, the third item)."""
    return ls[2]


ls2.sort(key=secondItem)
print(ls2)
|
normal
|
{
"blob_id": "0e0e51904f05b41b4769b730c836568b8bb63869",
"index": 9564,
"step-1": "<mask token>\n\n\ndef secondItem(ls):\n return ls[2]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef FirstLetter(string):\n return string[0]\n\n\n<mask token>\n\n\ndef secondItem(ls):\n return ls[2]\n\n\n<mask token>\n",
"step-3": "<mask token>\nls.sort(reverse=True)\nprint(ls)\nls.sort()\nprint(ls)\n<mask token>\nls_l.sort(key=len)\nprint(ls_l)\nls_l.sort(key=len, reverse=True)\nprint(ls_l)\n\n\ndef FirstLetter(string):\n return string[0]\n\n\nls_l.sort(key=FirstLetter)\nprint(ls_l)\n<mask token>\n\n\ndef secondItem(ls):\n return ls[2]\n\n\nls2.sort(key=secondItem)\nprint(ls2)\n",
"step-4": "ls = [1, 34, 23, 56, 34, 67, 87, 54, 62, 31, 66]\nls.sort(reverse=True)\nprint(ls)\nls.sort()\nprint(ls)\nls_l = ['aaa', 'ertdf', 'ieurtff', 'fnjr', 'resdjx', 'jfh', 'r', 'fd']\nls_l.sort(key=len)\nprint(ls_l)\nls_l.sort(key=len, reverse=True)\nprint(ls_l)\n\n\ndef FirstLetter(string):\n return string[0]\n\n\nls_l.sort(key=FirstLetter)\nprint(ls_l)\nls2 = [[0, 1, 'f'], [4, 2, 't'], [9, 4, 'afsd']]\n\n\ndef secondItem(ls):\n return ls[2]\n\n\nls2.sort(key=secondItem)\nprint(ls2)\n",
"step-5": "#Sorting for a number list\n#ascending and descending\nls=[1,34,23,56,34,67,87,54,62,31,66]\nls.sort(reverse=True)\nprint(ls)\nls.sort()\nprint(ls)\n#Sorting a letter's list with different scenarios\nls_l=[\"aaa\",\"ertdf\",\"ieurtff\",\"fnjr\",\"resdjx\",\"jfh\",\"r\",\"fd\"]\n\n#1-sort according to string length from small length to bigger\nls_l.sort(key=len)\nprint(ls_l)\n\n#you can always reverse\nls_l.sort(key=len,reverse=True)\nprint(ls_l)\n\n#2-Sort with first alphabetical order\ndef FirstLetter(string):\n return string[0]\n\nls_l.sort(key=FirstLetter)\nprint(ls_l)\n\n\n\n\n\nls2=[[0,1,'f'],[4,2,'t'],[9,4,'afsd']]\ndef secondItem(ls):\n return ls[2]\nls2.sort(key=secondItem)\nprint(ls2)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import logging
import azure.functions as func
def main(event: func.EventHubEvent):
    """Azure Functions entry point: log one Event Hub message's UTF-8 body."""
    body = event.get_body().decode('utf-8')
    logging.info('Python EventHub trigger processed an event: %s', body)
|
normal
|
{
"blob_id": "58f8924a9cd2af4106e54b163e96bcd8517282b5",
"index": 2803,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(event: func.EventHubEvent):\n logging.info('Python EventHub trigger processed an event: %s', event.\n get_body().decode('utf-8'))\n",
"step-3": "import logging\nimport azure.functions as func\n\n\ndef main(event: func.EventHubEvent):\n logging.info('Python EventHub trigger processed an event: %s', event.\n get_body().decode('utf-8'))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# pylint: disable=C0103, C0413, E1101, W0611
"""Covid Catcher Backend"""
import os
from os.path import join, dirname
import json
import requests
import flask
from flask import request
import flask_sqlalchemy
import flask_socketio
from dotenv import load_dotenv
from covid import get_covid_stats_by_state
from covid import get_covid_stats_by_county
from covid import get_covid_stats_for_all_states
from faq import get_all_questions
from faq import get_all_categories
from faq import FAQ
import news
from news import get_news
import location
from location import get_location
import sites
from sites import get_sites
from sites import search_user
from sites import TestingSites
# Flask app plus the Socket.IO layer used for all client communication.
app = flask.Flask(__name__)
socketio = flask_socketio.SocketIO(app)
# Allow browser clients from any origin to connect over websockets.
socketio.init_app(app, cors_allowed_origins="*")
# Load database credentials and API keys from local .env files.
dotenv_path = join(dirname(__file__), "sql.env")
load_dotenv(dotenv_path)
dotenv_path = join(dirname(__file__), "api-keys.env")
load_dotenv(dotenv_path)
database_uri = os.environ["DATABASE_URL"]
# Maps API key — not referenced in this chunk; presumably used by map
# helpers elsewhere, verify before removing.
api_k = os.environ["MAP_API_KEY"]
app.config["SQLALCHEMY_DATABASE_URI"] = database_uri
# Module-level flag flipped to 1 when a brand-new user is stored (see userLog).
login = 0
db = flask_sqlalchemy.SQLAlchemy(app)
db.init_app(app)
db.app = app
# Socket.IO channel names shared between server and client.
USERS_UPDATED_CHANNEL = "users updated"
STATISTICS = "stats"
NEWUSER = "new user"
FAQS = "faq lists"
ARTICLE = "article list"
SITE = "site page"
SEARCH = "searching"
# Imported late, after `db` exists — common Flask-SQLAlchemy pattern so the
# models module can bind to the initialized database.
import models
def emit_all_users(channel):
    """Broadcast the names of every stored user on *channel*; returns *channel*."""
    names = [user.name for user in db.session.query(models.User1).all()]
    socketio.emit(channel, {"allUsers": names})
    return channel
def push_stat_data(state):
    """Fetch COVID-19 numbers for *state* and emit them to the calling client.

    Pulls state-level totals from the covid helpers, then a per-county
    breakdown, and pushes everything on the STATISTICS channel scoped to the
    requester's socket (``room=request.sid``).
    """
    information = get_covid_stats_by_state(state)
    print(state)
    case = information.cases
    newCases = information.todaysCases
    death = information.deaths
    newDeaths = information.todayDeaths
    rec = information.recovered
    print("CASES DEATHS AND RECOVERED: ", case, death, rec)
    # Per-county breakdown for the same state.
    county_names = []
    county_case_counts = []
    county_death_counts = []
    county_recovered = []
    county_updated = []
    for county in get_covid_stats_by_county(state, ""):
        county_names.append(county.county)
        county_case_counts.append(county.confirmed)
        county_death_counts.append(county.deaths)
        county_recovered.append(county.recovered)
        county_updated.append(county.updatedAt)
    socketio.emit(
        STATISTICS,
        {
            "state": state,
            "cases": case,
            "new_cases": newCases,
            "deaths": death,
            "new_deaths": newDeaths,
            "recovered": rec,
            "countyNames": county_names,
            "countyCases": county_case_counts,
            "countyDeaths": county_death_counts,
            "countyRecovered": county_recovered,
            "updated": county_updated,
        },
        room=request.sid,
    )
    return "stats are pushed"
@socketio.on("new google user")
def on_new_google_user(data):
"""new user when log in"""
print("Got an event for new google user input with data:", data)
push_new_user_to_db(data["name"], data["email"], data["pic"], data["room"])
emit_all_users(USERS_UPDATED_CHANNEL)
return USERS_UPDATED_CHANNEL
@socketio.on("email results")
def on_send_results(data):
    """Email questionnaire results through the Mailgun sandbox domain.

    data["results"] holds the recommendation text to send. The recipient name
    and address are hard-coded because the free Mailgun tier only delivers to
    verified addresses; once upgraded, pull the user's email from the database
    by socket id and send there instead.

    Fixes: the original mixed tabs and spaces in the continuation lines
    (PEP 8 violation, TabError-prone) and built the greeting with `+`
    concatenation; both normalized here without changing the message bytes.
    """
    name = "Madison"
    msg = (
        f"Hello {name}! After taking your questionnaire us here at Covid "
        "Catcher recommended the following...\n"
    )
    msg += data["results"]
    print(msg)
    response = requests.post(
        "https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages",
        auth=("api", os.environ["MAIL_API_KEY"]),
        data={
            "from": "Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>",
            "to": ["miatkem@gmail.com"],
            "subject": "Covid Catcher Questionnaire Results",
            "text": msg,
        },
    )
    print(response.text)
@socketio.on("faq categories")
def on_faq_categories():
    """Emit the full list of FAQ categories to the client."""
    socketio.emit("faq category list", get_all_categories())
@socketio.on("faq questions")
def on_faq_questions(category):
    """Emit question/answer pairs, optionally filtered by category.

    An empty string or None for *category* means "all questions".
    """
    # `not category` covers both "" and None; the original compared with
    # `== None`, which is non-idiomatic (identity should use `is`).
    faqs = get_all_questions() if not category else get_all_questions(category)
    response = [{"question": faq.question, "answer": faq.answer} for faq in faqs]
    socketio.emit("faq list", response)
def push_new_user_to_db(name, email, picture, room):
    """Insert a new user unless the email already exists, then flag the login.

    Always marks the global login flag, pings userLog(), and rebroadcasts the
    user list, whether or not an insert happened. Returns the user's name.
    """
    global login
    known_emails = {u.email for u in db.session.query(models.User1).all()}
    if email in known_emails:
        print(email, " is already a user in the database!")
    else:
        db.session.add(models.User1(name, email, picture, room))
        db.session.commit()
    login = 1
    userLog()
    emit_all_users(USERS_UPDATED_CHANNEL)
    return name
def get_state_colors():
    """Emit per-state map data: color, total cases, and active cases.

    Sends three parallel lists on the "colors" channel for the USA map.
    """
    colors = []
    totals = []
    active = []
    for state_stats in get_covid_stats_for_all_states():
        colors.append(state_stats.color)
        totals.append(state_stats.cases)
        active.append(state_stats.activeCases)
    socketio.emit(
        "colors", {"colors": colors, "cases": totals, "active": active}
    )
def userLog():
    """Announce a fresh login over the NEWUSER channel when the flag is set.

    Always returns True regardless of whether an emit happened.
    """
    if login == 1:
        socketio.emit(NEWUSER, {"login": 1})
    return True
@socketio.on("search loc")
def search_loc(data):
    """Relay a client-requested state name to the Covid stats pusher."""
    push_stat_data(data["loc"])
@socketio.on("connect")
def on_connect():
    """On connect: push articles, map colors, and stats for the client's state.

    Geolocates the client from the X-Forwarded-For header (assumes the app
    sits behind a proxy that sets it — TODO confirm deployment).
    """
    articleList()
    # test_location() intentionally disabled
    get_state_colors()
    client_ip = request.environ["HTTP_X_FORWARDED_FOR"]
    push_stat_data(get_location(client_ip).state)
    return True
@socketio.on("search location")
def searching(data):
    """Look up testing sites near a user-entered area and emit up to three.

    Resolves the area text to coordinates, gathers at most three nearby
    sites, and sends them (plus the map API key) only to the requesting
    socket on the SITE channel.
    """
    area_loc = search_user(data["area"])
    user_lat = area_loc[0]
    user_lng = area_loc[1]

    titles = []
    addresses = []
    site_lats = []
    site_lngs = []
    phones = []
    webs = []
    miles = []
    for site in get_sites(user_lat, user_lng):
        if len(titles) == 3:  # cap the response at three sites
            break
        titles.append(site.title)
        addresses.append(site.entireAddress)
        site_lats.append(site.latitude)
        site_lngs.append(site.longitude)
        phones.append(site.phone)
        webs.append(site.web)
        miles.append(site.miles)

    socketio.emit(
        SITE,
        {
            "user_lat": user_lat,
            "user_lng": user_lng,
            "title": titles,
            "address": addresses,
            "latitude": site_lats,
            "longitude": site_lngs,
            "phone": phones,
            "web": webs,
            "miles": miles,
            "key": api_k,
        },
        room=request.sid,
    )
    return True
# NOTE(review): dead code — test_location() is "disabled" by wrapping it in a
# module-level string literal, which Python evaluates and discards at import
# time. Prefer deleting it (it lives in version control) or restoring it
# behind a feature flag. Kept byte-identical here.
'''
def test_location():
    """Get testing locations"""
    ip = request.environ["HTTP_X_FORWARDED_FOR"]
    loc = get_location(ip)
    lat = loc.latitude
    lng = loc.longitude
    allsites = get_sites(lat, lng)
    title_list = []
    address_list = []
    lat_list = []
    lng_list = []
    phone_list = []
    web_list = []
    miles_list = []
    counter = 0
    for site in allsites:
        if counter != 3:
            title_list.append(site.title)
            address_list.append(site.entireAddress)
            lat_list.append(site.latitude)
            lng_list.append(site.longitude)
            phone_list.append(site.phone)
            web_list.append(site.web)
            miles_list.append(site.miles)
            counter += 1
        else:
            break
    socketio.emit(
        SITE,
        {
            "user_lat": lat,
            "user_lng": lng,
            "title": title_list,
            "address": address_list,
            "latitude": lat_list,
            "longitude": lng_list,
            "phone": phone_list,
            "web": web_list,
            "miles": miles_list,
            "key": api_k,
        },
    )
    return True'''
def articleList():
    """Calls the Article API and emits up to five covid news articles.

    Sends parallel lists (titles, descriptions, URLs, images, sources) on the
    ARTICLE channel. Returns True.
    """
    # Bug fix: "%yyyy-%mm-%dd" is malformed — only %y/%m/%d are strftime
    # codes, so it rendered e.g. "21yyy-05m-14d". Use ISO "%Y-%m-%d"
    # (presumably what the news API's `since` parameter expects — TODO
    # confirm against the news module).
    articles = get_news(
        5, since=news.YESTERDAY.strftime("%Y-%m-%d"), query="covid"
    )
    titles = []
    descriptions = []
    urls = []
    images = []
    sources = []
    for art in articles:
        images.append(art.image)
        titles.append(art.title)
        sources.append(art.source)
        descriptions.append(art.description)
        urls.append(art.url)
    socketio.emit(
        ARTICLE,
        {
            "title": titles,
            "desc": descriptions,
            "url": urls,
            "img": images,
            "sources": sources,
        },
    )
    return True
@app.route("/")
def index():
    """Serve the single-page app, making sure the DB schema exists first."""
    models.db.create_all()
    db.session.commit()
    return flask.render_template("index.html")
@app.errorhandler(404)
def page_not_found(e):
    """Handle 404s by serving the SPA shell so client-side routing works."""
    return flask.render_template("index.html")
if __name__ == "__main__":
    # Entry point: run the SocketIO dev server. Host and port come from the
    # environment with local-dev fallbacks. NOTE(review): debug=True should
    # be turned off in production.
    host = os.getenv("IP", "0.0.0.0")
    port = int(os.getenv("PORT", 8080))
    socketio.run(app, host=host, port=port, debug=True)
|
normal
|
{
"blob_id": "8d48b5b831edb62b2d9624bc23cae45d390fd224",
"index": 8035,
"step-1": "<mask token>\n\n\ndef emit_all_users(channel):\n \"\"\"emits all users\"\"\"\n all_users = [user.name for user in db.session.query(models.User1).all()]\n socketio.emit(channel, {'allUsers': all_users})\n return channel\n\n\n<mask token>\n\n\n@socketio.on('new google user')\ndef on_new_google_user(data):\n \"\"\"new user when log in\"\"\"\n print('Got an event for new google user input with data:', data)\n push_new_user_to_db(data['name'], data['email'], data['pic'], data['room'])\n emit_all_users(USERS_UPDATED_CHANNEL)\n return USERS_UPDATED_CHANNEL\n\n\n@socketio.on('email results')\ndef on_send_results(data):\n name = 'Madison'\n msg = 'Hello ' + name + \"\"\"! After taking your questionnaire us here at Covid Catcher recommended the following...\n\"\"\"\n msg += data['results']\n print(msg)\n print(requests.post(\n 'https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages'\n , auth=('api', os.environ['MAIL_API_KEY']), data={'from':\n 'Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>'\n , 'to': ['miatkem@gmail.com'], 'subject':\n 'Covid Catcher Questionnaire Results', 'text': msg}).text)\n\n\n<mask token>\n\n\n@socketio.on('faq questions')\ndef on_faq_questions(category):\n \"\"\"get questions and answers in a category\"\"\"\n if category == '' or category == None:\n faqs = get_all_questions()\n else:\n faqs = get_all_questions(category)\n response = []\n for faq in faqs:\n response.append({'question': faq.question, 'answer': faq.answer})\n socketio.emit('faq list', response)\n\n\ndef push_new_user_to_db(name, email, picture, room):\n \"\"\"puts new user in the database\"\"\"\n global login\n all_users = [user.email for user in db.session.query(models.User1).all()]\n if email in all_users:\n print(email, ' is already a user in the database!')\n else:\n db.session.add(models.User1(name, email, picture, room))\n db.session.commit()\n login = 1\n userLog()\n emit_all_users(USERS_UPDATED_CHANNEL)\n 
return name\n\n\ndef get_state_colors():\n \"\"\"Colors for USA map\"\"\"\n state_colors = []\n state_cases = []\n state_active = []\n for i in get_covid_stats_for_all_states():\n state_colors.append(i.color)\n state_cases.append(i.cases)\n state_active.append(i.activeCases)\n socketio.emit('colors', {'colors': state_colors, 'cases': state_cases,\n 'active': state_active})\n\n\ndef userLog():\n \"\"\"User Login Check\"\"\"\n if login == 1:\n socketio.emit(NEWUSER, {'login': 1})\n return True\n\n\n@socketio.on('search loc')\ndef search_loc(data):\n \"\"\"Search for location covid stats\"\"\"\n state = data['loc']\n push_stat_data(state)\n\n\n@socketio.on('connect')\ndef on_connect():\n \"\"\"Socket for when user connects\"\"\"\n articleList()\n get_state_colors()\n ip = request.environ['HTTP_X_FORWARDED_FOR']\n loc = get_location(ip)\n push_stat_data(loc.state)\n return True\n\n\n@socketio.on('search location')\ndef searching(data):\n \"\"\"Search location\"\"\"\n a = data['area']\n areaLoc = search_user(a)\n allsites = get_sites(areaLoc[0], areaLoc[1])\n title_list = []\n address_list = []\n lat_list = []\n lng_list = []\n phone_list = []\n web_list = []\n miles_list = []\n counter = 0\n for site in allsites:\n if counter != 3:\n title_list.append(site.title)\n address_list.append(site.entireAddress)\n lat_list.append(site.latitude)\n lng_list.append(site.longitude)\n phone_list.append(site.phone)\n web_list.append(site.web)\n miles_list.append(site.miles)\n counter += 1\n else:\n break\n socketio.emit(SITE, {'user_lat': areaLoc[0], 'user_lng': areaLoc[1],\n 'title': title_list, 'address': address_list, 'latitude': lat_list,\n 'longitude': lng_list, 'phone': phone_list, 'web': web_list,\n 'miles': miles_list, 'key': api_k}, room=request.sid)\n return True\n\n\n<mask token>\n\n\ndef articleList():\n \"\"\"Calls the Article API\"\"\"\n articles = get_news(5, since=news.YESTERDAY.strftime('%yyyy-%mm-%dd'),\n query='covid')\n title_list = []\n desc_list = []\n url_list 
= []\n image_list = []\n source_list = []\n for art in articles:\n image_list.append(art.image)\n title_list.append(art.title)\n source_list.append(art.source)\n desc_list.append(art.description)\n url_list.append(art.url)\n socketio.emit(ARTICLE, {'title': title_list, 'desc': desc_list, 'url':\n url_list, 'img': image_list, 'sources': source_list})\n return True\n\n\n@app.route('/')\ndef index():\n \"\"\"loads page\"\"\"\n models.db.create_all()\n db.session.commit()\n return flask.render_template('index.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef emit_all_users(channel):\n \"\"\"emits all users\"\"\"\n all_users = [user.name for user in db.session.query(models.User1).all()]\n socketio.emit(channel, {'allUsers': all_users})\n return channel\n\n\ndef push_stat_data(state):\n \"\"\"Calls Covid API\"\"\"\n information = get_covid_stats_by_state(state)\n print(state)\n case = information.cases\n newCases = information.todaysCases\n death = information.deaths\n newDeaths = information.todayDeaths\n rec = information.recovered\n county_list = []\n county_confirmed = []\n county_deaths = []\n county_rec = []\n updated = []\n print('CASES DEATHS AND RECOVERED: ', case, death, rec)\n allcounty = get_covid_stats_by_county(state, '')\n for x in allcounty:\n county_list.append(x.county)\n county_confirmed.append(x.confirmed)\n county_deaths.append(x.deaths)\n county_rec.append(x.recovered)\n updated.append(x.updatedAt)\n socketio.emit(STATISTICS, {'state': state, 'cases': case, 'new_cases':\n newCases, 'deaths': death, 'new_deaths': newDeaths, 'recovered':\n rec, 'countyNames': county_list, 'countyCases': county_confirmed,\n 'countyDeaths': county_deaths, 'countyRecovered': county_rec,\n 'updated': updated}, room=request.sid)\n r = 'stats are pushed'\n return r\n\n\n@socketio.on('new google user')\ndef on_new_google_user(data):\n \"\"\"new user when log in\"\"\"\n print('Got an event for new google user input with data:', data)\n push_new_user_to_db(data['name'], data['email'], data['pic'], data['room'])\n emit_all_users(USERS_UPDATED_CHANNEL)\n return USERS_UPDATED_CHANNEL\n\n\n@socketio.on('email results')\ndef on_send_results(data):\n name = 'Madison'\n msg = 'Hello ' + name + \"\"\"! 
After taking your questionnaire us here at Covid Catcher recommended the following...\n\"\"\"\n msg += data['results']\n print(msg)\n print(requests.post(\n 'https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages'\n , auth=('api', os.environ['MAIL_API_KEY']), data={'from':\n 'Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>'\n , 'to': ['miatkem@gmail.com'], 'subject':\n 'Covid Catcher Questionnaire Results', 'text': msg}).text)\n\n\n<mask token>\n\n\n@socketio.on('faq questions')\ndef on_faq_questions(category):\n \"\"\"get questions and answers in a category\"\"\"\n if category == '' or category == None:\n faqs = get_all_questions()\n else:\n faqs = get_all_questions(category)\n response = []\n for faq in faqs:\n response.append({'question': faq.question, 'answer': faq.answer})\n socketio.emit('faq list', response)\n\n\ndef push_new_user_to_db(name, email, picture, room):\n \"\"\"puts new user in the database\"\"\"\n global login\n all_users = [user.email for user in db.session.query(models.User1).all()]\n if email in all_users:\n print(email, ' is already a user in the database!')\n else:\n db.session.add(models.User1(name, email, picture, room))\n db.session.commit()\n login = 1\n userLog()\n emit_all_users(USERS_UPDATED_CHANNEL)\n return name\n\n\ndef get_state_colors():\n \"\"\"Colors for USA map\"\"\"\n state_colors = []\n state_cases = []\n state_active = []\n for i in get_covid_stats_for_all_states():\n state_colors.append(i.color)\n state_cases.append(i.cases)\n state_active.append(i.activeCases)\n socketio.emit('colors', {'colors': state_colors, 'cases': state_cases,\n 'active': state_active})\n\n\ndef userLog():\n \"\"\"User Login Check\"\"\"\n if login == 1:\n socketio.emit(NEWUSER, {'login': 1})\n return True\n\n\n@socketio.on('search loc')\ndef search_loc(data):\n \"\"\"Search for location covid stats\"\"\"\n state = data['loc']\n push_stat_data(state)\n\n\n@socketio.on('connect')\ndef 
on_connect():\n \"\"\"Socket for when user connects\"\"\"\n articleList()\n get_state_colors()\n ip = request.environ['HTTP_X_FORWARDED_FOR']\n loc = get_location(ip)\n push_stat_data(loc.state)\n return True\n\n\n@socketio.on('search location')\ndef searching(data):\n \"\"\"Search location\"\"\"\n a = data['area']\n areaLoc = search_user(a)\n allsites = get_sites(areaLoc[0], areaLoc[1])\n title_list = []\n address_list = []\n lat_list = []\n lng_list = []\n phone_list = []\n web_list = []\n miles_list = []\n counter = 0\n for site in allsites:\n if counter != 3:\n title_list.append(site.title)\n address_list.append(site.entireAddress)\n lat_list.append(site.latitude)\n lng_list.append(site.longitude)\n phone_list.append(site.phone)\n web_list.append(site.web)\n miles_list.append(site.miles)\n counter += 1\n else:\n break\n socketio.emit(SITE, {'user_lat': areaLoc[0], 'user_lng': areaLoc[1],\n 'title': title_list, 'address': address_list, 'latitude': lat_list,\n 'longitude': lng_list, 'phone': phone_list, 'web': web_list,\n 'miles': miles_list, 'key': api_k}, room=request.sid)\n return True\n\n\n<mask token>\n\n\ndef articleList():\n \"\"\"Calls the Article API\"\"\"\n articles = get_news(5, since=news.YESTERDAY.strftime('%yyyy-%mm-%dd'),\n query='covid')\n title_list = []\n desc_list = []\n url_list = []\n image_list = []\n source_list = []\n for art in articles:\n image_list.append(art.image)\n title_list.append(art.title)\n source_list.append(art.source)\n desc_list.append(art.description)\n url_list.append(art.url)\n socketio.emit(ARTICLE, {'title': title_list, 'desc': desc_list, 'url':\n url_list, 'img': image_list, 'sources': source_list})\n return True\n\n\n@app.route('/')\ndef index():\n \"\"\"loads page\"\"\"\n models.db.create_all()\n db.session.commit()\n return flask.render_template('index.html')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef emit_all_users(channel):\n \"\"\"emits all users\"\"\"\n all_users = [user.name for user in db.session.query(models.User1).all()]\n socketio.emit(channel, {'allUsers': all_users})\n return channel\n\n\ndef push_stat_data(state):\n \"\"\"Calls Covid API\"\"\"\n information = get_covid_stats_by_state(state)\n print(state)\n case = information.cases\n newCases = information.todaysCases\n death = information.deaths\n newDeaths = information.todayDeaths\n rec = information.recovered\n county_list = []\n county_confirmed = []\n county_deaths = []\n county_rec = []\n updated = []\n print('CASES DEATHS AND RECOVERED: ', case, death, rec)\n allcounty = get_covid_stats_by_county(state, '')\n for x in allcounty:\n county_list.append(x.county)\n county_confirmed.append(x.confirmed)\n county_deaths.append(x.deaths)\n county_rec.append(x.recovered)\n updated.append(x.updatedAt)\n socketio.emit(STATISTICS, {'state': state, 'cases': case, 'new_cases':\n newCases, 'deaths': death, 'new_deaths': newDeaths, 'recovered':\n rec, 'countyNames': county_list, 'countyCases': county_confirmed,\n 'countyDeaths': county_deaths, 'countyRecovered': county_rec,\n 'updated': updated}, room=request.sid)\n r = 'stats are pushed'\n return r\n\n\n@socketio.on('new google user')\ndef on_new_google_user(data):\n \"\"\"new user when log in\"\"\"\n print('Got an event for new google user input with data:', data)\n push_new_user_to_db(data['name'], data['email'], data['pic'], data['room'])\n emit_all_users(USERS_UPDATED_CHANNEL)\n return USERS_UPDATED_CHANNEL\n\n\n@socketio.on('email results')\ndef on_send_results(data):\n name = 'Madison'\n msg = 'Hello ' + name + \"\"\"! 
After taking your questionnaire us here at Covid Catcher recommended the following...\n\"\"\"\n msg += data['results']\n print(msg)\n print(requests.post(\n 'https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages'\n , auth=('api', os.environ['MAIL_API_KEY']), data={'from':\n 'Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>'\n , 'to': ['miatkem@gmail.com'], 'subject':\n 'Covid Catcher Questionnaire Results', 'text': msg}).text)\n\n\n<mask token>\n\n\n@socketio.on('faq questions')\ndef on_faq_questions(category):\n \"\"\"get questions and answers in a category\"\"\"\n if category == '' or category == None:\n faqs = get_all_questions()\n else:\n faqs = get_all_questions(category)\n response = []\n for faq in faqs:\n response.append({'question': faq.question, 'answer': faq.answer})\n socketio.emit('faq list', response)\n\n\ndef push_new_user_to_db(name, email, picture, room):\n \"\"\"puts new user in the database\"\"\"\n global login\n all_users = [user.email for user in db.session.query(models.User1).all()]\n if email in all_users:\n print(email, ' is already a user in the database!')\n else:\n db.session.add(models.User1(name, email, picture, room))\n db.session.commit()\n login = 1\n userLog()\n emit_all_users(USERS_UPDATED_CHANNEL)\n return name\n\n\ndef get_state_colors():\n \"\"\"Colors for USA map\"\"\"\n state_colors = []\n state_cases = []\n state_active = []\n for i in get_covid_stats_for_all_states():\n state_colors.append(i.color)\n state_cases.append(i.cases)\n state_active.append(i.activeCases)\n socketio.emit('colors', {'colors': state_colors, 'cases': state_cases,\n 'active': state_active})\n\n\ndef userLog():\n \"\"\"User Login Check\"\"\"\n if login == 1:\n socketio.emit(NEWUSER, {'login': 1})\n return True\n\n\n@socketio.on('search loc')\ndef search_loc(data):\n \"\"\"Search for location covid stats\"\"\"\n state = data['loc']\n push_stat_data(state)\n\n\n@socketio.on('connect')\ndef 
on_connect():\n \"\"\"Socket for when user connects\"\"\"\n articleList()\n get_state_colors()\n ip = request.environ['HTTP_X_FORWARDED_FOR']\n loc = get_location(ip)\n push_stat_data(loc.state)\n return True\n\n\n@socketio.on('search location')\ndef searching(data):\n \"\"\"Search location\"\"\"\n a = data['area']\n areaLoc = search_user(a)\n allsites = get_sites(areaLoc[0], areaLoc[1])\n title_list = []\n address_list = []\n lat_list = []\n lng_list = []\n phone_list = []\n web_list = []\n miles_list = []\n counter = 0\n for site in allsites:\n if counter != 3:\n title_list.append(site.title)\n address_list.append(site.entireAddress)\n lat_list.append(site.latitude)\n lng_list.append(site.longitude)\n phone_list.append(site.phone)\n web_list.append(site.web)\n miles_list.append(site.miles)\n counter += 1\n else:\n break\n socketio.emit(SITE, {'user_lat': areaLoc[0], 'user_lng': areaLoc[1],\n 'title': title_list, 'address': address_list, 'latitude': lat_list,\n 'longitude': lng_list, 'phone': phone_list, 'web': web_list,\n 'miles': miles_list, 'key': api_k}, room=request.sid)\n return True\n\n\n<mask token>\n\n\ndef articleList():\n \"\"\"Calls the Article API\"\"\"\n articles = get_news(5, since=news.YESTERDAY.strftime('%yyyy-%mm-%dd'),\n query='covid')\n title_list = []\n desc_list = []\n url_list = []\n image_list = []\n source_list = []\n for art in articles:\n image_list.append(art.image)\n title_list.append(art.title)\n source_list.append(art.source)\n desc_list.append(art.description)\n url_list.append(art.url)\n socketio.emit(ARTICLE, {'title': title_list, 'desc': desc_list, 'url':\n url_list, 'img': image_list, 'sources': source_list})\n return True\n\n\n@app.route('/')\ndef index():\n \"\"\"loads page\"\"\"\n models.db.create_all()\n db.session.commit()\n return flask.render_template('index.html')\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n \"\"\"Handles Page Not Found\"\"\"\n return flask.render_template('index.html')\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport os\nfrom os.path import join, dirname\nimport json\nimport requests\nimport flask\nfrom flask import request\nimport flask_sqlalchemy\nimport flask_socketio\nfrom dotenv import load_dotenv\nfrom covid import get_covid_stats_by_state\nfrom covid import get_covid_stats_by_county\nfrom covid import get_covid_stats_for_all_states\nfrom faq import get_all_questions\nfrom faq import get_all_categories\nfrom faq import FAQ\nimport news\nfrom news import get_news\nimport location\nfrom location import get_location\nimport sites\nfrom sites import get_sites\nfrom sites import search_user\nfrom sites import TestingSites\napp = flask.Flask(__name__)\nsocketio = flask_socketio.SocketIO(app)\nsocketio.init_app(app, cors_allowed_origins='*')\ndotenv_path = join(dirname(__file__), 'sql.env')\nload_dotenv(dotenv_path)\ndotenv_path = join(dirname(__file__), 'api-keys.env')\nload_dotenv(dotenv_path)\ndatabase_uri = os.environ['DATABASE_URL']\napi_k = os.environ['MAP_API_KEY']\napp.config['SQLALCHEMY_DATABASE_URI'] = database_uri\nlogin = 0\ndb = flask_sqlalchemy.SQLAlchemy(app)\ndb.init_app(app)\ndb.app = app\nUSERS_UPDATED_CHANNEL = 'users updated'\nSTATISTICS = 'stats'\nNEWUSER = 'new user'\nFAQS = 'faq lists'\nARTICLE = 'article list'\nSITE = 'site page'\nSEARCH = 'searching'\nimport models\n\n\ndef emit_all_users(channel):\n \"\"\"emits all users\"\"\"\n all_users = [user.name for user in db.session.query(models.User1).all()]\n socketio.emit(channel, {'allUsers': all_users})\n return channel\n\n\ndef push_stat_data(state):\n \"\"\"Calls Covid API\"\"\"\n information = get_covid_stats_by_state(state)\n print(state)\n case = information.cases\n newCases = information.todaysCases\n death = information.deaths\n newDeaths = information.todayDeaths\n rec = information.recovered\n county_list = []\n county_confirmed = []\n county_deaths = []\n county_rec = []\n updated = []\n print('CASES DEATHS AND RECOVERED: ', case, death, rec)\n allcounty = 
get_covid_stats_by_county(state, '')\n for x in allcounty:\n county_list.append(x.county)\n county_confirmed.append(x.confirmed)\n county_deaths.append(x.deaths)\n county_rec.append(x.recovered)\n updated.append(x.updatedAt)\n socketio.emit(STATISTICS, {'state': state, 'cases': case, 'new_cases':\n newCases, 'deaths': death, 'new_deaths': newDeaths, 'recovered':\n rec, 'countyNames': county_list, 'countyCases': county_confirmed,\n 'countyDeaths': county_deaths, 'countyRecovered': county_rec,\n 'updated': updated}, room=request.sid)\n r = 'stats are pushed'\n return r\n\n\n@socketio.on('new google user')\ndef on_new_google_user(data):\n \"\"\"new user when log in\"\"\"\n print('Got an event for new google user input with data:', data)\n push_new_user_to_db(data['name'], data['email'], data['pic'], data['room'])\n emit_all_users(USERS_UPDATED_CHANNEL)\n return USERS_UPDATED_CHANNEL\n\n\n@socketio.on('email results')\ndef on_send_results(data):\n name = 'Madison'\n msg = 'Hello ' + name + \"\"\"! 
After taking your questionnaire us here at Covid Catcher recommended the following...\n\"\"\"\n msg += data['results']\n print(msg)\n print(requests.post(\n 'https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages'\n , auth=('api', os.environ['MAIL_API_KEY']), data={'from':\n 'Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>'\n , 'to': ['miatkem@gmail.com'], 'subject':\n 'Covid Catcher Questionnaire Results', 'text': msg}).text)\n\n\n@socketio.on('faq categories')\ndef on_faq_categories():\n \"\"\"get all categories for faqs\"\"\"\n categories = get_all_categories()\n socketio.emit('faq category list', categories)\n\n\n@socketio.on('faq questions')\ndef on_faq_questions(category):\n \"\"\"get questions and answers in a category\"\"\"\n if category == '' or category == None:\n faqs = get_all_questions()\n else:\n faqs = get_all_questions(category)\n response = []\n for faq in faqs:\n response.append({'question': faq.question, 'answer': faq.answer})\n socketio.emit('faq list', response)\n\n\ndef push_new_user_to_db(name, email, picture, room):\n \"\"\"puts new user in the database\"\"\"\n global login\n all_users = [user.email for user in db.session.query(models.User1).all()]\n if email in all_users:\n print(email, ' is already a user in the database!')\n else:\n db.session.add(models.User1(name, email, picture, room))\n db.session.commit()\n login = 1\n userLog()\n emit_all_users(USERS_UPDATED_CHANNEL)\n return name\n\n\ndef get_state_colors():\n \"\"\"Colors for USA map\"\"\"\n state_colors = []\n state_cases = []\n state_active = []\n for i in get_covid_stats_for_all_states():\n state_colors.append(i.color)\n state_cases.append(i.cases)\n state_active.append(i.activeCases)\n socketio.emit('colors', {'colors': state_colors, 'cases': state_cases,\n 'active': state_active})\n\n\ndef userLog():\n \"\"\"User Login Check\"\"\"\n if login == 1:\n socketio.emit(NEWUSER, {'login': 1})\n return 
True\n\n\n@socketio.on('search loc')\ndef search_loc(data):\n \"\"\"Search for location covid stats\"\"\"\n state = data['loc']\n push_stat_data(state)\n\n\n@socketio.on('connect')\ndef on_connect():\n \"\"\"Socket for when user connects\"\"\"\n articleList()\n get_state_colors()\n ip = request.environ['HTTP_X_FORWARDED_FOR']\n loc = get_location(ip)\n push_stat_data(loc.state)\n return True\n\n\n@socketio.on('search location')\ndef searching(data):\n \"\"\"Search location\"\"\"\n a = data['area']\n areaLoc = search_user(a)\n allsites = get_sites(areaLoc[0], areaLoc[1])\n title_list = []\n address_list = []\n lat_list = []\n lng_list = []\n phone_list = []\n web_list = []\n miles_list = []\n counter = 0\n for site in allsites:\n if counter != 3:\n title_list.append(site.title)\n address_list.append(site.entireAddress)\n lat_list.append(site.latitude)\n lng_list.append(site.longitude)\n phone_list.append(site.phone)\n web_list.append(site.web)\n miles_list.append(site.miles)\n counter += 1\n else:\n break\n socketio.emit(SITE, {'user_lat': areaLoc[0], 'user_lng': areaLoc[1],\n 'title': title_list, 'address': address_list, 'latitude': lat_list,\n 'longitude': lng_list, 'phone': phone_list, 'web': web_list,\n 'miles': miles_list, 'key': api_k}, room=request.sid)\n return True\n\n\n<mask token>\n\n\ndef articleList():\n \"\"\"Calls the Article API\"\"\"\n articles = get_news(5, since=news.YESTERDAY.strftime('%yyyy-%mm-%dd'),\n query='covid')\n title_list = []\n desc_list = []\n url_list = []\n image_list = []\n source_list = []\n for art in articles:\n image_list.append(art.image)\n title_list.append(art.title)\n source_list.append(art.source)\n desc_list.append(art.description)\n url_list.append(art.url)\n socketio.emit(ARTICLE, {'title': title_list, 'desc': desc_list, 'url':\n url_list, 'img': image_list, 'sources': source_list})\n return True\n\n\n@app.route('/')\ndef index():\n \"\"\"loads page\"\"\"\n models.db.create_all()\n db.session.commit()\n return 
flask.render_template('index.html')\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n \"\"\"Handles Page Not Found\"\"\"\n return flask.render_template('index.html')\n\n\nif __name__ == '__main__':\n socketio.run(app, host=os.getenv('IP', '0.0.0.0'), port=int(os.getenv(\n 'PORT', 8080)), debug=True)\n",
"step-5": "# pylint: disable=C0103, C0413, E1101, W0611\n\"\"\"Covid Catcher Backend\"\"\"\nimport os\nfrom os.path import join, dirname\nimport json\nimport requests\nimport flask\nfrom flask import request\nimport flask_sqlalchemy\nimport flask_socketio\nfrom dotenv import load_dotenv\nfrom covid import get_covid_stats_by_state\nfrom covid import get_covid_stats_by_county\nfrom covid import get_covid_stats_for_all_states\nfrom faq import get_all_questions\nfrom faq import get_all_categories\nfrom faq import FAQ\nimport news\nfrom news import get_news\nimport location\nfrom location import get_location\nimport sites\nfrom sites import get_sites\nfrom sites import search_user\nfrom sites import TestingSites\n\napp = flask.Flask(__name__)\nsocketio = flask_socketio.SocketIO(app)\nsocketio.init_app(app, cors_allowed_origins=\"*\")\ndotenv_path = join(dirname(__file__), \"sql.env\")\nload_dotenv(dotenv_path)\ndotenv_path = join(dirname(__file__), \"api-keys.env\")\nload_dotenv(dotenv_path)\ndatabase_uri = os.environ[\"DATABASE_URL\"]\napi_k = os.environ[\"MAP_API_KEY\"]\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = database_uri\nlogin = 0\n\ndb = flask_sqlalchemy.SQLAlchemy(app)\ndb.init_app(app)\ndb.app = app\nUSERS_UPDATED_CHANNEL = \"users updated\"\nSTATISTICS = \"stats\"\nNEWUSER = \"new user\"\nFAQS = \"faq lists\"\nARTICLE = \"article list\"\nSITE = \"site page\"\nSEARCH = \"searching\"\nimport models\n\n\ndef emit_all_users(channel):\n \"\"\"emits all users\"\"\"\n all_users = [user.name for user in db.session.query(models.User1).all()]\n socketio.emit(channel, {\"allUsers\": all_users})\n return channel\n\n\ndef push_stat_data(state):\n \"\"\"Calls Covid API\"\"\"\n information = get_covid_stats_by_state(state)\n print(state)\n case = information.cases\n newCases = information.todaysCases\n death = information.deaths\n newDeaths = information.todayDeaths\n rec = information.recovered\n county_list = []\n county_confirmed = []\n county_deaths = []\n county_rec = 
[]\n updated = []\n\n print(\"CASES DEATHS AND RECOVERED: \", case, death, rec)\n allcounty = get_covid_stats_by_county(state, \"\")\n for x in allcounty:\n county_list.append(x.county)\n county_confirmed.append(x.confirmed)\n county_deaths.append(x.deaths)\n county_rec.append(x.recovered)\n updated.append(x.updatedAt)\n\n socketio.emit(\n STATISTICS,\n {\n \"state\": state,\n \"cases\": case,\n \"new_cases\": newCases,\n \"deaths\": death,\n \"new_deaths\": newDeaths,\n \"recovered\": rec,\n \"countyNames\": county_list,\n \"countyCases\": county_confirmed,\n \"countyDeaths\": county_deaths,\n \"countyRecovered\": county_rec,\n \"updated\": updated,\n },\n room=request.sid,\n )\n r = \"stats are pushed\"\n return r\n\n\n@socketio.on(\"new google user\")\ndef on_new_google_user(data):\n \"\"\"new user when log in\"\"\"\n print(\"Got an event for new google user input with data:\", data)\n push_new_user_to_db(data[\"name\"], data[\"email\"], data[\"pic\"], data[\"room\"])\n emit_all_users(USERS_UPDATED_CHANNEL)\n return USERS_UPDATED_CHANNEL\n\n\n@socketio.on(\"email results\")\ndef on_send_results(data):\n #This name would be the user but mailgun will not allow emails to be sent to\n # unverified users without paying.\n name=\"Madison\"\n msg = \"Hello \"+name+\"! 
After taking your questionnaire us here at Covid Catcher recommended the following...\\n\"\n msg += data['results']\n print(msg)\n print(requests.post(\n\t \"https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages\",\n\t\tauth=(\"api\", os.environ[\"MAIL_API_KEY\"]),\n\t\tdata={\"from\": \"Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>\",\n\t\t #This only sends to madison becuase mailgun for free can only send to verified emails\n\t\t #To send to the specific users email simply pull the email from the database at this socket\n\t\t # number and send it there\n\t\t\t\"to\": [\"miatkem@gmail.com\"],\n\t\t\t\"subject\": \"Covid Catcher Questionnaire Results\",\n\t\t\t\"text\":msg}).text)\n\n\n@socketio.on(\"faq categories\")\ndef on_faq_categories():\n \"\"\"get all categories for faqs\"\"\"\n categories = get_all_categories()\n socketio.emit(\"faq category list\", categories)\n\n\n@socketio.on(\"faq questions\")\ndef on_faq_questions(category):\n \"\"\"get questions and answers in a category\"\"\"\n if category == \"\" or category == None:\n faqs = get_all_questions()\n else:\n faqs = get_all_questions(category)\n response = []\n for faq in faqs:\n response.append(\n {\n \"question\": faq.question,\n \"answer\": faq.answer,\n }\n )\n socketio.emit(\"faq list\", response)\n\n\ndef push_new_user_to_db(name, email, picture, room):\n \"\"\"puts new user in the database\"\"\"\n global login\n all_users = [user.email for user in db.session.query(models.User1).all()]\n if email in all_users:\n print(email, \" is already a user in the database!\")\n else:\n db.session.add(models.User1(name, email, picture, room))\n db.session.commit()\n login = 1\n userLog()\n emit_all_users(USERS_UPDATED_CHANNEL)\n return name\n\n\ndef get_state_colors():\n \"\"\"Colors for USA map\"\"\"\n state_colors = []\n state_cases = []\n state_active = []\n for i in get_covid_stats_for_all_states():\n state_colors.append(i.color)\n 
state_cases.append(i.cases)\n state_active.append(i.activeCases)\n socketio.emit(\n \"colors\", {\"colors\": state_colors, \"cases\": state_cases, \"active\": state_active}\n )\n\n\ndef userLog():\n \"\"\"User Login Check\"\"\"\n if login == 1:\n socketio.emit(NEWUSER, {\"login\": 1})\n return True\n\n\n@socketio.on(\"search loc\")\ndef search_loc(data):\n \"\"\"Search for location covid stats\"\"\"\n state = data[\"loc\"]\n push_stat_data(state)\n\n\n@socketio.on(\"connect\")\ndef on_connect():\n \"\"\"Socket for when user connects\"\"\"\n articleList()\n #test_location()\n get_state_colors()\n ip = request.environ[\"HTTP_X_FORWARDED_FOR\"]\n loc = get_location(ip)\n push_stat_data(loc.state)\n return True\n\n\n@socketio.on(\"search location\")\ndef searching(data):\n \"\"\"Search location\"\"\"\n a = data[\"area\"]\n areaLoc = search_user(a)\n allsites = get_sites(areaLoc[0], areaLoc[1])\n title_list = []\n address_list = []\n lat_list = []\n lng_list = []\n phone_list = []\n web_list = []\n miles_list = []\n counter = 0\n for site in allsites:\n if counter != 3:\n title_list.append(site.title)\n address_list.append(site.entireAddress)\n lat_list.append(site.latitude)\n lng_list.append(site.longitude)\n phone_list.append(site.phone)\n web_list.append(site.web)\n miles_list.append(site.miles)\n counter += 1\n else:\n break\n\n socketio.emit(\n SITE,\n {\n \"user_lat\": areaLoc[0],\n \"user_lng\": areaLoc[1],\n \"title\": title_list,\n \"address\": address_list,\n \"latitude\": lat_list,\n \"longitude\": lng_list,\n \"phone\": phone_list,\n \"web\": web_list,\n \"miles\": miles_list,\n \"key\": api_k,\n }, room=request.sid\n )\n return True\n\n'''\ndef test_location():\n \"\"\"Get testing locations\"\"\"\n ip = request.environ[\"HTTP_X_FORWARDED_FOR\"]\n loc = get_location(ip)\n lat = loc.latitude\n lng = loc.longitude\n allsites = get_sites(lat, lng)\n title_list = []\n address_list = []\n lat_list = []\n lng_list = []\n phone_list = []\n web_list = []\n 
miles_list = []\n counter = 0\n for site in allsites:\n if counter != 3:\n title_list.append(site.title)\n address_list.append(site.entireAddress)\n lat_list.append(site.latitude)\n lng_list.append(site.longitude)\n phone_list.append(site.phone)\n web_list.append(site.web)\n miles_list.append(site.miles)\n counter += 1\n else:\n break\n\n socketio.emit(\n SITE,\n {\n \"user_lat\": lat,\n \"user_lng\": lng,\n \"title\": title_list,\n \"address\": address_list,\n \"latitude\": lat_list,\n \"longitude\": lng_list,\n \"phone\": phone_list,\n \"web\": web_list,\n \"miles\": miles_list,\n \"key\": api_k,\n },\n )\n return True'''\n\n\ndef articleList():\n \"\"\"Calls the Article API\"\"\"\n articles = get_news(\n 5, since=news.YESTERDAY.strftime(\"%yyyy-%mm-%dd\"), query=\"covid\"\n )\n title_list = []\n desc_list = []\n url_list = []\n image_list = []\n source_list = []\n for art in articles:\n image_list.append(art.image)\n title_list.append(art.title)\n source_list.append(art.source)\n desc_list.append(art.description)\n url_list.append(art.url)\n socketio.emit(\n ARTICLE,\n {\n \"title\": title_list,\n \"desc\": desc_list,\n \"url\": url_list,\n \"img\": image_list,\n \"sources\": source_list,\n },\n )\n return True\n\n\n@app.route(\"/\")\ndef index():\n \"\"\"loads page\"\"\"\n models.db.create_all()\n db.session.commit()\n return flask.render_template(\"index.html\")\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n \"\"\"Handles Page Not Found\"\"\"\n return flask.render_template(\"index.html\")\n\n\nif __name__ == \"__main__\":\n socketio.run(\n app,\n host=os.getenv(\"IP\", \"0.0.0.0\"),\n port=int(os.getenv(\"PORT\", 8080)),\n debug=True,\n )\n",
"step-ids": [
12,
13,
14,
18,
19
]
}
|
[
12,
13,
14,
18,
19
] |
<|reserved_special_token_0|>
class UndoDelegator:
<|reserved_special_token_0|>
def undo_block_start(*args):
pass
def undo_block_stop(*args):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Editor:
<|reserved_special_token_0|>
def __init__(self, flist=None, filename=None, key=None, root=None):
self.text = Text()
self.undo = UndoDelegator()
def get_selection_indices(self):
first = self.text.index('1.0')
last = self.text.index('end')
return first, last
class UndoDelegator:
"""Minimally imitate UndoDelegator,UndoDelegator class.
"""
def undo_block_start(*args):
pass
def undo_block_stop(*args):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Editor:
"""Minimally imitate EditorWindow.EditorWindow class.
"""
def __init__(self, flist=None, filename=None, key=None, root=None):
self.text = Text()
self.undo = UndoDelegator()
def get_selection_indices(self):
first = self.text.index('1.0')
last = self.text.index('end')
return first, last
class UndoDelegator:
"""Minimally imitate UndoDelegator,UndoDelegator class.
"""
def undo_block_start(*args):
pass
def undo_block_stop(*args):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from idlelib.idle_test.mock_tk import Text
class Editor:
"""Minimally imitate EditorWindow.EditorWindow class.
"""
def __init__(self, flist=None, filename=None, key=None, root=None):
self.text = Text()
self.undo = UndoDelegator()
def get_selection_indices(self):
first = self.text.index('1.0')
last = self.text.index('end')
return first, last
class UndoDelegator:
"""Minimally imitate UndoDelegator,UndoDelegator class.
"""
def undo_block_start(*args):
pass
def undo_block_stop(*args):
pass
<|reserved_special_token_1|>
'''Mock classes that imitate idlelib modules or classes.
Attributes and methods will be added as needed for tests.
'''
from idlelib.idle_test.mock_tk import Text
class Editor:
    """Minimal stand-in for the EditorWindow.EditorWindow class.

    Only the pieces exercised by tests are provided: a mock Text
    widget, an undo delegator, and whole-buffer selection lookup.
    """

    def __init__(self, flist=None, filename=None, key=None, root=None):
        # The parameters mirror EditorWindow's constructor signature,
        # but this mock accepts and ignores them.
        self.text = Text()
        self.undo = UndoDelegator()

    def get_selection_indices(self):
        """Return (start, end) indices spanning the entire buffer."""
        return self.text.index('1.0'), self.text.index('end')
class UndoDelegator:
    """Minimal stand-in for the UndoDelegator class.

    A real undo block is only needed for user interaction, so the
    block-bracketing methods are deliberate no-ops.
    """

    def undo_block_start(*args):
        """Do nothing; accept and ignore any arguments."""

    def undo_block_stop(*args):
        """Do nothing; accept and ignore any arguments."""
|
flexible
|
{
"blob_id": "3b7c30718838a164eaf3aa12cd7b6a68930346f8",
"index": 8604,
"step-1": "<mask token>\n\n\nclass UndoDelegator:\n <mask token>\n\n def undo_block_start(*args):\n pass\n\n def undo_block_stop(*args):\n pass\n",
"step-2": "<mask token>\n\n\nclass Editor:\n <mask token>\n\n def __init__(self, flist=None, filename=None, key=None, root=None):\n self.text = Text()\n self.undo = UndoDelegator()\n\n def get_selection_indices(self):\n first = self.text.index('1.0')\n last = self.text.index('end')\n return first, last\n\n\nclass UndoDelegator:\n \"\"\"Minimally imitate UndoDelegator,UndoDelegator class.\n \"\"\"\n\n def undo_block_start(*args):\n pass\n\n def undo_block_stop(*args):\n pass\n",
"step-3": "<mask token>\n\n\nclass Editor:\n \"\"\"Minimally imitate EditorWindow.EditorWindow class.\n \"\"\"\n\n def __init__(self, flist=None, filename=None, key=None, root=None):\n self.text = Text()\n self.undo = UndoDelegator()\n\n def get_selection_indices(self):\n first = self.text.index('1.0')\n last = self.text.index('end')\n return first, last\n\n\nclass UndoDelegator:\n \"\"\"Minimally imitate UndoDelegator,UndoDelegator class.\n \"\"\"\n\n def undo_block_start(*args):\n pass\n\n def undo_block_stop(*args):\n pass\n",
"step-4": "<mask token>\nfrom idlelib.idle_test.mock_tk import Text\n\n\nclass Editor:\n \"\"\"Minimally imitate EditorWindow.EditorWindow class.\n \"\"\"\n\n def __init__(self, flist=None, filename=None, key=None, root=None):\n self.text = Text()\n self.undo = UndoDelegator()\n\n def get_selection_indices(self):\n first = self.text.index('1.0')\n last = self.text.index('end')\n return first, last\n\n\nclass UndoDelegator:\n \"\"\"Minimally imitate UndoDelegator,UndoDelegator class.\n \"\"\"\n\n def undo_block_start(*args):\n pass\n\n def undo_block_stop(*args):\n pass\n",
"step-5": "'''Mock classes that imitate idlelib modules or classes.\n\nAttributes and methods will be added as needed for tests.\n'''\n\nfrom idlelib.idle_test.mock_tk import Text\n\nclass Editor:\n '''Minimally imitate EditorWindow.EditorWindow class.\n '''\n def __init__(self, flist=None, filename=None, key=None, root=None):\n self.text = Text()\n self.undo = UndoDelegator()\n\n def get_selection_indices(self):\n first = self.text.index('1.0')\n last = self.text.index('end')\n return first, last\n\nclass UndoDelegator:\n '''Minimally imitate UndoDelegator,UndoDelegator class.\n '''\n # A real undo block is only needed for user interaction.\n def undo_block_start(*args):\n pass\n def undo_block_stop(*args):\n pass\n",
"step-ids": [
3,
7,
8,
9,
10
]
}
|
[
3,
7,
8,
9,
10
] |
"""Unit tests for misc. ticket functions."""
from pdm_utils.classes import bundle
from pdm_utils.classes import genome
from pdm_utils.classes import ticket
from pdm_utils.classes import eval
from pdm_utils.functions import tickets
from pdm_utils.constants import constants
import unittest
class TestTicketFunctions1(unittest.TestCase):
    """Tests for import-ticket dictionary processing and
    ImportTicket construction in the tickets module."""

    def setUp(self):
        self.required_keys = constants.IMPORT_TABLE_STRUCTURE["required"]
        self.optional_keys = constants.IMPORT_TABLE_STRUCTURE["optional"]
        self.keywords = constants.IMPORT_TABLE_STRUCTURE["keywords"]

        # Complete add ticket exercising every keyword type
        # ('retrieve', 'retain', 'parse') plus an explicit value.
        self.ticket_dict1 = {
            "type": "add",
            "phage_id": "Trixie",
            "description_field": "product",
            "eval_mode": "final",
            "host_genus": "retrieve",
            "cluster": "retain",
            "subcluster": "A2",
            "accession": "parse",
            }

        # Empty dictionary: an invalid ticket.
        self.ticket_dict2 = {}

        # Mixed-case values and a None value, for normalization tests.
        self.ticket_dict3 = {
            "type": "ADD",
            "phage_id": "Trixie",
            "description_field": "PRODUCT",
            "eval_mode": "FINAL",
            "host_genus": "RETRIEVE",
            "subcluster": None,
            "accession": "PARSE",
            "retrieve_record": "RETAIN",
            }

        # Minimal ticket containing only the essential fields.
        self.ticket_dict4 = {
            "type": "ADD",
            "phage_id": "Trixie",
            }

    # --- assertion helpers -------------------------------------------

    def _check_dict(self, data_dict, expected):
        """Assert each expected key maps to the given value, one subTest per key."""
        for key, value in expected.items():
            with self.subTest(key=key):
                self.assertEqual(data_dict[key], value)

    def _check_attrs(self, obj, expected):
        """Assert each expected attribute holds the given value, one subTest each."""
        for attr, value in expected.items():
            with self.subTest(attr=attr):
                self.assertEqual(getattr(obj, attr), value)

    @staticmethod
    def _make_ticket(tkt_id, tkt_type, phage_id):
        """Build an ImportTicket with the given id, type, and phage_id."""
        tkt = ticket.ImportTicket()
        tkt.id = tkt_id
        tkt.type = tkt_type
        tkt.phage_id = phage_id
        return tkt

    def _check_duplicates(self, data1, data2, exp_id_dupes, exp_phage_dupes):
        """Run identify_duplicates on two tickets and check dupe-set sizes.

        data1/data2 are (id, type, phage_id) triples for the two tickets.
        """
        tkt_list = [self._make_ticket(*data1), self._make_ticket(*data2)]
        id_dupes, phage_id_dupes = tickets.identify_duplicates(
            tkt_list, null_set=set(["none"]))
        with self.subTest():
            self.assertEqual(len(id_dupes), exp_id_dupes)
        with self.subTest():
            self.assertEqual(len(phage_id_dupes), exp_phage_dupes)

    # --- modify_import_data ------------------------------------------

    def test_modify_import_data_1(self):
        """Verify returns False if there are missing required keys."""
        result = tickets.modify_import_data(self.ticket_dict2,
                    self.required_keys, self.optional_keys, self.keywords)
        self.assertFalse(result)

    def test_modify_import_data_2(self):
        """Verify returns False if there are extra keys."""
        self.ticket_dict3["extra"] = "extra"
        result = tickets.modify_import_data(self.ticket_dict3,
                    self.required_keys, self.optional_keys, self.keywords)
        self.assertFalse(result)

    def test_modify_import_data_3(self):
        """Verify returns True with keywords identified and values lowercased."""
        result = tickets.modify_import_data(self.ticket_dict3,
                    self.required_keys, self.optional_keys, self.keywords)
        with self.subTest():
            self.assertTrue(result)
        # The None subcluster becomes 'retrieve'; keyword values lowercased.
        self._check_dict(self.ticket_dict3, {
            "host_genus": "retrieve",
            "retrieve_record": "retain",
            "subcluster": "retrieve",
            "accession": "parse",
            "type": "add",
            "description_field": "product",
            "eval_mode": "final",
            })

    def test_modify_import_data_4(self):
        """Verify returns True with completed dictionary from a
        minimal add ticket."""
        self.ticket_dict4["description_field"] = "product"
        self.ticket_dict4["eval_mode"] = "final"
        result = tickets.modify_import_data(self.ticket_dict4,
                    self.required_keys, self.optional_keys, self.keywords)
        with self.subTest():
            self.assertTrue(result)
        # Missing optional fields receive 'add'-type defaults.
        self._check_dict(self.ticket_dict4, {
            "host_genus": "retrieve",
            "cluster": "retrieve",
            "subcluster": "retrieve",
            "annotation_author": "1",
            "retrieve_record": "1",
            "annotation_status": "draft",
            "accession": "",
            })

    def test_modify_import_data_5(self):
        """Verify returns True with completed dictionary from a
        minimal replace ticket."""
        self.ticket_dict4["type"] = "replace"
        self.ticket_dict4["description_field"] = "product"
        self.ticket_dict4["eval_mode"] = "final"
        result = tickets.modify_import_data(self.ticket_dict4,
                    self.required_keys, self.optional_keys, self.keywords)
        with self.subTest():
            self.assertTrue(result)
        # Missing optional fields receive 'replace'-type defaults.
        self._check_dict(self.ticket_dict4, {
            "host_genus": "retain",
            "cluster": "retain",
            "subcluster": "retain",
            "annotation_author": "retain",
            "retrieve_record": "retain",
            "annotation_status": "final",
            "accession": "retain",
            })

    # --- parse_import_ticket_data ------------------------------------

    def test_parse_import_ticket_data_1(self):
        """Verify ticket is generated from correct data dictionary."""
        tkt = tickets.parse_import_ticket_data(self.ticket_dict1)
        with self.subTest():
            self.assertEqual(len(tkt.data_dict.keys()), 8)
        self._check_attrs(tkt, {
            "type": "add",
            "phage_id": "Trixie",
            "description_field": "product",
            "eval_mode": "final",
            "data_retrieve": set(["host_genus"]),
            "data_retain": set(["cluster"]),
            "data_parse": set(["accession"]),
            "data_add": set(["subcluster"]),
            })

    def test_parse_import_ticket_data_2(self):
        """Verify ticket is generated from correct data dictionary with
        no data in 'retain', 'retrieve', or 'parse' sets."""
        self.ticket_dict1["host_genus"] = "Mycobacterium"
        self.ticket_dict1["cluster"] = "A"
        self.ticket_dict1["subcluster"] = "A2"
        self.ticket_dict1["accession"] = "ABC123"
        tkt = tickets.parse_import_ticket_data(self.ticket_dict1)
        with self.subTest():
            self.assertEqual(len(tkt.data_dict.keys()), 8)
        self._check_attrs(tkt, {
            "type": "add",
            "phage_id": "Trixie",
            "description_field": "product",
            "eval_mode": "final",
            "data_retrieve": set(),
            "data_retain": set(),
            "data_parse": set(),
            "data_add": set(["subcluster", "host_genus",
                             "cluster", "accession"]),
            })

    def test_parse_import_ticket_data_3(self):
        """Verify ticket is generated from correct data dictionary with
        no data in 'add' sets."""
        self.ticket_dict1["host_genus"] = "retrieve"
        self.ticket_dict1["cluster"] = "retrieve"
        self.ticket_dict1["subcluster"] = "retrieve"
        self.ticket_dict1["accession"] = "retrieve"
        tkt = tickets.parse_import_ticket_data(self.ticket_dict1)
        with self.subTest():
            self.assertEqual(len(tkt.data_dict.keys()), 8)
        self._check_attrs(tkt, {
            "type": "add",
            "phage_id": "Trixie",
            "description_field": "product",
            "eval_mode": "final",
            "data_retrieve": set(["subcluster", "host_genus",
                                  "cluster", "accession"]),
            "data_retain": set(),
            "data_parse": set(),
            "data_add": set(),
            })

    # --- small dictionary helpers ------------------------------------

    def test_set_empty_1(self):
        """Verify one None value is set to ''."""
        data_dict = {"type": "add", "cluster": None}
        tickets.set_empty(data_dict)
        self._check_dict(data_dict, {"type": "add", "cluster": ""})

    def test_set_keywords_1(self):
        """Verify keyword values are lowercased."""
        data_dict = {"type": "ADD",
                     "cluster": "RETRIEVE",
                     "subcluster": "NONE",
                     "host_genus": "PARSE",
                     "retrieve_record": "RETAIN"}
        tickets.set_keywords(data_dict, self.keywords)
        # 'ADD' is not a keyword value, so it stays unchanged.
        self._check_dict(data_dict, {
            "type": "ADD",
            "cluster": "retrieve",
            "subcluster": "none",
            "host_genus": "parse",
            "retrieve_record": "retain",
            })

    def test_set_missing_keys_1(self):
        """Verify one missing key is added."""
        data_dict = {"type": "add", "cluster": ""}
        key_set = set(["type", "host_genus"])
        tickets.set_missing_keys(data_dict, key_set)
        with self.subTest():
            self.assertEqual(len(data_dict.keys()), 3)
        with self.subTest():
            self.assertEqual(data_dict["host_genus"], "")

    def test_set_missing_keys_2(self):
        """Verify no missing key is added."""
        data_dict = {"type": "add", "cluster": ""}
        key_set = set(["type", "cluster"])
        tickets.set_missing_keys(data_dict, key_set)
        self.assertEqual(len(data_dict.keys()), 2)

    def test_set_dict_value_1(self):
        """Verify empty value is replaced with first value."""
        data_dict = {"type": "add", "cluster": ""}
        tickets.set_dict_value(data_dict, "cluster", "A", "B")
        self.assertEqual(data_dict["cluster"], "A")

    def test_set_dict_value_2(self):
        """Verify empty value is replaced with second value."""
        data_dict = {"type": "replace", "cluster": ""}
        tickets.set_dict_value(data_dict, "cluster", "A", "B")
        self.assertEqual(data_dict["cluster"], "B")

    def test_set_dict_value_3(self):
        """Verify non-empty value is not replaced."""
        data_dict = {"type": "replace", "cluster": "C"}
        tickets.set_dict_value(data_dict, "cluster", "A", "B")
        self.assertEqual(data_dict["cluster"], "C")

    # --- construct_tickets -------------------------------------------

    def test_construct_tickets_1(self):
        """Verify two tickets are constructed correctly.
        The first ticket contains all required and optional fields.
        The second ticket contains all required fields."""
        dict_list = [self.ticket_dict1, self.ticket_dict4]
        eval_data_dict = {"eval_mode": "custom_eval_mode",
                          "eval_flag_dict": {"check_locus_tag": False}}
        list_of_tickets = tickets.construct_tickets(dict_list,
                            eval_data_dict, "function", self.required_keys,
                            self.optional_keys, self.keywords)
        with self.subTest():
            self.assertEqual(len(list_of_tickets), 2)
        # The complete ticket keeps its own eval_mode/description_field.
        self._check_attrs(list_of_tickets[0], {
            "id": 1,
            "eval_mode": "final",
            "description_field": "product",
            })
        with self.subTest():
            self.assertTrue(list_of_tickets[0].eval_flags["check_locus_tag"])
        # The minimal ticket falls back to the supplied defaults.
        self._check_attrs(list_of_tickets[1], {
            "id": 2,
            "eval_mode": "custom_eval_mode",
            "description_field": "function",
            })
        with self.subTest():
            self.assertFalse(list_of_tickets[1].eval_flags["check_locus_tag"])

    def test_construct_tickets_2(self):
        """Verify one ticket is constructed correctly. The second data
        dictionary is not structured correctly."""
        dict_list = [self.ticket_dict1, self.ticket_dict2]
        eval_data_dict = {"eval_mode": "custom_eval_mode",
                          "eval_flag_dict": {}}
        list_of_tickets = tickets.construct_tickets(dict_list,
                            eval_data_dict, "function", self.required_keys,
                            self.optional_keys, self.keywords)
        self.assertEqual(len(list_of_tickets), 1)

    def test_construct_tickets_3(self):
        """Verify four tickets constructed correctly. The first two tickets
        contain all required and optional fields. The second two tickets
        contain all required fields. Verify that each eval_flag dictionary
        is a separate object that can be modified without impacting the other
        eval_flag dictionaries."""
        dict_list = [
            {"type": "add", "phage_id": "Trixie",
             "description_field": "product", "eval_mode": "final"},
            {"type": "add", "phage_id": "L5",
             "description_field": "product", "eval_mode": "final"},
            {"type": "add", "phage_id": "RedRock"},
            {"type": "add", "phage_id": "Bxb1"},
            ]
        eval_data_dict = {"eval_mode": "custom_eval_mode",
                          "eval_flag_dict": {"check_locus_tag": False}}
        tkt_list = tickets.construct_tickets(dict_list,
                        eval_data_dict, "function", self.required_keys,
                        self.optional_keys, self.keywords)
        # Mutate every ticket's flag dict to a distinct value...
        for index, tkt in enumerate(tkt_list):
            tkt.eval_flags["check_locus_tag"] = index
        # ...then confirm no mutation leaked into another ticket.
        for index, tkt in enumerate(tkt_list):
            with self.subTest(index=index):
                self.assertEqual(tkt.eval_flags["check_locus_tag"], index)

    # --- identify_duplicates -----------------------------------------

    def test_identify_duplicates_1(self):
        """Verify no duplicates are produced."""
        self._check_duplicates((1, "replace", "Trixie"),
                               (2, "replace", "L5"), 0, 0)

    def test_identify_duplicates_2(self):
        """Verify two tickets with 'none' duplicates
        do not generate an error."""
        self._check_duplicates(("none", "replace", "none"),
                               ("none", "replace", "none"), 0, 0)

    def test_identify_duplicates_3(self):
        """Verify two tickets with id duplicates
        do generate an error."""
        self._check_duplicates((1, "replace", "L5"),
                               (1, "replace", "Trixie"), 1, 0)

    def test_identify_duplicates_4(self):
        """Verify two tickets with Primary Phage ID duplicates
        do generate an error."""
        self._check_duplicates((1, "replace", "Trixie"),
                               (2, "replace", "Trixie"), 0, 1)

    def test_identify_duplicates_6(self):
        """Verify two tickets with multiple duplicates
        do generate multiple errors."""
        self._check_duplicates((1, "replace", "Trixie"),
                               (1, "replace", "Trixie"), 1, 1)
class TestTicketFunctions2(unittest.TestCase):
    """Fixture-only test case pairing tickets with bundles.

    NOTE(review): this class currently defines no test methods; the
    setUp fixture is presumably a placeholder for future bundle/ticket
    tests — confirm whether it should be removed or filled in.
    """

    def setUp(self):
        # Two tickets with distinct phage ids, each wrapped in a bundle.
        self.ticket1 = ticket.ImportTicket()
        self.ticket2 = ticket.ImportTicket()

        self.ticket1.phage_id = "Trixie"
        self.ticket2.phage_id = "L5"

        self.bundle1 = bundle.Bundle()
        self.bundle2 = bundle.Bundle()

        self.bundle1.ticket = self.ticket1
        self.bundle2.ticket = self.ticket2
class TestTicketFunctions3(unittest.TestCase):
    """Tests for tickets.get_genome()."""

    def setUp(self):
        # Ticket data dictionary providing a value for every genome field.
        self.data_dict = {
            "host_genus": "Mycobacterium smegmatis",
            "accession": "ABC123.1",
            "annotation_status": "final",
            "cluster": "A",
            "subcluster": "A2",
            "annotation_author": 1,
            "retrieve_record": 1,
            }
        self.tkt1 = ticket.ImportTicket()
        self.tkt1.phage_id = "Trixie_Draft"
        self.tkt1.data_dict = self.data_dict

    def _check_genome(self, data_add, expected):
        """Build a genome from tkt1 using *data_add* and verify attributes.

        data_add: set of field names the ticket should copy to the genome.
        expected: mapping of genome attribute name -> expected value.
        """
        self.tkt1.data_add = data_add
        gnm = tickets.get_genome(self.tkt1, gnm_type="add")
        for attr, value in expected.items():
            with self.subTest(attr=attr):
                self.assertEqual(getattr(gnm, attr), value)

    def test_get_genome_1(self):
        """Verify no data from ticket is added to genome."""
        self._check_genome(set([""]), {
            "id": "Trixie",
            "name": "Trixie_Draft",
            "type": "add",
            "host_genus": "",
            "cluster": "",
            "subcluster": "",
            "annotation_status": "",
            "annotation_author": -1,
            "retrieve_record": -1,
            "accession": "",
            })

    def test_get_genome_2(self):
        """Verify host_genus data from ticket is added to genome."""
        # Only the genus portion of the binomial name is kept.
        self._check_genome(set(["host_genus"]),
                           {"host_genus": "Mycobacterium", "cluster": ""})

    def test_get_genome_3(self):
        """Verify cluster data from ticket is added to genome."""
        self._check_genome(set(["cluster"]),
                           {"host_genus": "", "cluster": "A"})

    def test_get_genome_4(self):
        """Verify subcluster data from ticket is added to genome."""
        self._check_genome(set(["subcluster"]),
                           {"host_genus": "", "subcluster": "A2"})

    def test_get_genome_5(self):
        """Verify annotation_status data from ticket is added to genome."""
        self._check_genome(set(["annotation_status"]),
                           {"host_genus": "", "annotation_status": "final"})

    def test_get_genome_6(self):
        """Verify annotation_author data from ticket is added to genome."""
        self._check_genome(set(["annotation_author"]),
                           {"host_genus": "", "annotation_author": 1})

    def test_get_genome_7(self):
        """Verify retrieve_record data from ticket is added to genome."""
        self._check_genome(set(["retrieve_record"]),
                           {"host_genus": "", "retrieve_record": 1})

    def test_get_genome_8(self):
        """Verify accession data from ticket is added to genome."""
        # The version suffix ('.1') is stripped from the accession.
        self._check_genome(set(["accession"]),
                           {"host_genus": "", "accession": "ABC123"})
# Run the full unittest suite when this module is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
normal
|
{
"blob_id": "d8ba2557e20920eaadd2fd35f0ebdf1b4a5b33da",
"index": 9010,
"step-1": "<mask token>\n\n\nclass TestTicketFunctions1(unittest.TestCase):\n\n def setUp(self):\n self.required_keys = constants.IMPORT_TABLE_STRUCTURE['required']\n self.optional_keys = constants.IMPORT_TABLE_STRUCTURE['optional']\n self.keywords = constants.IMPORT_TABLE_STRUCTURE['keywords']\n self.ticket_dict1 = {}\n self.ticket_dict1['type'] = 'add'\n self.ticket_dict1['phage_id'] = 'Trixie'\n self.ticket_dict1['description_field'] = 'product'\n self.ticket_dict1['eval_mode'] = 'final'\n self.ticket_dict1['host_genus'] = 'retrieve'\n self.ticket_dict1['cluster'] = 'retain'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'parse'\n self.ticket_dict2 = {}\n self.ticket_dict3 = {}\n self.ticket_dict3['type'] = 'ADD'\n self.ticket_dict3['phage_id'] = 'Trixie'\n self.ticket_dict3['description_field'] = 'PRODUCT'\n self.ticket_dict3['eval_mode'] = 'FINAL'\n self.ticket_dict3['host_genus'] = 'RETRIEVE'\n self.ticket_dict3['subcluster'] = None\n self.ticket_dict3['accession'] = 'PARSE'\n self.ticket_dict3['retrieve_record'] = 'RETAIN'\n self.ticket_dict4 = {}\n self.ticket_dict4['type'] = 'ADD'\n self.ticket_dict4['phage_id'] = 'Trixie'\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_modify_import_data_5(self):\n \"\"\"Verify returns True with completed dictionary from a\n minimal replace ticket.\"\"\"\n self.ticket_dict4['type'] = 'replace'\n self.ticket_dict4['description_field'] = 'product'\n self.ticket_dict4['eval_mode'] = 'final'\n result = tickets.modify_import_data(self.ticket_dict4, self.\n required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict4['host_genus'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['cluster'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['subcluster'], 'retain')\n with self.subTest():\n 
self.assertEqual(self.ticket_dict4['annotation_author'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['retrieve_record'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_status'], 'final')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['accession'], 'retain')\n <mask token>\n\n def test_parse_import_ticket_data_2(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'retain', 'retrieve', or 'parse' sets.\"\"\"\n self.ticket_dict1['host_genus'] = 'Mycobacterium'\n self.ticket_dict1['cluster'] = 'A'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'ABC123'\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set())\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set(['subcluster', 'host_genus',\n 'cluster', 'accession']))\n\n def test_parse_import_ticket_data_3(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'add' sets.\"\"\"\n self.ticket_dict1['host_genus'] = 'retrieve'\n self.ticket_dict1['cluster'] = 'retrieve'\n self.ticket_dict1['subcluster'] = 'retrieve'\n self.ticket_dict1['accession'] = 'retrieve'\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 
'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set(['subcluster',\n 'host_genus', 'cluster', 'accession']))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set())\n <mask token>\n\n def test_set_keywords_1(self):\n \"\"\"Verify one value is lowercased.\"\"\"\n data_dict = {'type': 'ADD', 'cluster': 'RETRIEVE', 'subcluster':\n 'NONE', 'host_genus': 'PARSE', 'retrieve_record': 'RETAIN'}\n keywords = set(['retrieve', 'retain'])\n tickets.set_keywords(data_dict, self.keywords)\n with self.subTest():\n self.assertEqual(data_dict['type'], 'ADD')\n with self.subTest():\n self.assertEqual(data_dict['cluster'], 'retrieve')\n with self.subTest():\n self.assertEqual(data_dict['subcluster'], 'none')\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], 'parse')\n with self.subTest():\n self.assertEqual(data_dict['retrieve_record'], 'retain')\n\n def test_set_missing_keys_1(self):\n \"\"\"Verify one missing key is added.\"\"\"\n data_dict = {'type': 'add', 'cluster': ''}\n key_set = set(['type', 'host_genus'])\n tickets.set_missing_keys(data_dict, key_set)\n with self.subTest():\n self.assertEqual(len(data_dict.keys()), 3)\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], '')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_construct_tickets_1(self):\n \"\"\"Verify two tickets are constructed correctly.\n The first ticket contains all required and optional fields.\n The second ticket contains all required fields.\"\"\"\n dict_list = [self.ticket_dict1, self.ticket_dict4]\n eval_data_dict = {'eval_mode': 'custom_eval_mode', 'eval_flag_dict':\n {'check_locus_tag': False}}\n list_of_tickets = tickets.construct_tickets(dict_list,\n 
eval_data_dict, 'function', self.required_keys, self.\n optional_keys, self.keywords)\n with self.subTest():\n self.assertEqual(len(list_of_tickets), 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].id, 1)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].eval_mode, 'final')\n with self.subTest():\n self.assertEqual(list_of_tickets[0].description_field, 'product')\n with self.subTest():\n self.assertTrue(list_of_tickets[0].eval_flags['check_locus_tag'])\n with self.subTest():\n self.assertEqual(list_of_tickets[1].id, 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[1].eval_mode, 'custom_eval_mode')\n with self.subTest():\n self.assertEqual(list_of_tickets[1].description_field, 'function')\n with self.subTest():\n self.assertFalse(list_of_tickets[1].eval_flags['check_locus_tag'])\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_identify_duplicates_4(self):\n \"\"\"Verify two tickets with Primary Phage ID duplicates\n do generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n def test_identify_duplicates_6(self):\n \"\"\"Verify two tickets with multiple duplicates\n do generate multiple errors.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = 
tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n\nclass TestTicketFunctions2(unittest.TestCase):\n\n def setUp(self):\n self.ticket1 = ticket.ImportTicket()\n self.ticket2 = ticket.ImportTicket()\n self.ticket1.phage_id = 'Trixie'\n self.ticket2.phage_id = 'L5'\n self.bundle1 = bundle.Bundle()\n self.bundle2 = bundle.Bundle()\n self.bundle1.ticket = self.ticket1\n self.bundle2.ticket = self.ticket2\n\n\nclass TestTicketFunctions3(unittest.TestCase):\n\n def setUp(self):\n self.data_dict = {}\n self.data_dict['host_genus'] = 'Mycobacterium smegmatis'\n self.data_dict['accession'] = 'ABC123.1'\n self.data_dict['annotation_status'] = 'final'\n self.data_dict['cluster'] = 'A'\n self.data_dict['subcluster'] = 'A2'\n self.data_dict['annotation_author'] = 1\n self.data_dict['retrieve_record'] = 1\n self.tkt1 = ticket.ImportTicket()\n self.tkt1.phage_id = 'Trixie_Draft'\n self.tkt1.data_dict = self.data_dict\n\n def test_get_genome_1(self):\n \"\"\"Verify no data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([''])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.id, 'Trixie')\n with self.subTest():\n self.assertEqual(gnm.name, 'Trixie_Draft')\n with self.subTest():\n self.assertEqual(gnm.type, 'add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.cluster, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_status, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_author, -1)\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, -1)\n with self.subTest():\n self.assertEqual(gnm.accession, '')\n\n def test_get_genome_2(self):\n \"\"\"Verify host_genus data from ticket is added to genome.\"\"\"\n 
self.tkt1.data_add = set(['host_genus'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, 'Mycobacterium')\n with self.subTest():\n self.assertEqual(gnm.cluster, '')\n\n def test_get_genome_3(self):\n \"\"\"Verify cluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['cluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.cluster, 'A')\n\n def test_get_genome_4(self):\n \"\"\"Verify subcluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['subcluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, 'A2')\n\n def test_get_genome_5(self):\n \"\"\"Verify annotation_status data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['annotation_status'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_status, 'final')\n\n def test_get_genome_6(self):\n \"\"\"Verify annotation_author data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['annotation_author'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_author, 1)\n\n def test_get_genome_7(self):\n \"\"\"Verify retrieve_record data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['retrieve_record'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, 1)\n\n def test_get_genome_8(self):\n \"\"\"Verify accession data from ticket is added to 
genome.\"\"\"\n self.tkt1.data_add = set(['accession'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.accession, 'ABC123')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestTicketFunctions1(unittest.TestCase):\n\n def setUp(self):\n self.required_keys = constants.IMPORT_TABLE_STRUCTURE['required']\n self.optional_keys = constants.IMPORT_TABLE_STRUCTURE['optional']\n self.keywords = constants.IMPORT_TABLE_STRUCTURE['keywords']\n self.ticket_dict1 = {}\n self.ticket_dict1['type'] = 'add'\n self.ticket_dict1['phage_id'] = 'Trixie'\n self.ticket_dict1['description_field'] = 'product'\n self.ticket_dict1['eval_mode'] = 'final'\n self.ticket_dict1['host_genus'] = 'retrieve'\n self.ticket_dict1['cluster'] = 'retain'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'parse'\n self.ticket_dict2 = {}\n self.ticket_dict3 = {}\n self.ticket_dict3['type'] = 'ADD'\n self.ticket_dict3['phage_id'] = 'Trixie'\n self.ticket_dict3['description_field'] = 'PRODUCT'\n self.ticket_dict3['eval_mode'] = 'FINAL'\n self.ticket_dict3['host_genus'] = 'RETRIEVE'\n self.ticket_dict3['subcluster'] = None\n self.ticket_dict3['accession'] = 'PARSE'\n self.ticket_dict3['retrieve_record'] = 'RETAIN'\n self.ticket_dict4 = {}\n self.ticket_dict4['type'] = 'ADD'\n self.ticket_dict4['phage_id'] = 'Trixie'\n <mask token>\n\n def test_modify_import_data_2(self):\n \"\"\"Verify returns False if there are extra keys.\"\"\"\n self.ticket_dict3['extra'] = 'extra'\n result = tickets.modify_import_data(self.ticket_dict3, self.\n required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)\n <mask token>\n <mask token>\n\n def test_modify_import_data_5(self):\n \"\"\"Verify returns True with completed dictionary from a\n minimal replace ticket.\"\"\"\n self.ticket_dict4['type'] = 'replace'\n self.ticket_dict4['description_field'] = 'product'\n self.ticket_dict4['eval_mode'] = 'final'\n result = tickets.modify_import_data(self.ticket_dict4, self.\n required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n 
self.assertEqual(self.ticket_dict4['host_genus'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['cluster'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['subcluster'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_author'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['retrieve_record'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_status'], 'final')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['accession'], 'retain')\n <mask token>\n\n def test_parse_import_ticket_data_2(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'retain', 'retrieve', or 'parse' sets.\"\"\"\n self.ticket_dict1['host_genus'] = 'Mycobacterium'\n self.ticket_dict1['cluster'] = 'A'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'ABC123'\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set())\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set(['subcluster', 'host_genus',\n 'cluster', 'accession']))\n\n def test_parse_import_ticket_data_3(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'add' sets.\"\"\"\n self.ticket_dict1['host_genus'] = 'retrieve'\n self.ticket_dict1['cluster'] = 'retrieve'\n self.ticket_dict1['subcluster'] = 'retrieve'\n self.ticket_dict1['accession'] = 'retrieve'\n tkt 
= tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set(['subcluster',\n 'host_genus', 'cluster', 'accession']))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set())\n <mask token>\n\n def test_set_keywords_1(self):\n \"\"\"Verify one value is lowercased.\"\"\"\n data_dict = {'type': 'ADD', 'cluster': 'RETRIEVE', 'subcluster':\n 'NONE', 'host_genus': 'PARSE', 'retrieve_record': 'RETAIN'}\n keywords = set(['retrieve', 'retain'])\n tickets.set_keywords(data_dict, self.keywords)\n with self.subTest():\n self.assertEqual(data_dict['type'], 'ADD')\n with self.subTest():\n self.assertEqual(data_dict['cluster'], 'retrieve')\n with self.subTest():\n self.assertEqual(data_dict['subcluster'], 'none')\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], 'parse')\n with self.subTest():\n self.assertEqual(data_dict['retrieve_record'], 'retain')\n\n def test_set_missing_keys_1(self):\n \"\"\"Verify one missing key is added.\"\"\"\n data_dict = {'type': 'add', 'cluster': ''}\n key_set = set(['type', 'host_genus'])\n tickets.set_missing_keys(data_dict, key_set)\n with self.subTest():\n self.assertEqual(len(data_dict.keys()), 3)\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], '')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_construct_tickets_1(self):\n \"\"\"Verify two tickets are constructed correctly.\n The first ticket contains all required and optional fields.\n The second ticket 
contains all required fields.\"\"\"\n dict_list = [self.ticket_dict1, self.ticket_dict4]\n eval_data_dict = {'eval_mode': 'custom_eval_mode', 'eval_flag_dict':\n {'check_locus_tag': False}}\n list_of_tickets = tickets.construct_tickets(dict_list,\n eval_data_dict, 'function', self.required_keys, self.\n optional_keys, self.keywords)\n with self.subTest():\n self.assertEqual(len(list_of_tickets), 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].id, 1)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].eval_mode, 'final')\n with self.subTest():\n self.assertEqual(list_of_tickets[0].description_field, 'product')\n with self.subTest():\n self.assertTrue(list_of_tickets[0].eval_flags['check_locus_tag'])\n with self.subTest():\n self.assertEqual(list_of_tickets[1].id, 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[1].eval_mode, 'custom_eval_mode')\n with self.subTest():\n self.assertEqual(list_of_tickets[1].description_field, 'function')\n with self.subTest():\n self.assertFalse(list_of_tickets[1].eval_flags['check_locus_tag'])\n <mask token>\n <mask token>\n <mask token>\n\n def test_identify_duplicates_2(self):\n \"\"\"Verify two tickets with 'none' duplicates\n do not generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 'none'\n ticket1.type = 'replace'\n ticket1.phage_id = 'none'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 'none'\n ticket2.type = 'replace'\n ticket2.phage_id = 'none'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n <mask token>\n\n def test_identify_duplicates_4(self):\n \"\"\"Verify two tickets with Primary Phage ID duplicates\n do generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n 
ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n def test_identify_duplicates_6(self):\n \"\"\"Verify two tickets with multiple duplicates\n do generate multiple errors.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n\nclass TestTicketFunctions2(unittest.TestCase):\n\n def setUp(self):\n self.ticket1 = ticket.ImportTicket()\n self.ticket2 = ticket.ImportTicket()\n self.ticket1.phage_id = 'Trixie'\n self.ticket2.phage_id = 'L5'\n self.bundle1 = bundle.Bundle()\n self.bundle2 = bundle.Bundle()\n self.bundle1.ticket = self.ticket1\n self.bundle2.ticket = self.ticket2\n\n\nclass TestTicketFunctions3(unittest.TestCase):\n\n def setUp(self):\n self.data_dict = {}\n self.data_dict['host_genus'] = 'Mycobacterium smegmatis'\n self.data_dict['accession'] = 'ABC123.1'\n self.data_dict['annotation_status'] = 'final'\n self.data_dict['cluster'] = 'A'\n self.data_dict['subcluster'] = 'A2'\n self.data_dict['annotation_author'] = 1\n self.data_dict['retrieve_record'] = 1\n self.tkt1 = ticket.ImportTicket()\n self.tkt1.phage_id = 'Trixie_Draft'\n self.tkt1.data_dict = self.data_dict\n\n def test_get_genome_1(self):\n \"\"\"Verify no data from ticket is 
added to genome.\"\"\"\n self.tkt1.data_add = set([''])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.id, 'Trixie')\n with self.subTest():\n self.assertEqual(gnm.name, 'Trixie_Draft')\n with self.subTest():\n self.assertEqual(gnm.type, 'add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.cluster, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_status, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_author, -1)\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, -1)\n with self.subTest():\n self.assertEqual(gnm.accession, '')\n\n def test_get_genome_2(self):\n \"\"\"Verify host_genus data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['host_genus'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, 'Mycobacterium')\n with self.subTest():\n self.assertEqual(gnm.cluster, '')\n\n def test_get_genome_3(self):\n \"\"\"Verify cluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['cluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.cluster, 'A')\n\n def test_get_genome_4(self):\n \"\"\"Verify subcluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['subcluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, 'A2')\n\n def test_get_genome_5(self):\n \"\"\"Verify annotation_status data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['annotation_status'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n 
with self.subTest():\n self.assertEqual(gnm.annotation_status, 'final')\n\n def test_get_genome_6(self):\n \"\"\"Verify annotation_author data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['annotation_author'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_author, 1)\n\n def test_get_genome_7(self):\n \"\"\"Verify retrieve_record data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['retrieve_record'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, 1)\n\n def test_get_genome_8(self):\n \"\"\"Verify accession data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['accession'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.accession, 'ABC123')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestTicketFunctions1(unittest.TestCase):\n\n def setUp(self):\n self.required_keys = constants.IMPORT_TABLE_STRUCTURE['required']\n self.optional_keys = constants.IMPORT_TABLE_STRUCTURE['optional']\n self.keywords = constants.IMPORT_TABLE_STRUCTURE['keywords']\n self.ticket_dict1 = {}\n self.ticket_dict1['type'] = 'add'\n self.ticket_dict1['phage_id'] = 'Trixie'\n self.ticket_dict1['description_field'] = 'product'\n self.ticket_dict1['eval_mode'] = 'final'\n self.ticket_dict1['host_genus'] = 'retrieve'\n self.ticket_dict1['cluster'] = 'retain'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'parse'\n self.ticket_dict2 = {}\n self.ticket_dict3 = {}\n self.ticket_dict3['type'] = 'ADD'\n self.ticket_dict3['phage_id'] = 'Trixie'\n self.ticket_dict3['description_field'] = 'PRODUCT'\n self.ticket_dict3['eval_mode'] = 'FINAL'\n self.ticket_dict3['host_genus'] = 'RETRIEVE'\n self.ticket_dict3['subcluster'] = None\n self.ticket_dict3['accession'] = 'PARSE'\n self.ticket_dict3['retrieve_record'] = 'RETAIN'\n self.ticket_dict4 = {}\n self.ticket_dict4['type'] = 'ADD'\n self.ticket_dict4['phage_id'] = 'Trixie'\n <mask token>\n\n def test_modify_import_data_2(self):\n \"\"\"Verify returns False if there are extra keys.\"\"\"\n self.ticket_dict3['extra'] = 'extra'\n result = tickets.modify_import_data(self.ticket_dict3, self.\n required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)\n <mask token>\n <mask token>\n\n def test_modify_import_data_5(self):\n \"\"\"Verify returns True with completed dictionary from a\n minimal replace ticket.\"\"\"\n self.ticket_dict4['type'] = 'replace'\n self.ticket_dict4['description_field'] = 'product'\n self.ticket_dict4['eval_mode'] = 'final'\n result = tickets.modify_import_data(self.ticket_dict4, self.\n required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n 
self.assertEqual(self.ticket_dict4['host_genus'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['cluster'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['subcluster'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_author'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['retrieve_record'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_status'], 'final')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['accession'], 'retain')\n\n def test_parse_import_ticket_data_1(self):\n \"\"\"Verify ticket is generated from correct data dictionary.\"\"\"\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set(['host_genus']))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set(['cluster']))\n with self.subTest():\n self.assertEqual(tkt.data_parse, set(['accession']))\n with self.subTest():\n self.assertEqual(tkt.data_add, set(['subcluster']))\n\n def test_parse_import_ticket_data_2(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'retain', 'retrieve', or 'parse' sets.\"\"\"\n self.ticket_dict1['host_genus'] = 'Mycobacterium'\n self.ticket_dict1['cluster'] = 'A'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'ABC123'\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n 
self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set())\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set(['subcluster', 'host_genus',\n 'cluster', 'accession']))\n\n def test_parse_import_ticket_data_3(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'add' sets.\"\"\"\n self.ticket_dict1['host_genus'] = 'retrieve'\n self.ticket_dict1['cluster'] = 'retrieve'\n self.ticket_dict1['subcluster'] = 'retrieve'\n self.ticket_dict1['accession'] = 'retrieve'\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set(['subcluster',\n 'host_genus', 'cluster', 'accession']))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set())\n <mask token>\n\n def test_set_keywords_1(self):\n \"\"\"Verify one value is lowercased.\"\"\"\n data_dict = {'type': 'ADD', 'cluster': 'RETRIEVE', 'subcluster':\n 'NONE', 'host_genus': 'PARSE', 'retrieve_record': 'RETAIN'}\n keywords = set(['retrieve', 'retain'])\n tickets.set_keywords(data_dict, self.keywords)\n with self.subTest():\n self.assertEqual(data_dict['type'], 'ADD')\n with self.subTest():\n self.assertEqual(data_dict['cluster'], 
'retrieve')\n with self.subTest():\n self.assertEqual(data_dict['subcluster'], 'none')\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], 'parse')\n with self.subTest():\n self.assertEqual(data_dict['retrieve_record'], 'retain')\n\n def test_set_missing_keys_1(self):\n \"\"\"Verify one missing key is added.\"\"\"\n data_dict = {'type': 'add', 'cluster': ''}\n key_set = set(['type', 'host_genus'])\n tickets.set_missing_keys(data_dict, key_set)\n with self.subTest():\n self.assertEqual(len(data_dict.keys()), 3)\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], '')\n\n def test_set_missing_keys_2(self):\n \"\"\"Verify no missing key is added.\"\"\"\n data_dict = {'type': 'add', 'cluster': ''}\n key_set = set(['type', 'cluster'])\n tickets.set_missing_keys(data_dict, key_set)\n self.assertEqual(len(data_dict.keys()), 2)\n <mask token>\n <mask token>\n <mask token>\n\n def test_construct_tickets_1(self):\n \"\"\"Verify two tickets are constructed correctly.\n The first ticket contains all required and optional fields.\n The second ticket contains all required fields.\"\"\"\n dict_list = [self.ticket_dict1, self.ticket_dict4]\n eval_data_dict = {'eval_mode': 'custom_eval_mode', 'eval_flag_dict':\n {'check_locus_tag': False}}\n list_of_tickets = tickets.construct_tickets(dict_list,\n eval_data_dict, 'function', self.required_keys, self.\n optional_keys, self.keywords)\n with self.subTest():\n self.assertEqual(len(list_of_tickets), 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].id, 1)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].eval_mode, 'final')\n with self.subTest():\n self.assertEqual(list_of_tickets[0].description_field, 'product')\n with self.subTest():\n self.assertTrue(list_of_tickets[0].eval_flags['check_locus_tag'])\n with self.subTest():\n self.assertEqual(list_of_tickets[1].id, 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[1].eval_mode, 'custom_eval_mode')\n with self.subTest():\n 
self.assertEqual(list_of_tickets[1].description_field, 'function')\n with self.subTest():\n self.assertFalse(list_of_tickets[1].eval_flags['check_locus_tag'])\n <mask token>\n <mask token>\n\n def test_identify_duplicates_1(self):\n \"\"\"Verify no duplicates are produced.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = 'replace'\n ticket2.phage_id = 'L5'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n\n def test_identify_duplicates_2(self):\n \"\"\"Verify two tickets with 'none' duplicates\n do not generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 'none'\n ticket1.type = 'replace'\n ticket1.phage_id = 'none'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 'none'\n ticket2.type = 'replace'\n ticket2.phage_id = 'none'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n <mask token>\n\n def test_identify_duplicates_4(self):\n \"\"\"Verify two tickets with Primary Phage ID duplicates\n do generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n 
with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n def test_identify_duplicates_6(self):\n \"\"\"Verify two tickets with multiple duplicates\n do generate multiple errors.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n\nclass TestTicketFunctions2(unittest.TestCase):\n\n def setUp(self):\n self.ticket1 = ticket.ImportTicket()\n self.ticket2 = ticket.ImportTicket()\n self.ticket1.phage_id = 'Trixie'\n self.ticket2.phage_id = 'L5'\n self.bundle1 = bundle.Bundle()\n self.bundle2 = bundle.Bundle()\n self.bundle1.ticket = self.ticket1\n self.bundle2.ticket = self.ticket2\n\n\nclass TestTicketFunctions3(unittest.TestCase):\n\n def setUp(self):\n self.data_dict = {}\n self.data_dict['host_genus'] = 'Mycobacterium smegmatis'\n self.data_dict['accession'] = 'ABC123.1'\n self.data_dict['annotation_status'] = 'final'\n self.data_dict['cluster'] = 'A'\n self.data_dict['subcluster'] = 'A2'\n self.data_dict['annotation_author'] = 1\n self.data_dict['retrieve_record'] = 1\n self.tkt1 = ticket.ImportTicket()\n self.tkt1.phage_id = 'Trixie_Draft'\n self.tkt1.data_dict = self.data_dict\n\n def test_get_genome_1(self):\n \"\"\"Verify no data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([''])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.id, 'Trixie')\n with self.subTest():\n self.assertEqual(gnm.name, 'Trixie_Draft')\n with self.subTest():\n self.assertEqual(gnm.type, 'add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n 
with self.subTest():\n self.assertEqual(gnm.cluster, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_status, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_author, -1)\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, -1)\n with self.subTest():\n self.assertEqual(gnm.accession, '')\n\n def test_get_genome_2(self):\n \"\"\"Verify host_genus data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['host_genus'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, 'Mycobacterium')\n with self.subTest():\n self.assertEqual(gnm.cluster, '')\n\n def test_get_genome_3(self):\n \"\"\"Verify cluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['cluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.cluster, 'A')\n\n def test_get_genome_4(self):\n \"\"\"Verify subcluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['subcluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, 'A2')\n\n def test_get_genome_5(self):\n \"\"\"Verify annotation_status data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['annotation_status'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_status, 'final')\n\n def test_get_genome_6(self):\n \"\"\"Verify annotation_author data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['annotation_author'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with 
self.subTest():\n self.assertEqual(gnm.annotation_author, 1)\n\n def test_get_genome_7(self):\n \"\"\"Verify retrieve_record data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['retrieve_record'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, 1)\n\n def test_get_genome_8(self):\n \"\"\"Verify accession data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['accession'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.accession, 'ABC123')\n\n\n<mask token>\n",
"step-4": "<mask token>\nfrom pdm_utils.classes import bundle\nfrom pdm_utils.classes import genome\nfrom pdm_utils.classes import ticket\nfrom pdm_utils.classes import eval\nfrom pdm_utils.functions import tickets\nfrom pdm_utils.constants import constants\nimport unittest\n\n\nclass TestTicketFunctions1(unittest.TestCase):\n\n def setUp(self):\n self.required_keys = constants.IMPORT_TABLE_STRUCTURE['required']\n self.optional_keys = constants.IMPORT_TABLE_STRUCTURE['optional']\n self.keywords = constants.IMPORT_TABLE_STRUCTURE['keywords']\n self.ticket_dict1 = {}\n self.ticket_dict1['type'] = 'add'\n self.ticket_dict1['phage_id'] = 'Trixie'\n self.ticket_dict1['description_field'] = 'product'\n self.ticket_dict1['eval_mode'] = 'final'\n self.ticket_dict1['host_genus'] = 'retrieve'\n self.ticket_dict1['cluster'] = 'retain'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'parse'\n self.ticket_dict2 = {}\n self.ticket_dict3 = {}\n self.ticket_dict3['type'] = 'ADD'\n self.ticket_dict3['phage_id'] = 'Trixie'\n self.ticket_dict3['description_field'] = 'PRODUCT'\n self.ticket_dict3['eval_mode'] = 'FINAL'\n self.ticket_dict3['host_genus'] = 'RETRIEVE'\n self.ticket_dict3['subcluster'] = None\n self.ticket_dict3['accession'] = 'PARSE'\n self.ticket_dict3['retrieve_record'] = 'RETAIN'\n self.ticket_dict4 = {}\n self.ticket_dict4['type'] = 'ADD'\n self.ticket_dict4['phage_id'] = 'Trixie'\n\n def test_modify_import_data_1(self):\n \"\"\"Verify returns False if there are missing required keys.\"\"\"\n result = tickets.modify_import_data(self.ticket_dict2, self.\n required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)\n\n def test_modify_import_data_2(self):\n \"\"\"Verify returns False if there are extra keys.\"\"\"\n self.ticket_dict3['extra'] = 'extra'\n result = tickets.modify_import_data(self.ticket_dict3, self.\n required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)\n\n def 
test_modify_import_data_3(self):\n \"\"\"Verify returns True with keywords identified and values lowercased.\"\"\"\n result = tickets.modify_import_data(self.ticket_dict3, self.\n required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict3['host_genus'], 'retrieve')\n with self.subTest():\n self.assertEqual(self.ticket_dict3['retrieve_record'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict3['subcluster'], 'retrieve')\n with self.subTest():\n self.assertEqual(self.ticket_dict3['accession'], 'parse')\n with self.subTest():\n self.assertEqual(self.ticket_dict3['type'], 'add')\n with self.subTest():\n self.assertEqual(self.ticket_dict3['description_field'], 'product')\n with self.subTest():\n self.assertEqual(self.ticket_dict3['eval_mode'], 'final')\n\n def test_modify_import_data_4(self):\n \"\"\"Verify returns True with completed dictionary from a\n minimal add ticket.\"\"\"\n self.ticket_dict4['description_field'] = 'product'\n self.ticket_dict4['eval_mode'] = 'final'\n result = tickets.modify_import_data(self.ticket_dict4, self.\n required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict4['host_genus'], 'retrieve')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['cluster'], 'retrieve')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['subcluster'], 'retrieve')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_author'], '1')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['retrieve_record'], '1')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_status'], 'draft')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['accession'], '')\n\n def test_modify_import_data_5(self):\n \"\"\"Verify returns True with completed dictionary from a\n minimal replace 
ticket.\"\"\"\n self.ticket_dict4['type'] = 'replace'\n self.ticket_dict4['description_field'] = 'product'\n self.ticket_dict4['eval_mode'] = 'final'\n result = tickets.modify_import_data(self.ticket_dict4, self.\n required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict4['host_genus'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['cluster'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['subcluster'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_author'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['retrieve_record'], 'retain')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['annotation_status'], 'final')\n with self.subTest():\n self.assertEqual(self.ticket_dict4['accession'], 'retain')\n\n def test_parse_import_ticket_data_1(self):\n \"\"\"Verify ticket is generated from correct data dictionary.\"\"\"\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set(['host_genus']))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set(['cluster']))\n with self.subTest():\n self.assertEqual(tkt.data_parse, set(['accession']))\n with self.subTest():\n self.assertEqual(tkt.data_add, set(['subcluster']))\n\n def test_parse_import_ticket_data_2(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'retain', 'retrieve', or 'parse' sets.\"\"\"\n self.ticket_dict1['host_genus'] = 'Mycobacterium'\n 
self.ticket_dict1['cluster'] = 'A'\n self.ticket_dict1['subcluster'] = 'A2'\n self.ticket_dict1['accession'] = 'ABC123'\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set())\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set(['subcluster', 'host_genus',\n 'cluster', 'accession']))\n\n def test_parse_import_ticket_data_3(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'add' sets.\"\"\"\n self.ticket_dict1['host_genus'] = 'retrieve'\n self.ticket_dict1['cluster'] = 'retrieve'\n self.ticket_dict1['subcluster'] = 'retrieve'\n self.ticket_dict1['accession'] = 'retrieve'\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, 'add')\n with self.subTest():\n self.assertEqual(tkt.phage_id, 'Trixie')\n with self.subTest():\n self.assertEqual(tkt.description_field, 'product')\n with self.subTest():\n self.assertEqual(tkt.eval_mode, 'final')\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set(['subcluster',\n 'host_genus', 'cluster', 'accession']))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set())\n\n def test_set_empty_1(self):\n \"\"\"Verify one None value is set to ''.\"\"\"\n data_dict = {'type': 'add', 
'cluster': None}\n tickets.set_empty(data_dict)\n with self.subTest():\n self.assertEqual(data_dict['type'], 'add')\n with self.subTest():\n self.assertEqual(data_dict['cluster'], '')\n\n def test_set_keywords_1(self):\n \"\"\"Verify one value is lowercased.\"\"\"\n data_dict = {'type': 'ADD', 'cluster': 'RETRIEVE', 'subcluster':\n 'NONE', 'host_genus': 'PARSE', 'retrieve_record': 'RETAIN'}\n keywords = set(['retrieve', 'retain'])\n tickets.set_keywords(data_dict, self.keywords)\n with self.subTest():\n self.assertEqual(data_dict['type'], 'ADD')\n with self.subTest():\n self.assertEqual(data_dict['cluster'], 'retrieve')\n with self.subTest():\n self.assertEqual(data_dict['subcluster'], 'none')\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], 'parse')\n with self.subTest():\n self.assertEqual(data_dict['retrieve_record'], 'retain')\n\n def test_set_missing_keys_1(self):\n \"\"\"Verify one missing key is added.\"\"\"\n data_dict = {'type': 'add', 'cluster': ''}\n key_set = set(['type', 'host_genus'])\n tickets.set_missing_keys(data_dict, key_set)\n with self.subTest():\n self.assertEqual(len(data_dict.keys()), 3)\n with self.subTest():\n self.assertEqual(data_dict['host_genus'], '')\n\n def test_set_missing_keys_2(self):\n \"\"\"Verify no missing key is added.\"\"\"\n data_dict = {'type': 'add', 'cluster': ''}\n key_set = set(['type', 'cluster'])\n tickets.set_missing_keys(data_dict, key_set)\n self.assertEqual(len(data_dict.keys()), 2)\n\n def test_set_dict_value_1(self):\n \"\"\"Verify empty value is replaced with first value.\"\"\"\n data_dict = {'type': 'add', 'cluster': ''}\n tickets.set_dict_value(data_dict, 'cluster', 'A', 'B')\n self.assertEqual(data_dict['cluster'], 'A')\n\n def test_set_dict_value_2(self):\n \"\"\"Verify empty value is replaced with second value.\"\"\"\n data_dict = {'type': 'replace', 'cluster': ''}\n tickets.set_dict_value(data_dict, 'cluster', 'A', 'B')\n self.assertEqual(data_dict['cluster'], 'B')\n\n def 
test_set_dict_value_3(self):\n \"\"\"Verify non-empty value is not replaced.\"\"\"\n data_dict = {'type': 'replace', 'cluster': 'C'}\n tickets.set_dict_value(data_dict, 'cluster', 'A', 'B')\n self.assertEqual(data_dict['cluster'], 'C')\n\n def test_construct_tickets_1(self):\n \"\"\"Verify two tickets are constructed correctly.\n The first ticket contains all required and optional fields.\n The second ticket contains all required fields.\"\"\"\n dict_list = [self.ticket_dict1, self.ticket_dict4]\n eval_data_dict = {'eval_mode': 'custom_eval_mode', 'eval_flag_dict':\n {'check_locus_tag': False}}\n list_of_tickets = tickets.construct_tickets(dict_list,\n eval_data_dict, 'function', self.required_keys, self.\n optional_keys, self.keywords)\n with self.subTest():\n self.assertEqual(len(list_of_tickets), 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].id, 1)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].eval_mode, 'final')\n with self.subTest():\n self.assertEqual(list_of_tickets[0].description_field, 'product')\n with self.subTest():\n self.assertTrue(list_of_tickets[0].eval_flags['check_locus_tag'])\n with self.subTest():\n self.assertEqual(list_of_tickets[1].id, 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[1].eval_mode, 'custom_eval_mode')\n with self.subTest():\n self.assertEqual(list_of_tickets[1].description_field, 'function')\n with self.subTest():\n self.assertFalse(list_of_tickets[1].eval_flags['check_locus_tag'])\n\n def test_construct_tickets_2(self):\n \"\"\"Verify one ticket is constructed correctly. 
The second data\n dictionary is not structured correctly.\"\"\"\n dict_list = [self.ticket_dict1, self.ticket_dict2]\n eval_data_dict = {'eval_mode': 'custom_eval_mode', 'eval_flag_dict': {}\n }\n list_of_tickets = tickets.construct_tickets(dict_list,\n eval_data_dict, 'function', self.required_keys, self.\n optional_keys, self.keywords)\n with self.subTest():\n self.assertEqual(len(list_of_tickets), 1)\n\n def test_construct_tickets_3(self):\n \"\"\"Verify four tickets constructed correctly. The first two tickets\n contain all required and optional fields. The second two tickets\n contain all required fields. Verify that each eval_flag dictionary\n is a separate object that can be modified without impacting the other\n eval_flag dictionaries.\"\"\"\n tkt_dict1 = {}\n tkt_dict1['type'] = 'add'\n tkt_dict1['phage_id'] = 'Trixie'\n tkt_dict1['description_field'] = 'product'\n tkt_dict1['eval_mode'] = 'final'\n tkt_dict2 = {}\n tkt_dict2['type'] = 'add'\n tkt_dict2['phage_id'] = 'L5'\n tkt_dict2['description_field'] = 'product'\n tkt_dict2['eval_mode'] = 'final'\n tkt_dict3 = {}\n tkt_dict3['type'] = 'add'\n tkt_dict3['phage_id'] = 'RedRock'\n tkt_dict4 = {}\n tkt_dict4['type'] = 'add'\n tkt_dict4['phage_id'] = 'Bxb1'\n dict_list = [tkt_dict1, tkt_dict2, tkt_dict3, tkt_dict4]\n eval_data_dict = {'eval_mode': 'custom_eval_mode', 'eval_flag_dict':\n {'check_locus_tag': False}}\n tkt_list = tickets.construct_tickets(dict_list, eval_data_dict,\n 'function', self.required_keys, self.optional_keys, self.keywords)\n tkt_list[0].eval_flags['check_locus_tag'] = 0\n tkt_list[1].eval_flags['check_locus_tag'] = 1\n tkt_list[2].eval_flags['check_locus_tag'] = 2\n tkt_list[3].eval_flags['check_locus_tag'] = 3\n with self.subTest():\n self.assertEqual(tkt_list[0].eval_flags['check_locus_tag'], 0)\n with self.subTest():\n self.assertEqual(tkt_list[1].eval_flags['check_locus_tag'], 1)\n with self.subTest():\n self.assertEqual(tkt_list[2].eval_flags['check_locus_tag'], 2)\n with 
self.subTest():\n self.assertEqual(tkt_list[3].eval_flags['check_locus_tag'], 3)\n\n def test_identify_duplicates_1(self):\n \"\"\"Verify no duplicates are produced.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = 'replace'\n ticket2.phage_id = 'L5'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n\n def test_identify_duplicates_2(self):\n \"\"\"Verify two tickets with 'none' duplicates\n do not generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 'none'\n ticket1.type = 'replace'\n ticket1.phage_id = 'none'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 'none'\n ticket2.type = 'replace'\n ticket2.phage_id = 'none'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n\n def test_identify_duplicates_3(self):\n \"\"\"Verify two tickets with id duplicates\n do generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'L5'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n\n def test_identify_duplicates_4(self):\n \"\"\"Verify two tickets with 
Primary Phage ID duplicates\n do generate an error.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n def test_identify_duplicates_6(self):\n \"\"\"Verify two tickets with multiple duplicates\n do generate multiple errors.\"\"\"\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = 'replace'\n ticket1.phage_id = 'Trixie'\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = 'replace'\n ticket2.phage_id = 'Trixie'\n null_set = set(['none'])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = tickets.identify_duplicates(list_of_tickets,\n null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n\nclass TestTicketFunctions2(unittest.TestCase):\n\n def setUp(self):\n self.ticket1 = ticket.ImportTicket()\n self.ticket2 = ticket.ImportTicket()\n self.ticket1.phage_id = 'Trixie'\n self.ticket2.phage_id = 'L5'\n self.bundle1 = bundle.Bundle()\n self.bundle2 = bundle.Bundle()\n self.bundle1.ticket = self.ticket1\n self.bundle2.ticket = self.ticket2\n\n\nclass TestTicketFunctions3(unittest.TestCase):\n\n def setUp(self):\n self.data_dict = {}\n self.data_dict['host_genus'] = 'Mycobacterium smegmatis'\n self.data_dict['accession'] = 'ABC123.1'\n self.data_dict['annotation_status'] = 'final'\n self.data_dict['cluster'] = 'A'\n self.data_dict['subcluster'] = 'A2'\n self.data_dict['annotation_author'] = 1\n self.data_dict['retrieve_record'] = 1\n self.tkt1 = ticket.ImportTicket()\n 
self.tkt1.phage_id = 'Trixie_Draft'\n self.tkt1.data_dict = self.data_dict\n\n def test_get_genome_1(self):\n \"\"\"Verify no data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([''])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.id, 'Trixie')\n with self.subTest():\n self.assertEqual(gnm.name, 'Trixie_Draft')\n with self.subTest():\n self.assertEqual(gnm.type, 'add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.cluster, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_status, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_author, -1)\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, -1)\n with self.subTest():\n self.assertEqual(gnm.accession, '')\n\n def test_get_genome_2(self):\n \"\"\"Verify host_genus data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['host_genus'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, 'Mycobacterium')\n with self.subTest():\n self.assertEqual(gnm.cluster, '')\n\n def test_get_genome_3(self):\n \"\"\"Verify cluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['cluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.cluster, 'A')\n\n def test_get_genome_4(self):\n \"\"\"Verify subcluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['subcluster'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.subcluster, 'A2')\n\n def test_get_genome_5(self):\n \"\"\"Verify annotation_status data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = 
set(['annotation_status'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_status, 'final')\n\n def test_get_genome_6(self):\n \"\"\"Verify annotation_author data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['annotation_author'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.annotation_author, 1)\n\n def test_get_genome_7(self):\n \"\"\"Verify retrieve_record data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['retrieve_record'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, 1)\n\n def test_get_genome_8(self):\n \"\"\"Verify accession data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set(['accession'])\n gnm = tickets.get_genome(self.tkt1, gnm_type='add')\n with self.subTest():\n self.assertEqual(gnm.host_genus, '')\n with self.subTest():\n self.assertEqual(gnm.accession, 'ABC123')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "\"\"\"Unit tests for misc. ticket functions.\"\"\"\n\nfrom pdm_utils.classes import bundle\nfrom pdm_utils.classes import genome\nfrom pdm_utils.classes import ticket\nfrom pdm_utils.classes import eval\nfrom pdm_utils.functions import tickets\nfrom pdm_utils.constants import constants\nimport unittest\n\n\n\n\n\nclass TestTicketFunctions1(unittest.TestCase):\n\n\n def setUp(self):\n self.required_keys = constants.IMPORT_TABLE_STRUCTURE[\"required\"]\n self.optional_keys = constants.IMPORT_TABLE_STRUCTURE[\"optional\"]\n self.keywords = constants.IMPORT_TABLE_STRUCTURE[\"keywords\"]\n\n self.ticket_dict1 = {}\n self.ticket_dict1[\"type\"] = \"add\"\n self.ticket_dict1[\"phage_id\"] = \"Trixie\"\n self.ticket_dict1[\"description_field\"] = \"product\"\n self.ticket_dict1[\"eval_mode\"] = \"final\"\n self.ticket_dict1[\"host_genus\"] = \"retrieve\"\n self.ticket_dict1[\"cluster\"] = \"retain\"\n self.ticket_dict1[\"subcluster\"] = \"A2\"\n self.ticket_dict1[\"accession\"] = \"parse\"\n\n\n self.ticket_dict2 = {}\n\n self.ticket_dict3 = {}\n self.ticket_dict3[\"type\"] = \"ADD\"\n self.ticket_dict3[\"phage_id\"] = \"Trixie\"\n self.ticket_dict3[\"description_field\"] = \"PRODUCT\"\n self.ticket_dict3[\"eval_mode\"] = \"FINAL\"\n self.ticket_dict3[\"host_genus\"] = \"RETRIEVE\"\n self.ticket_dict3[\"subcluster\"] = None\n self.ticket_dict3[\"accession\"] = \"PARSE\"\n self.ticket_dict3[\"retrieve_record\"] = \"RETAIN\"\n\n\n self.ticket_dict4 = {}\n self.ticket_dict4[\"type\"] = \"ADD\"\n self.ticket_dict4[\"phage_id\"] = \"Trixie\"\n\n\n def test_modify_import_data_1(self):\n \"\"\"Verify returns False if there are missing required keys.\"\"\"\n result = tickets.modify_import_data(self.ticket_dict2,\n self.required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)\n\n\n def test_modify_import_data_2(self):\n \"\"\"Verify returns False if there are extra keys.\"\"\"\n self.ticket_dict3[\"extra\"] = \"extra\"\n result = 
tickets.modify_import_data(self.ticket_dict3,\n self.required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)\n\n\n def test_modify_import_data_3(self):\n \"\"\"Verify returns True with keywords identified and values lowercased.\"\"\"\n result = tickets.modify_import_data(self.ticket_dict3,\n self.required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"host_genus\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"retrieve_record\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"subcluster\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"accession\"], \"parse\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"type\"], \"add\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"description_field\"], \"product\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"eval_mode\"], \"final\")\n\n\n def test_modify_import_data_4(self):\n \"\"\"Verify returns True with completed dictionary from a\n minimal add ticket.\"\"\"\n self.ticket_dict4[\"description_field\"] = \"product\"\n self.ticket_dict4[\"eval_mode\"] = \"final\"\n result = tickets.modify_import_data(self.ticket_dict4,\n self.required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"host_genus\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"cluster\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"subcluster\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"annotation_author\"], \"1\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"retrieve_record\"], \"1\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"annotation_status\"], \"draft\")\n with 
self.subTest():\n self.assertEqual(self.ticket_dict4[\"accession\"], \"\")\n\n\n def test_modify_import_data_5(self):\n \"\"\"Verify returns True with completed dictionary from a\n minimal replace ticket.\"\"\"\n self.ticket_dict4[\"type\"] = \"replace\"\n self.ticket_dict4[\"description_field\"] = \"product\"\n self.ticket_dict4[\"eval_mode\"] = \"final\"\n result = tickets.modify_import_data(self.ticket_dict4,\n self.required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"host_genus\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"cluster\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"subcluster\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"annotation_author\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"retrieve_record\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"annotation_status\"], \"final\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"accession\"], \"retain\")\n\n\n\n\n def test_parse_import_ticket_data_1(self):\n \"\"\"Verify ticket is generated from correct data dictionary.\"\"\"\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, \"add\")\n with self.subTest():\n self.assertEqual(tkt.phage_id, \"Trixie\")\n with self.subTest():\n self.assertEqual(tkt.description_field, \"product\")\n with self.subTest():\n self.assertEqual(tkt.eval_mode, \"final\")\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set([\"host_genus\"]))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set([\"cluster\"]))\n with self.subTest():\n self.assertEqual(tkt.data_parse, set([\"accession\"]))\n with self.subTest():\n self.assertEqual(tkt.data_add, 
set([\"subcluster\"]))\n\n def test_parse_import_ticket_data_2(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'retain', 'retrieve', or 'parse' sets.\"\"\"\n self.ticket_dict1[\"host_genus\"] = \"Mycobacterium\"\n self.ticket_dict1[\"cluster\"] = \"A\"\n self.ticket_dict1[\"subcluster\"] = \"A2\"\n self.ticket_dict1[\"accession\"] = \"ABC123\"\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, \"add\")\n with self.subTest():\n self.assertEqual(tkt.phage_id, \"Trixie\")\n with self.subTest():\n self.assertEqual(tkt.description_field, \"product\")\n with self.subTest():\n self.assertEqual(tkt.eval_mode, \"final\")\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set())\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set([\"subcluster\", \"host_genus\",\n \"cluster\", \"accession\"]))\n\n def test_parse_import_ticket_data_3(self):\n \"\"\"Verify ticket is generated from correct data dictionary with\n no data in 'add' sets.\"\"\"\n self.ticket_dict1[\"host_genus\"] = \"retrieve\"\n self.ticket_dict1[\"cluster\"] = \"retrieve\"\n self.ticket_dict1[\"subcluster\"] = \"retrieve\"\n self.ticket_dict1[\"accession\"] = \"retrieve\"\n tkt = tickets.parse_import_ticket_data(self.ticket_dict1)\n with self.subTest():\n self.assertEqual(tkt.type, \"add\")\n with self.subTest():\n self.assertEqual(tkt.phage_id, \"Trixie\")\n with self.subTest():\n self.assertEqual(tkt.description_field, \"product\")\n with self.subTest():\n self.assertEqual(tkt.eval_mode, \"final\")\n with self.subTest():\n self.assertEqual(len(tkt.data_dict.keys()), 8)\n with self.subTest():\n self.assertEqual(tkt.data_retrieve, set([\"subcluster\", \"host_genus\",\n \"cluster\", 
\"accession\"]))\n with self.subTest():\n self.assertEqual(tkt.data_retain, set())\n with self.subTest():\n self.assertEqual(tkt.data_parse, set())\n with self.subTest():\n self.assertEqual(tkt.data_add, set())\n\n\n\n\n def test_set_empty_1(self):\n \"\"\"Verify one None value is set to ''.\"\"\"\n data_dict = {\"type\":\"add\",\"cluster\":None}\n tickets.set_empty(data_dict)\n with self.subTest():\n self.assertEqual(data_dict[\"type\"], \"add\")\n with self.subTest():\n self.assertEqual(data_dict[\"cluster\"], \"\")\n\n\n\n\n def test_set_keywords_1(self):\n \"\"\"Verify one value is lowercased.\"\"\"\n data_dict = {\"type\":\"ADD\",\n \"cluster\":\"RETRIEVE\",\n \"subcluster\": \"NONE\",\n \"host_genus\": \"PARSE\",\n \"retrieve_record\": \"RETAIN\"}\n keywords = set([\"retrieve\", \"retain\"])\n tickets.set_keywords(data_dict, self.keywords)\n with self.subTest():\n self.assertEqual(data_dict[\"type\"], \"ADD\")\n with self.subTest():\n self.assertEqual(data_dict[\"cluster\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(data_dict[\"subcluster\"], \"none\")\n with self.subTest():\n self.assertEqual(data_dict[\"host_genus\"], \"parse\")\n with self.subTest():\n self.assertEqual(data_dict[\"retrieve_record\"], \"retain\")\n\n\n\n\n def test_set_missing_keys_1(self):\n \"\"\"Verify one missing key is added.\"\"\"\n data_dict = {\"type\":\"add\", \"cluster\":\"\"}\n key_set = set([\"type\", \"host_genus\"])\n tickets.set_missing_keys(data_dict, key_set)\n with self.subTest():\n self.assertEqual(len(data_dict.keys()), 3)\n with self.subTest():\n self.assertEqual(data_dict[\"host_genus\"], \"\")\n\n def test_set_missing_keys_2(self):\n \"\"\"Verify no missing key is added.\"\"\"\n data_dict = {\"type\":\"add\", \"cluster\":\"\"}\n key_set = set([\"type\", \"cluster\"])\n tickets.set_missing_keys(data_dict, key_set)\n self.assertEqual(len(data_dict.keys()), 2)\n\n\n\n\n def test_set_dict_value_1(self):\n \"\"\"Verify empty value is replaced with first 
value.\"\"\"\n data_dict = {\"type\":\"add\", \"cluster\":\"\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"A\")\n\n def test_set_dict_value_2(self):\n \"\"\"Verify empty value is replaced with second value.\"\"\"\n data_dict = {\"type\":\"replace\", \"cluster\":\"\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"B\")\n\n def test_set_dict_value_3(self):\n \"\"\"Verify non-empty value is not replaced.\"\"\"\n data_dict = {\"type\":\"replace\", \"cluster\":\"C\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"C\")\n\n\n\n\n def test_construct_tickets_1(self):\n \"\"\"Verify two tickets are constructed correctly.\n The first ticket contains all required and optional fields.\n The second ticket contains all required fields.\"\"\"\n dict_list = [self.ticket_dict1, self.ticket_dict4]\n eval_data_dict = {\"eval_mode\": \"custom_eval_mode\",\n \"eval_flag_dict\": {\"check_locus_tag\": False}}\n list_of_tickets = tickets.construct_tickets(dict_list,\n eval_data_dict, \"function\", self.required_keys,\n self.optional_keys, self.keywords)\n with self.subTest():\n self.assertEqual(len(list_of_tickets), 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].id, 1)\n with self.subTest():\n self.assertEqual(list_of_tickets[0].eval_mode, \"final\")\n with self.subTest():\n self.assertEqual(list_of_tickets[0].description_field, \"product\")\n with self.subTest():\n self.assertTrue(list_of_tickets[0].eval_flags[\"check_locus_tag\"])\n with self.subTest():\n self.assertEqual(list_of_tickets[1].id, 2)\n with self.subTest():\n self.assertEqual(list_of_tickets[1].eval_mode, \"custom_eval_mode\")\n with self.subTest():\n self.assertEqual(list_of_tickets[1].description_field, \"function\")\n with self.subTest():\n self.assertFalse(list_of_tickets[1].eval_flags[\"check_locus_tag\"])\n\n def 
test_construct_tickets_2(self):\n \"\"\"Verify one ticket is constructed correctly. The second data\n dictionary is not structured correctly.\"\"\"\n dict_list = [self.ticket_dict1, self.ticket_dict2]\n eval_data_dict = {\"eval_mode\": \"custom_eval_mode\",\n \"eval_flag_dict\": {}}\n list_of_tickets = tickets.construct_tickets(dict_list,\n eval_data_dict, \"function\", self.required_keys,\n self.optional_keys, self.keywords)\n with self.subTest():\n self.assertEqual(len(list_of_tickets), 1)\n\n def test_construct_tickets_3(self):\n \"\"\"Verify four tickets constructed correctly. The first two tickets\n contain all required and optional fields. The second two tickets\n contain all required fields. Verify that each eval_flag dictionary\n is a separate object that can be modified without impacting the other\n eval_flag dictionaries.\"\"\"\n\n tkt_dict1 = {}\n tkt_dict1[\"type\"] = \"add\"\n tkt_dict1[\"phage_id\"] = \"Trixie\"\n tkt_dict1[\"description_field\"] = \"product\"\n tkt_dict1[\"eval_mode\"] = \"final\"\n\n tkt_dict2 = {}\n tkt_dict2[\"type\"] = \"add\"\n tkt_dict2[\"phage_id\"] = \"L5\"\n tkt_dict2[\"description_field\"] = \"product\"\n tkt_dict2[\"eval_mode\"] = \"final\"\n\n tkt_dict3 = {}\n tkt_dict3[\"type\"] = \"add\"\n tkt_dict3[\"phage_id\"] = \"RedRock\"\n\n tkt_dict4 = {}\n tkt_dict4[\"type\"] = \"add\"\n tkt_dict4[\"phage_id\"] = \"Bxb1\"\n\n dict_list = [tkt_dict1, tkt_dict2, tkt_dict3, tkt_dict4]\n eval_data_dict = {\"eval_mode\": \"custom_eval_mode\",\n \"eval_flag_dict\": {\"check_locus_tag\": False}}\n tkt_list = tickets.construct_tickets(dict_list,\n eval_data_dict, \"function\", self.required_keys,\n self.optional_keys, self.keywords)\n\n tkt_list[0].eval_flags[\"check_locus_tag\"] = 0\n tkt_list[1].eval_flags[\"check_locus_tag\"] = 1\n tkt_list[2].eval_flags[\"check_locus_tag\"] = 2\n tkt_list[3].eval_flags[\"check_locus_tag\"] = 3\n\n with self.subTest():\n self.assertEqual(tkt_list[0].eval_flags[\"check_locus_tag\"], 0)\n with 
self.subTest():\n self.assertEqual(tkt_list[1].eval_flags[\"check_locus_tag\"], 1)\n with self.subTest():\n self.assertEqual(tkt_list[2].eval_flags[\"check_locus_tag\"], 2)\n with self.subTest():\n self.assertEqual(tkt_list[3].eval_flags[\"check_locus_tag\"], 3)\n\n\n\n def test_identify_duplicates_1(self):\n \"\"\"Verify no duplicates are produced.\"\"\"\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = \"replace\"\n ticket2.phage_id = \"L5\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n\n\n def test_identify_duplicates_2(self):\n \"\"\"Verify two tickets with 'none' duplicates\n do not generate an error.\"\"\"\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = \"none\"\n ticket1.type = \"replace\"\n ticket1.phage_id = \"none\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = \"none\"\n ticket2.type = \"replace\"\n ticket2.phage_id = \"none\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n\n\n def test_identify_duplicates_3(self):\n \"\"\"Verify two tickets with id duplicates\n do generate an error.\"\"\"\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"L5\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n 
tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)\n\n\n\n def test_identify_duplicates_4(self):\n \"\"\"Verify two tickets with Primary Phage ID duplicates\n do generate an error.\"\"\"\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n\n def test_identify_duplicates_6(self):\n \"\"\"Verify two tickets with multiple duplicates\n do generate multiple errors.\"\"\"\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)\n\n\n\nclass TestTicketFunctions2(unittest.TestCase):\n\n def setUp(self):\n\n self.ticket1 = ticket.ImportTicket()\n self.ticket2 = ticket.ImportTicket()\n\n self.ticket1.phage_id = \"Trixie\"\n self.ticket2.phage_id = \"L5\"\n\n self.bundle1 = bundle.Bundle()\n self.bundle2 = bundle.Bundle()\n\n self.bundle1.ticket = self.ticket1\n self.bundle2.ticket = self.ticket2\n\n\n\n\nclass TestTicketFunctions3(unittest.TestCase):\n\n def setUp(self):\n self.data_dict = {}\n 
self.data_dict[\"host_genus\"] = \"Mycobacterium smegmatis\"\n self.data_dict[\"accession\"] = \"ABC123.1\"\n self.data_dict[\"annotation_status\"] = \"final\"\n self.data_dict[\"cluster\"] = \"A\"\n self.data_dict[\"subcluster\"] = \"A2\"\n self.data_dict[\"annotation_author\"] = 1\n self.data_dict[\"retrieve_record\"] = 1\n self.tkt1 = ticket.ImportTicket()\n self.tkt1.phage_id = \"Trixie_Draft\"\n self.tkt1.data_dict = self.data_dict\n\n def test_get_genome_1(self):\n \"\"\"Verify no data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.id, \"Trixie\")\n with self.subTest():\n self.assertEqual(gnm.name, \"Trixie_Draft\")\n with self.subTest():\n self.assertEqual(gnm.type, \"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.cluster, \"\")\n with self.subTest():\n self.assertEqual(gnm.subcluster, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_status, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_author, -1)\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, -1)\n with self.subTest():\n self.assertEqual(gnm.accession, \"\")\n\n def test_get_genome_2(self):\n \"\"\"Verify host_genus data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"host_genus\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"Mycobacterium\")\n with self.subTest():\n self.assertEqual(gnm.cluster, \"\")\n\n def test_get_genome_3(self):\n \"\"\"Verify cluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"cluster\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.cluster, \"A\")\n\n def test_get_genome_4(self):\n \"\"\"Verify 
subcluster data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"subcluster\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.subcluster, \"A2\")\n\n def test_get_genome_5(self):\n \"\"\"Verify annotation_status data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"annotation_status\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_status, \"final\")\n\n def test_get_genome_6(self):\n \"\"\"Verify annotation_author data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"annotation_author\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_author, 1)\n\n def test_get_genome_7(self):\n \"\"\"Verify retrieve_record data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"retrieve_record\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, 1)\n\n def test_get_genome_8(self):\n \"\"\"Verify accession data from ticket is added to genome.\"\"\"\n self.tkt1.data_add = set([\"accession\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.accession, \"ABC123\")\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
22,
24,
27,
39,
40
]
}
|
[
22,
24,
27,
39,
40
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def dir_slash():
    """Return the directory separator for the current platform.

    Uses a prefix check because the substring test ``'win' in sys.platform``
    also matches 'darwin' (macOS) and 'cygwin', which would wrongly yield a
    backslash on those platforms.
    """
    slash = '/'
    if sys.platform.startswith('win'):
        slash = '\\'
    return slash
<|reserved_special_token_1|>
import sys
def dir_slash():
    """Return the directory separator for the current platform.

    Fixed: the original substring test ``'win' in sys.platform`` matched
    'darwin' (macOS) and 'cygwin' as well, returning a backslash there.
    A prefix check restricts the backslash to genuine Windows platforms.
    """
    slash = '/'
    if sys.platform.startswith('win'):
        slash = '\\'
    return slash
|
flexible
|
{
"blob_id": "b12c8d0cb1cd1e48df6246fe3f16467b2db296e0",
"index": 745,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef dir_slash():\n slash = '/'\n if 'win' in sys.platform:\n slash = '\\\\'\n return slash\n",
"step-3": "import sys\n\n\ndef dir_slash():\n slash = '/'\n if 'win' in sys.platform:\n slash = '\\\\'\n return slash\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/python
##
# @file
# This file is part of SeisSol.
#
# @author Sebastian Rettenberger (rettenbs AT in.tum.de, http://www5.in.tum.de/wiki/index.php/Sebastian_Rettenberger,_M.Sc.)
#
# @section LICENSE
# Copyright (c) 2013, SeisSol Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import metis
import subprocess
METIS_MESH = 'metis.mesh'
METIS_GRAPH = 'metis.graph'
class Partitioner:
    """Converts a mesh into a graph and partitions it using metis."""

    def __init__(self, mesh, partitions, tmpdir):
        """Write the mesh, convert it to a graph and partition it.

        mesh -- mesh object providing elements()
        partitions -- number of partitions to create
        tmpdir -- temporary-directory object providing path(name)

        Raises Exception when a metis tool fails or when the partition
        size read back does not match the mesh size.
        """
        metisMesh = tmpdir.path(METIS_MESH)

        # Write metis mesh
        metis.MeshWriter(metisMesh, mesh.elements())

        # Convert to graph; -ncommon=3 makes elements sharing 3 nodes
        # (a face, for tetrahedra) adjacent in the dual graph
        metisGraph = tmpdir.path(METIS_GRAPH)
        self.__run(['m2gmetis', '-ncommon=3', metisMesh, metisGraph])

        # Run metis (recursive-bisection partitioning)
        self.__run(['gpmetis', '-ptype=rb', metisGraph, str(partitions)])

        # Read partitions back from the file gpmetis produced
        self.__partition = metis.PartitionReader(
            metisGraph + '.part.' + str(partitions),
            partitions, len(mesh.elements()))

        if self.__partition.size() != len(mesh.elements()):
            raise Exception('Mesh size and partition size do not match: mesh size = '
                + str(len(mesh.elements())) + ' != partition size = '
                + str(self.__partition.size()))

    @staticmethod
    def __run(command):
        """Run an external metis tool; raise with its stderr on failure.

        Factored out of __init__, which previously duplicated this
        Popen/communicate/returncode sequence for both tools.
        """
        p = subprocess.Popen(command,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        _, errmsg = p.communicate()
        if p.returncode:
            raise Exception(errmsg.strip())

    def partition(self):
        """Return the partition object read from metis."""
        return self.__partition
|
normal
|
{
"blob_id": "91e1ac12ba99a8efd8f7f26310244d83bdd4aa52",
"index": 2510,
"step-1": "<mask token>\n\n\nclass Partitioner:\n <mask token>\n\n def __init__(self, mesh, partitions, tmpdir):\n metisMesh = tmpdir.path(METIS_MESH)\n metis.MeshWriter(metisMesh, mesh.elements())\n metisGraph = tmpdir.path(METIS_GRAPH)\n p = subprocess.Popen(['m2gmetis', '-ncommon=3', metisMesh,\n metisGraph], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n p = subprocess.Popen(['gpmetis', '-ptype=rb', metisGraph, str(\n partitions)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n self.__partition = metis.PartitionReader(metisGraph + '.part.' +\n str(partitions), partitions, len(mesh.elements()))\n if self.__partition.size() != len(mesh.elements()):\n raise Exception(\n 'Mesh size and partition size do not match: mesh size = ' +\n str(len(mesh.elements())) + ' != partition size = ' + str(\n self.__partition.size()))\n\n def partition(self):\n return self.__partition\n",
"step-2": "<mask token>\n\n\nclass Partitioner:\n \"\"\"Converts a mesh into graph and partitions it using metis\"\"\"\n\n def __init__(self, mesh, partitions, tmpdir):\n metisMesh = tmpdir.path(METIS_MESH)\n metis.MeshWriter(metisMesh, mesh.elements())\n metisGraph = tmpdir.path(METIS_GRAPH)\n p = subprocess.Popen(['m2gmetis', '-ncommon=3', metisMesh,\n metisGraph], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n p = subprocess.Popen(['gpmetis', '-ptype=rb', metisGraph, str(\n partitions)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n self.__partition = metis.PartitionReader(metisGraph + '.part.' +\n str(partitions), partitions, len(mesh.elements()))\n if self.__partition.size() != len(mesh.elements()):\n raise Exception(\n 'Mesh size and partition size do not match: mesh size = ' +\n str(len(mesh.elements())) + ' != partition size = ' + str(\n self.__partition.size()))\n\n def partition(self):\n return self.__partition\n",
"step-3": "<mask token>\nMETIS_MESH = 'metis.mesh'\nMETIS_GRAPH = 'metis.graph'\n\n\nclass Partitioner:\n \"\"\"Converts a mesh into graph and partitions it using metis\"\"\"\n\n def __init__(self, mesh, partitions, tmpdir):\n metisMesh = tmpdir.path(METIS_MESH)\n metis.MeshWriter(metisMesh, mesh.elements())\n metisGraph = tmpdir.path(METIS_GRAPH)\n p = subprocess.Popen(['m2gmetis', '-ncommon=3', metisMesh,\n metisGraph], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n p = subprocess.Popen(['gpmetis', '-ptype=rb', metisGraph, str(\n partitions)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n self.__partition = metis.PartitionReader(metisGraph + '.part.' +\n str(partitions), partitions, len(mesh.elements()))\n if self.__partition.size() != len(mesh.elements()):\n raise Exception(\n 'Mesh size and partition size do not match: mesh size = ' +\n str(len(mesh.elements())) + ' != partition size = ' + str(\n self.__partition.size()))\n\n def partition(self):\n return self.__partition\n",
"step-4": "import metis\nimport subprocess\nMETIS_MESH = 'metis.mesh'\nMETIS_GRAPH = 'metis.graph'\n\n\nclass Partitioner:\n \"\"\"Converts a mesh into graph and partitions it using metis\"\"\"\n\n def __init__(self, mesh, partitions, tmpdir):\n metisMesh = tmpdir.path(METIS_MESH)\n metis.MeshWriter(metisMesh, mesh.elements())\n metisGraph = tmpdir.path(METIS_GRAPH)\n p = subprocess.Popen(['m2gmetis', '-ncommon=3', metisMesh,\n metisGraph], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n p = subprocess.Popen(['gpmetis', '-ptype=rb', metisGraph, str(\n partitions)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n self.__partition = metis.PartitionReader(metisGraph + '.part.' +\n str(partitions), partitions, len(mesh.elements()))\n if self.__partition.size() != len(mesh.elements()):\n raise Exception(\n 'Mesh size and partition size do not match: mesh size = ' +\n str(len(mesh.elements())) + ' != partition size = ' + str(\n self.__partition.size()))\n\n def partition(self):\n return self.__partition\n",
"step-5": "#!/usr/bin/python\n##\n# @file\n# This file is part of SeisSol.\n#\n# @author Sebastian Rettenberger (rettenbs AT in.tum.de, http://www5.in.tum.de/wiki/index.php/Sebastian_Rettenberger,_M.Sc.)\n#\n# @section LICENSE\n# Copyright (c) 2013, SeisSol Group\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# \n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# \n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport metis\n\nimport subprocess\n\nMETIS_MESH = 'metis.mesh'\nMETIS_GRAPH = 'metis.graph'\n\nclass Partitioner:\n \"\"\"Converts a mesh into graph and partitions it using metis\"\"\"\n \n def __init__(self, mesh, partitions, tmpdir):\n metisMesh = tmpdir.path(METIS_MESH)\n \n # Write metis mesh\n metis.MeshWriter(metisMesh, mesh.elements())\n \n # Convert to graph\n metisGraph = tmpdir.path(METIS_GRAPH)\n p = subprocess.Popen(['m2gmetis', '-ncommon=3', metisMesh, metisGraph],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n \n # Run metis\n p = subprocess.Popen(['gpmetis', '-ptype=rb', metisGraph, str(partitions)],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n \n # Read partitions\n self.__partition = metis.PartitionReader(metisGraph+'.part.'+str(partitions),\n partitions, len(mesh.elements()))\n \n if self.__partition.size() != len(mesh.elements()):\n raise Exception('Mesh size and partition size do not match: mesh size = '\n +str(len(mesh.elements()))+' != partition size = '+str(self.__partition.size()))\n \n def partition(self):\n return self.__partition\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 01:03:35 2020
@author: Jordan
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import date
## from COVID19_Simple import *
from COVID19_Diff import calc_diff_country
### Dash Stuff ###
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objects as go
import math
### Initial Code Block; Set Up Data ###
# Johns Hopkins CSSE time-series endpoints (confirmed / deaths / recovered).
# NOTE(review): `urls` is not read in this section -- the data is loaded from
# a local CSV below; presumably kept for refreshing that file. Confirm.
urls = ['https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv',
        'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv',
        'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv']
### Base Country Data (and Transformations)
# HACK: hard-coded absolute path -- this only runs on the author's machine.
final_df = pd.read_csv('C:/Users/Jordan/Documents/COVID19/final_df.csv')
final_df = calc_diff_country(final_df)  # adds per-day difference columns (see COVID19_Diff)
final_df['Date'] = pd.to_datetime(final_df['Date'])
final_df['Country_Region'] = final_df['Country_Region'].astype(str)
### 1000 Cases, 10 Deaths, 10 Recovered ### (Global)
## 1000 Cases ##
# Earliest date each country (excluding cruise ships) reached 1000 confirmed
# cases, then merged back so every daily row carries its country's start date.
cases_1000_start = final_df.loc[(final_df['Confirmed'] >= 1000) & (final_df['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()['Date']
cases_1000_start = cases_1000_start.reset_index()
cases_1000_start = cases_1000_start.rename(columns={"Date":"Start_Date"})
final_df['Country_Region'] = final_df['Country_Region'].str.strip()
# how='right' keeps every row of final_df; countries never reaching 1000
# cases get NaT Start_Date and are dropped by the notna() filter below.
cases_1000_start = pd.merge(cases_1000_start,final_df, on = ['Country_Region'],how='right')
cases_1000_start['Start_Date'] = pd.to_datetime(cases_1000_start['Start_Date'])
cases_1000_start['Date'] = pd.to_datetime(cases_1000_start['Date'])
cases_1000_start = cases_1000_start[cases_1000_start['Start_Date'].notna()]
cases_1000_start['Days Since 1000 Cases'] = (cases_1000_start['Date'] - cases_1000_start['Start_Date']).dt.days
## 100 Deaths ##
# Same construction for the 100-deaths milestone.
deaths_100_start = final_df.loc[(final_df['Deaths'] >= 100) & (final_df['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()['Date']
deaths_100_start = deaths_100_start.reset_index()
deaths_100_start = deaths_100_start.rename(columns={"Date":"Start_Date"})
final_df['Country_Region'] = final_df['Country_Region'].str.strip()
deaths_100_start = pd.merge(deaths_100_start,final_df, on = ['Country_Region'],how='right')
deaths_100_start['Start_Date'] = pd.to_datetime(deaths_100_start['Start_Date'])
deaths_100_start['Date'] = pd.to_datetime(deaths_100_start['Date'])
deaths_100_start = deaths_100_start[deaths_100_start['Start_Date'].notna()]
deaths_100_start['Days Since 100 Deaths'] = (deaths_100_start['Date'] - deaths_100_start['Start_Date']).dt.days
## Mortality Ratios ##
# Latest snapshot per country, then observed case-fatality percentage.
mort = final_df.groupby(['Country_Region'])['Date'].max().reset_index()
mort = pd.merge(mort, final_df, on=['Country_Region', 'Date'], how='left')
mort['Mortality_Percent'] = (mort['Deaths'] / mort['Confirmed'])*100.00
# Continent -> line colour mapping used by the global trend plots.
colors_dict_global = {'Europe':'#1D6996','Asia':'#CC503E','Africa':'#94346E', 'North America':'#38A6A5', 'Middle East': '#EDAD08', 'South America':'#E17C05', 'Caribbean & Central America':'#0F8554', 'Oceania':'#73AF48'}
### Dash Portion of the Script ###
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server=app.server  # exposed for WSGI deployment (e.g. gunicorn/Heroku)
# Page layout: heading, Global View (one dropdown-driven graph), then the
# per-country view (two dropdowns, a main graph and three daily subplots).
# Component ids ('global-dropdown', 'main-dropdown', 'box-1', ...) are the
# hooks the @app.callback handlers below bind to.
app.layout = html.Div(children=[
    html.H2(children='COVID-19 Dashboard'),
    html.H4(children='A Basic Dashboard to Help Track the COVID-19 Pandemic'),
    html.Br(),
    # --- Global View: explanatory text and graph selector ---
    html.H5(children='Global View'),
    html.P(children='The Global View highlights how Covid-19 is affecting countries across the world, and how the pandemic is expanding on a country by country basis. The Global View includes the following:'),
    html.Div([html.Ul([html.Li([html.B('Cumulative Cases by Country Since First 1000 Cases: '),'This allows us to see how cases are spreading since the first 1000 Cases on a country by country basis']),
    html.Li([html.B('Cumulative Cases by Country Since First 100 Deaths: '),'This allows us to see COVID-19 fatalities since the first 100 Deaths on a country by country basis']),
    html.Li([html.B('Observed Case - Mortality Ratio (Top 20 Countries by Confirmed Cases): '), 'This allows us to see the percentage of COVID19 fatalities based on reported cases and deaths. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)']),
    html.Li([html.B('Recoveries vs. Deaths By Country (Countries with over 100 deaths and 100 recoveries: '), 'This plots Recoveries against Deaths on a country by country basis. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)'])])], style={'font-size': 12}),
    html.Br(),
    dcc.Dropdown(id='global-dropdown', options=[{'label':y, 'value':y} for y in ['Global Cases Trend', 'Global Deaths Trend', '% Mortality by Confirmed Cases (Top 20 Countries)','Recoveries vs. Deaths By Country']], placeholder = 'Pick Graphs From Here...'),
    dcc.Graph(id='global-box-1'),
    html.Br(),
    # --- Country View: explanatory text, country + graph selectors ---
    html.H5(children='Country View'),
    html.P('The Country view allows us to see a closer look on how the COVID-19 Pandemic has expanded. As opposed to a high level aggregation, the Country View provides a day by day time series analysis of the effects of COVID-19. The Country View includes the following:'),
    html.Div(style={'font-size': 12}, children=[html.Ul([html.Li([html.B('Confirmed: '), 'Cumulative Confirmed Cases of COVID-19 since January 22nd, 2020']),
    html.Li([html.B('Recovered: '), 'Cumulative Recovered Cases of COVID-19 since January 22nd, 2020']),
    html.Li([html.B('Deaths: '),'Cumulative Deaths from COVID-19 since January 22nd, 2020']),
    html.Li([html.B('Total and Daily Confirmed Cases: '), 'Cumulative and Daily Cases Since January 22nd, 2020. This illustrates the curve of daily cases in relation to the total cases for a country'])])]),
    dcc.Dropdown(id='main-dropdown', options=[{'label': x, 'value': x} for x in list(final_df.Country_Region.unique())], placeholder = 'Pick a Country From Here...'),
    dcc.Dropdown(id='main-dropdown-2', placeholder = 'Pick Graphs From Here...'),
    dcc.Graph(id='box-1'),
    # Three side-by-side daily-stat cards (cases / deaths / recovered)
    html.Div([html.Div([html.H6(children='Most Recent New Cases'), html.H1(id='btext1'), dcc.Graph(id='subplot1')], className = 'four columns', style={'color': '#648FFF'}),
    html.Div([html.H6(children='Most Recent Daily Deaths'), html.H1(id='btext2'), dcc.Graph(id='subplot2')], className = 'four columns', style={'color': '#DC267F'}),
    html.Div([html.H6(children='Most Recent Daily Recovered'), html.H1(id='btext3'), dcc.Graph(id='subplot3')], className = 'four columns', style={'color': '#009E73', 'layout':'right'})], className="row")
])
## Callback Functionality ##
@app.callback(
    Output(component_id='global-box-1', component_property='figure'),
    [Input(component_id='global-dropdown', component_property='value')])
def global_update(select_global):
    """Build the figure for the global-view graph box ('global-box-1').

    Dash callback fired whenever the 'global-dropdown' selection changes.

    Parameters
    ----------
    select_global : str or None
        Dropdown value: one of 'Global Cases Trend' (also the default when
        nothing has been picked yet, i.e. ``None``),
        'Global Deaths Trend',
        '% Mortality by Confirmed Cases (Top 20 Countries)', or
        'Recoveries vs. Deaths By Country'.

    Returns
    -------
    dict
        A plotly figure dict ``{'data': [...], 'layout': go.Layout(...)}``
        consumed by the 'global-box-1' ``dcc.Graph`` component.
    """
    if select_global == 'Global Cases Trend' or select_global is None:
        # One line per country on a log y-axis, aligned on the day each
        # country first reached 1000 confirmed cases.
        fig1000 = []
        anno = []
        for group, dataframe in cases_1000_start.groupby(by='Country_Region'):
            di = dataframe.sort_values(by=['Days Since 1000 Cases'])
            trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(),
                               y=di['Confirmed'].tolist(),
                               mode='lines',
                               line=dict(color=colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=1),
                               opacity=0.6,
                               text=di.Country_Region.tolist(),
                               legendgroup=list(di.loc[:, 'Continent'])[0],
                               hovertemplate='<b>%{text}</b><br>'+'<br>Confirmed Cases: %{y}<br>'+'Days Since First 1000 Cases: %{x}<br>',
                               showlegend=False)
            # Country label pinned just past the end of its line; the y
            # coordinate must be log10-transformed because the axis is log.
            a = {'x': int(di['Days Since 1000 Cases'].max() + 1.5),
                 'y': np.log10(int(di['Confirmed'].max())),
                 'xref': 'x', 'yref': 'y',
                 'showarrow': False,
                 'text': list(di.loc[:, 'Country_Region'])[0],
                 'xanchor': 'right',
                 'yanchor': 'middle',
                 'align': 'center',
                 'font': {'size': 8, 'color': 'black'},
                 'bordercolor': "#ffffff",
                 'borderwidth': 1,
                 'borderpad': 1,
                 'bgcolor': "#ffffff",
                 'opacity': 0.6}
            fig1000.append(trace)
            anno.append(a)
        # Reference curves: exponential growth from 1000 cases doubling every
        # 3 days (rate ln(2)/3 ~= 0.2310) and every 7 days (ln(2)/7 ~= 0.0990).
        fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start['Days Since 1000 Cases'].max())),
                                  y=[1000 * (math.exp(0.2310491 * i)) for i in list(np.arange(cases_1000_start['Days Since 1000 Cases'].max()))],
                                  name='Cases Double Every 3 Days',
                                  mode='lines',
                                  opacity=.25,
                                  line=dict(color='grey', width=3, dash='dash'),
                                  text=['# of Cases Double Every 3 Days'],
                                  hovertemplate='<b>Cases Double Every 3 Days</b>',
                                  showlegend=True))
        fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start['Days Since 1000 Cases'].max())),
                                  y=[1000 * (math.exp(0.099021 * i)) for i in list(np.arange(cases_1000_start['Days Since 1000 Cases'].max()))],
                                  name='Cases Double Every 7 Days',
                                  mode='lines',
                                  opacity=.25,
                                  line=dict(color='grey', width=3, dash='dot'),
                                  text=['# of Cases Double Every 7 Days'],
                                  hovertemplate='<b>Cases Double Every 7 Days</b>',
                                  showlegend=True))
        layout_global = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'range': [np.log10(1000), np.log10(cases_1000_start['Confirmed'].max() * 1.10)], 'type': 'log', 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False},
                                  title='Overall Confirmed Cases',
                                  xaxis={'title': 'Days Since First 1000 Cases', 'range': [0, cases_1000_start['Days Since 1000 Cases'].max()], 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'showline': True, 'mirror': False}, height=750, hovermode='closest', annotations=anno)
        fig_global = {'data': fig1000, 'layout': layout_global}
        return fig_global
    elif select_global == 'Global Deaths Trend':
        # Same spaghetti plot, aligned on the day each country first
        # recorded 100 deaths.
        fig100 = []
        anno = []
        for group, dataframe in deaths_100_start.groupby(by='Country_Region'):
            di = dataframe.sort_values(by=['Days Since 100 Deaths'])
            # BUGFIX: hover text previously said 'Days Since First 1000 Cases'
            # (copied from the cases branch); this is the deaths trend.
            trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(),
                               y=di['Deaths'].tolist(),
                               mode='lines',
                               line=dict(color=colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=1),
                               opacity=0.6,
                               text=di.Country_Region.tolist(),
                               legendgroup=list(di.loc[:, 'Continent'])[0],
                               hovertemplate='<b>%{text}</b><br>'+'<br>Deaths: %{y}<br>'+'Days Since First 100 Deaths: %{x}<br>',
                               showlegend=False)
            a = {'x': int(di['Days Since 100 Deaths'].max() + 1.5),
                 'y': np.log10(int(di['Deaths'].max())),
                 'xref': 'x', 'yref': 'y',
                 'showarrow': False,
                 'text': list(di.loc[:, 'Country_Region'])[0],
                 'xanchor': 'right',
                 'yanchor': 'middle',
                 'align': 'center',
                 'font': {'size': 8, 'color': 'black'},
                 'bordercolor': "#ffffff",
                 'borderwidth': 1,
                 'borderpad': 1,
                 'bgcolor': "#ffffff",
                 'opacity': 0.6}
            fig100.append(trace)
            anno.append(a)
        # Reference curves: deaths doubling every 3 and every 7 days from 100.
        fig100.append(go.Scatter(x=list(np.arange(deaths_100_start['Days Since 100 Deaths'].max())),
                                 y=[100 * (math.exp(0.2310491 * i)) for i in list(np.arange(deaths_100_start['Days Since 100 Deaths'].max()))],
                                 name='Deaths Double Every 3 Days',
                                 mode='lines',
                                 opacity=.25,
                                 line=dict(color='grey', width=3, dash='dash'),
                                 text=['# of Deaths Double Every 3 Days'],
                                 hovertemplate='<b>Deaths Double Every 3 Days</b>',
                                 showlegend=True))
        fig100.append(go.Scatter(x=list(np.arange(deaths_100_start['Days Since 100 Deaths'].max())),
                                 y=[100 * (math.exp(0.099021 * i)) for i in list(np.arange(deaths_100_start['Days Since 100 Deaths'].max()))],
                                 name='Deaths Double Every 7 Days',
                                 mode='lines',
                                 opacity=.25,
                                 line=dict(color='grey', width=3, dash='dot'),
                                 text=['# of Deaths Double Every 7 Days'],
                                 hovertemplate='<b>Deaths Double Every 7 Days</b>',
                                 showlegend=True))
        # BUGFIX: the y-axis range previously used cases_1000_start['Deaths']
        # (the cases-aligned frame); the deaths plot must be scaled from
        # deaths_100_start, the frame actually plotted above.
        layout_global = go.Layout(yaxis={'title': 'Number of Deaths', 'range': [np.log10(100), np.log10(deaths_100_start['Deaths'].max() * 1.10)], 'type': 'log', 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False},
                                  title='Overall Deaths',
                                  xaxis={'title': 'Days Since First 100 deaths', 'range': [0, deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'showline': True, 'mirror': False}, height=750, hovermode='closest', annotations=anno)
        fig_global = {'data': fig100, 'layout': layout_global}
        return fig_global
    elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)':
        # Horizontal bars: top 20 countries by confirmed cases, ordered
        # bottom-to-top by observed case-mortality percentage.
        figmort = []
        m = mort.sort_values(by=['Confirmed'], ascending=False).head(20)
        m = m.sort_values(by=['Mortality_Percent'], ascending=True).reset_index()
        for i in range(len(m)):
            m1 = m.loc[i, 'Country_Region']
            m2 = m.loc[i, 'Mortality_Percent']
            trace = go.Bar(name='Observed Case - Mortality Ratio',
                           x=[m2],
                           y=[m1],
                           text=[round(m2, 2)],
                           orientation='h',
                           textposition='auto',
                           marker=dict(color='#FFB000', opacity=0.6, line=dict(color='rgba(255,176,0, 1)', width=1)),
                           hovertemplate='<b>%{y}</b><br>'+'<br>Observed Case Mortality Pct: %{text}%<br>',
                           showlegend=False)
            figmort.append(trace)
        layout_global = go.Layout(yaxis={'title': 'Country / Region', 'fixedrange': True, 'automargin': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'},
                                  title='Observed Case - Mortality Ratio',
                                  xaxis={'title': '% Mortality by Confirmed Cases (Top 20 Countries)', 'range': [0, m['Mortality_Percent'].max() + 2], 'fixedrange': True, 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=750, hovermode='closest')
        fig_global = {'data': figmort, 'layout': layout_global}
        return fig_global
    elif select_global == 'Recoveries vs. Deaths By Country':
        # Log-log scatter of recoveries against deaths, restricted to
        # countries with at least 100 deaths AND 100 recoveries.
        figscat = []
        rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >= 100)].reset_index()
        for i in range(len(rc)):
            scat = go.Scatter(
                x=[rc.loc[i, 'Deaths']],
                y=[rc.loc[i, 'Recovered']],
                mode='markers+text',
                text=[rc.loc[i, 'Country_Region']],
                marker_color=(colors_dict_global[rc.loc[i, 'Continent']]),
                showlegend=False,
                marker=dict(size=12, line_width=1, opacity=0.75),
                hovertemplate='<b>%{text}</b><br>'+'<br>Recoveries: %{y}<br>'+'Deaths: %{x}<br>',
                textposition='bottom center',
                textfont=dict(size=10, color='rgba(0, 0, 0, 0.6)')
            )
            figscat.append(scat)
        # Reference lines at 1:1, 2:1 and 1:2 recovery-to-death ratios.
        ref_x = list(np.linspace(100, rc['Deaths'].max(), 3))
        figscat.append(go.Scatter(x=ref_x,
                                  y=[i for i in ref_x],
                                  mode='lines',
                                  name='Deaths = Recoveries',
                                  opacity=.25,
                                  line=dict(color='grey', width=1),
                                  text=['# of Deaths = # of Recoveries'],
                                  hovertemplate='<b># of Deaths = # of Recoveries</b>',
                                  showlegend=True))
        figscat.append(go.Scatter(x=ref_x,
                                  y=[i * 2 for i in ref_x],
                                  mode='lines',
                                  name='2 Recoveries for Every Death',
                                  opacity=.25,
                                  line=dict(color='green', width=3, dash='dash'),
                                  text=['2 Recoveries for Every Death'],
                                  hovertemplate='<b>2 Recoveries for Every Death</b>',
                                  showlegend=True))
        figscat.append(go.Scatter(x=ref_x,
                                  y=[i / 2 for i in ref_x],
                                  mode='lines',
                                  name='2 Deaths for Every Recovery',
                                  opacity=.25,
                                  line=dict(color='firebrick', width=3, dash='dash'),
                                  text=['2 Deaths for Every Recovery'],
                                  hovertemplate='<b>2 Deaths for Every Recovery</b>',
                                  showlegend=True))
        layout_global = go.Layout(yaxis={'title': 'Number of Recoveries', 'fixedrange': True, 'automargin': True, 'range': [np.log10(100), np.log10(rc['Recovered'].max() * 1.10)], 'type': 'log', 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False},
                                  title='Recoveries vs. Deaths, By Country',
                                  xaxis={'title': 'Number of Deaths', 'fixedrange': True, 'range': [np.log10(100), np.log10(rc['Deaths'].max() * 1.10)], 'type': 'log', 'linewidth': 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': False}, height=750, hovermode='closest')
        fig_global = {'data': figscat, 'layout': layout_global}
        return fig_global
@app.callback(
    [Output(component_id='main-dropdown-2', component_property='options'),
     Output(component_id='btext1', component_property='children'),
     Output(component_id='subplot1', component_property='figure'),
     Output(component_id='btext2', component_property='children'),
     Output(component_id='subplot2', component_property='figure'),
     Output(component_id='btext3', component_property='children'),
     Output(component_id='subplot3', component_property='figure')],
    [Input(component_id='main-dropdown', component_property='value')])
def update_country(selected_country):
    """Populate the country view when the country dropdown changes.

    Dash callback fired on 'main-dropdown' changes. The original
    implementation duplicated the entire body across the ``None`` and
    non-``None`` branches; after defaulting the country the two paths were
    identical, so they are collapsed into one, with the three structurally
    identical subplot builders factored into a local helper.

    Parameters
    ----------
    selected_country : str or None
        Country picked in 'main-dropdown'; ``None`` (initial load)
        defaults to 'Canada'.

    Returns
    -------
    tuple
        (graph options for 'main-dropdown-2',
         latest daily confirmed count, confirmed-cases figure,
         latest daily deaths count, deaths figure,
         latest daily recovered count, recovered figure).
    """
    if selected_country is None:
        selected_country = 'Canada'  # default country on first load

    options = ['Confirmed', 'Recovered', 'Deaths', 'Total and Daily Confirmed Cases']
    vals = [{'label': i, 'value': i} for i in options]

    # All three subplots draw on the same per-country slice of final_df.
    country_df = final_df.loc[final_df['Country_Region'] == selected_country]
    latest_date = final_df['Date'].max()

    def _daily_figure(diff_col, bar_name, y_title, title_fmt, color):
        # Last 45 days of daily counts as bars, overlaid with a 5-day
        # moving average line in the same color.
        dates = country_df['Date'].tail(45)
        counts = country_df[diff_col].tail(45)
        data = [go.Bar(name=bar_name, x=dates, y=counts,
                       marker_color=color, opacity=0.6),
                go.Scatter(name='5 Day Moving Average', x=dates,
                           y=counts.rolling(window=5).mean(),
                           mode='lines', line=dict(color=color, width=3))]
        layout = go.Layout(
            yaxis={'title': y_title, 'automargin': True, 'showline': True,
                   'mirror': False, 'linewidth': 2, 'linecolor': 'black'},
            title=title_fmt.format(selected_country),
            xaxis={'type': 'date', 'automargin': True, 'showline': True,
                   'mirror': False, 'linewidth': 2, 'linecolor': 'black'},
            height=300, legend=dict(x=.2, y=-.15, orientation='h'))
        return {'data': data, 'layout': layout}

    fig_confirmed = _daily_figure('Confirmed_Diff', 'Daily Cases',
                                  'Number of Confirmed Cases',
                                  'Daily Confirmed Cases: {0} (Last 45 Days)',
                                  '#648FFF')
    fig_deaths = _daily_figure('Deaths_Diff', 'Daily Deaths',
                               'Number of Deaths',
                               'Daily Deaths: {0} (Last 45 Days)',
                               '#DC267F')
    fig_recovered = _daily_figure('Recovered_Diff', 'Daily Recoveries',
                                  'Number of Recovered',
                                  'Daily Recovered: {0} (Last 45 Days)',
                                  '#009E73')

    # Most recent daily deltas feed the three headline counters.
    latest_row = country_df.loc[country_df['Date'] == latest_date]
    return (vals,
            latest_row['Confirmed_Diff'], fig_confirmed,
            latest_row['Deaths_Diff'], fig_deaths,
            latest_row['Recovered_Diff'], fig_recovered)
@app.callback(
Output(component_id='box-1',component_property='figure'),
[Input(component_id='main-dropdown', component_property = 'value'),
Input(component_id='main-dropdown-2', component_property = 'value')])
def update_maingraph(selected_country, selected_graph):
    """Build the main 'Overall Progression of COVID-19' figure.

    Dash callback fired whenever either dropdown changes.

    Parameters
    ----------
    selected_country : str or None
        Country/region from 'main-dropdown'; defaults to 'Canada' when
        nothing has been selected yet (initial page load).
    selected_graph : str or None
        Metric from 'main-dropdown-2' ('Confirmed', 'Recovered', 'Deaths'
        or 'Total and Daily Confirmed Cases'); None means no metric chosen,
        which falls back to total confirmed cases.

    Returns
    -------
    dict
        {'data': [traces], 'layout': go.Layout} consumed by the dcc.Graph.
    """
    def _make_layout(title, y_max):
        # Shared axis/layout styling so every branch renders identically.
        return go.Layout(
            yaxis={'title': 'Number of Cases', 'range': [0, (y_max * 1.10)],
                   'automargin': True, 'showline': True, 'mirror': False,
                   'linewidth': 2, 'linecolor': 'black'},
            title=title,
            hovermode='x unified',
            xaxis=dict(title='Date', fixedrange=True, automargin=True,
                       showline=True, mirror=False, linewidth=2,
                       linecolor='black'))

    if selected_graph is None:
        # Initial load or country-only selection.  The two original branches
        # were byte-identical apart from the default-country assignment, so
        # they are merged here.
        if selected_country is None:
            selected_country = 'Canada'
        country_df = final_df.loc[final_df['Country_Region'] == selected_country]
        figmain_t = [go.Bar(name='Total Confirmed Cases',
                            x=country_df['Date'], y=country_df['Confirmed'],
                            marker_color='#648FFF')]
        figmain_l = _make_layout(
            'Overall Progression of COVID-19: {0}'.format(str(selected_country)),
            country_df['Confirmed'].max())
        return {'data': figmain_t, 'layout': figmain_l}
    elif selected_graph == 'Total and Daily Confirmed Cases':
        # Combined view: cumulative line plus daily-new filled area.
        country_df = final_df.loc[final_df['Country_Region'] == selected_country]
        figmain_t = [
            go.Scatter(name='Total Confirmed Cases', x=country_df['Date'],
                       y=country_df['Confirmed'],
                       line=dict(color='#1A85FF', width=1.5), mode='lines'),
            go.Scatter(name='Daily Confirmed Cases', x=country_df['Date'],
                       y=country_df['Confirmed_Diff'],
                       line=dict(color='#D41159', width=3), mode='lines',
                       fill='tozeroy')]
        figmain_l = _make_layout(
            'Overall Progression of COVID-19 ({0}): {1}'.format(
                str(selected_country), str(selected_graph)),
            country_df['Confirmed'].max())
        return {'data': figmain_t, 'layout': figmain_l}
    else:
        # Single-metric bar chart, one colour per metric (colour-blind-safe
        # palette consistent with the rest of the app).
        cols_dict = {'Confirmed': '#648FFF', 'Deaths': '#DC267F',
                     'Recovered': '#009E73'}
        country_df = final_df.loc[final_df['Country_Region'] == selected_country]
        figmain_t = [go.Bar(name='Total {0}'.format(selected_graph),
                            x=country_df['Date'], y=country_df[selected_graph],
                            marker_color=cols_dict[selected_graph])]
        figmain_l = _make_layout(
            'Overall Progression of COVID-19 ({0}): {1}'.format(
                str(selected_country), str(selected_graph)),
            country_df[selected_graph].max())
        return {'data': figmain_t, 'layout': figmain_l}
# Start the Dash development server only when this module is executed
# directly (not when imported, e.g. by a WSGI server).
if __name__ == '__main__':
    app.run_server()
|
normal
|
{
"blob_id": "1e02d584cde0cdf251aa36abd27b683219ef87ed",
"index": 7539,
"step-1": "<mask token>\n\n\n@app.callback(Output(component_id='global-box-1', component_property=\n 'figure'), [Input(component_id='global-dropdown', component_property=\n 'value')])\ndef global_update(select_global):\n if select_global == 'Global Cases Trend' or select_global is None:\n fig1000 = []\n anno = []\n for group, dataframe in cases_1000_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 1000 Cases'])\n trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(), y=di\n ['Confirmed'].tolist(), mode='lines', line=dict(color=\n colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Confirmed Cases: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 1000 Cases'].max() + 1.5), 'y': np\n .log10(int(di['Confirmed'].max())), 'xref': 'x', 'yref':\n 'y', 'showarrow': False, 'text': list(di.loc[:,\n 'Country_Region'])[0], 'xanchor': 'right', 'yanchor':\n 'middle', 'align': 'center', 'font': {'size': 8, 'color':\n 'black'}, 'bordercolor': '#ffffff', 'borderwidth': 1,\n 'borderpad': 1, 'bgcolor': '#ffffff', 'opacity': 0.6}\n fig1000.append(trace)\n anno.append(a)\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.2310491 *\n i)) for i in list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 3 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Cases Double Every 3 Days'], hovertemplate=\n '<b>Cases Double Every 3 Days</b>', showlegend=True))\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.099021 *\n i)) for i in list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 
7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Cases Double Every 7 Days'], hovertemplate=\n '<b>Cases Double Every 7 Days</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title':\n 'Number of Confirmed Cases', 'range': [np.log10(1000), np.log10\n (cases_1000_start['Confirmed'].max() * 1.1)], 'type': 'log',\n 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black',\n 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': \n False}, title='Overall Confirmed Cases', xaxis={'title':\n 'Days Since First 1000 Cases', 'range': [0, cases_1000_start[\n 'Days Since 1000 Cases'].max()], 'fixedrange': True,\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig1000, 'layout': layout_global}\n return fig_global\n elif select_global == 'Global Deaths Trend':\n fig100 = []\n anno = []\n for group, dataframe in deaths_100_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 100 Deaths'])\n trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(), y=di\n ['Deaths'].tolist(), mode='lines', line=dict(color=\n colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Deaths: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 100 Deaths'].max() + 1.5), 'y': np\n .log10(int(di['Deaths'].max())), 'xref': 'x', 'yref': 'y',\n 'showarrow': False, 'text': list(di.loc[:, 'Country_Region'\n ])[0], 'xanchor': 'right', 'yanchor': 'middle', 'align':\n 'center', 'font': {'size': 8, 'color': 'black'},\n 'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1,\n 'bgcolor': '#ffffff', 'opacity': 0.6}\n fig100.append(trace)\n anno.append(a)\n 
fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.2310491 *\n i)) for i in list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 'Deaths Double Every 3 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Deaths Double Every 3 Days'], hovertemplate=\n '<b>Deaths Double Every 3 Days</b>', showlegend=True))\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.099021 *\n i)) for i in list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 'Deaths Double Every 7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Deaths Double Every 7 Days'], hovertemplate=\n '<b>Deaths Double Every 7 Days</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Deaths',\n 'range': [np.log10(100), np.log10(cases_1000_start['Deaths'].\n max() * 1.1)], 'type': 'log', 'fixedrange': True, 'linewidth': \n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1,\n 'showline': True, 'mirror': False}, title='Overall Deaths',\n xaxis={'title': 'Days Since First 100 deaths', 'range': [0,\n deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange':\n True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig100, 'layout': layout_global}\n return fig_global\n elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)':\n figmort = []\n anno = []\n m = mort.sort_values(by=['Confirmed'], ascending=False).head(20)\n m = m.sort_values(by=['Mortality_Percent'], ascending=True\n ).reset_index()\n for i in range(len(m)):\n m1 = m.loc[i, 'Country_Region']\n m2 = m.loc[i, 'Mortality_Percent']\n trace = go.Bar(name='Observed Case - Mortality Ratio', x=[m2],\n y=[m1], 
text=[round(m.loc[i, 'Mortality_Percent'], 2)],\n orientation='h', textposition='auto', marker=dict(color=\n '#FFB000', opacity=0.6, line=dict(color=\n 'rgba(255,176,0, 1)', width=1)), hovertemplate=\n '<b>%{y}</b><br>' +\n '<br>Observed Case Mortaility Pct: %{text}%<br>',\n showlegend=False)\n figmort.append(trace)\n layout_global = go.Layout(yaxis={'title': 'Country / Region',\n 'fixedrange': True, 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Observed Case - Mortality Ratio', xaxis={'title':\n '% Mortality by Confirmed Cases (Top 20 Countries)', 'range': [\n 0, m['Mortality_Percent'].max() + 2], 'fixedrange': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=750, hovermode='closest')\n fig_global = {'data': figmort, 'layout': layout_global}\n return fig_global\n elif select_global == 'Recoveries vs. Deaths By Country':\n figscat = []\n rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >= 100)\n ].reset_index()\n for i in range(len(rc)):\n scat = go.Scatter(x=[rc.loc[i, 'Deaths']], y=[rc.loc[i,\n 'Recovered']], mode='markers+text', text=[rc.loc[i,\n 'Country_Region']], marker_color=colors_dict_global[rc.loc[\n i, 'Continent']], showlegend=False, marker=dict(size=12,\n line_width=1, opacity=0.75), hovertemplate=\n '<b>%{text}</b><br>' + '<br>Recoveries: %{y}<br>' +\n 'Deaths: %{x}<br>', textposition='bottom center', textfont=\n dict(size=10, color='rgba(0, 0, 0, 0.6)'))\n figscat.append(scat)\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[i for i in list(np.linspace(100, rc['Deaths'].max(),\n 3))], mode='lines', name='Deaths = Recoveries', opacity=0.25,\n line=dict(color='grey', width=1), text=[\n '# of Deaths = # of Recoveries'], hovertemplate=\n '<b># of Deaths = # of Recoveries</b>', showlegend=True))\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i * 2) for i in 
list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Recoveries for Every Death',\n opacity=0.25, line=dict(color='green', width=3, dash='dash'),\n text=['2 Recoveries for Every Death'], hovertemplate=\n '<b>2 Recoveries for Every Death</b>', showlegend=True))\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i / 2) for i in list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Deaths for Every Recovery',\n opacity=0.25, line=dict(color='firebrick', width=3, dash='dash'\n ), text=['2 Deaths for Every Recovery'], hovertemplate=\n '<b>2 Deaths for Every Recovery</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Recoveries',\n 'fixedrange': True, 'automargin': True, 'range': [np.log10(100),\n np.log10(rc['Recovered'].max() * 1.1)], 'type': 'log',\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'dtick': 1, 'showline': True, 'mirror': False}, title=\n 'Recoveries vs. Deaths, By Country', xaxis={'title':\n 'Number of Deaths', 'fixedrange': True, 'range': [np.log10(100),\n np.log10(rc['Deaths'].max() * 1.1)], 'type': 'log', 'linewidth':\n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1,\n 'showline': True, 'mirror': False}, height=750, hovermode='closest'\n )\n fig_global = {'data': figscat, 'layout': layout_global}\n return fig_global\n\n\n@app.callback([Output(component_id='main-dropdown-2', component_property=\n 'options'), Output(component_id='btext1', component_property='children'\n ), Output(component_id='subplot1', component_property='figure'), Output\n (component_id='btext2', component_property='children'), Output(\n component_id='subplot2', component_property='figure'), Output(\n component_id='btext3', component_property='children'), Output(\n component_id='subplot3', component_property='figure')], [Input(\n component_id='main-dropdown', component_property='value')])\ndef update_country(selected_country):\n if selected_country is None:\n 
selected_country = 'Canada'\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} (Last 45 Days)'.format(\n selected_country), xaxis={'type': 'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': 
True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1\n }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n else:\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 
'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} (Last 45 Days)'.format(\n selected_country), xaxis={'type': 'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), 
marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1\n }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n\n\n@app.callback(Output(component_id='box-1', component_property='figure'), [\n Input(component_id='main-dropdown', component_property='value'), Input(\n component_id='main-dropdown-2', component_property='value')])\ndef update_maingraph(selected_country, selected_graph):\n if selected_graph is None and selected_country is None:\n selected_country = 'Canada'\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, 
final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph is None and selected_country is not None:\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph == 'Total and Daily Confirmed Cases':\n figmain_t = [go.Scatter(name='Total Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], line=dict(color='#1A85FF', width=1.5), mode=\n 'lines'), go.Scatter(name='Daily Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'], line=dict(color='#D41159', width=3), mode=\n 'lines', fill='tozeroy')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, 
final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19 ({0}): {1}'.format(str(\n selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n else:\n cols_dict = {'Confirmed': '#648FFF', 'Deaths': '#DC267F',\n 'Recovered': '#009E73'}\n figmain_t = [go.Bar(name='Total {0}'.format(selected_graph), x=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Date'], y=final_df.loc[final_df['Country_Region'] ==\n selected_country, selected_graph], marker_color=cols_dict[\n selected_graph])]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n selected_graph].max() * 1.1], 'automargin': True, 'showline': \n True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'},\n title='Overall Progression of COVID-19 ({0}): {1}'.format(str(\n selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.callback(Output(component_id='global-box-1', component_property=\n 'figure'), [Input(component_id='global-dropdown', component_property=\n 'value')])\ndef global_update(select_global):\n if select_global == 'Global Cases Trend' or select_global is None:\n fig1000 = []\n anno = []\n for group, dataframe in cases_1000_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 1000 Cases'])\n trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(), y=di\n ['Confirmed'].tolist(), mode='lines', line=dict(color=\n colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Confirmed Cases: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 1000 Cases'].max() + 1.5), 'y': np\n .log10(int(di['Confirmed'].max())), 'xref': 'x', 'yref':\n 'y', 'showarrow': False, 'text': list(di.loc[:,\n 'Country_Region'])[0], 'xanchor': 'right', 'yanchor':\n 'middle', 'align': 'center', 'font': {'size': 8, 'color':\n 'black'}, 'bordercolor': '#ffffff', 'borderwidth': 1,\n 'borderpad': 1, 'bgcolor': '#ffffff', 'opacity': 0.6}\n fig1000.append(trace)\n anno.append(a)\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.2310491 *\n i)) for i in list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 3 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Cases Double Every 3 Days'], hovertemplate=\n '<b>Cases Double Every 3 Days</b>', showlegend=True))\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.099021 *\n i)) for i in list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 
7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Cases Double Every 7 Days'], hovertemplate=\n '<b>Cases Double Every 7 Days</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title':\n 'Number of Confirmed Cases', 'range': [np.log10(1000), np.log10\n (cases_1000_start['Confirmed'].max() * 1.1)], 'type': 'log',\n 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black',\n 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': \n False}, title='Overall Confirmed Cases', xaxis={'title':\n 'Days Since First 1000 Cases', 'range': [0, cases_1000_start[\n 'Days Since 1000 Cases'].max()], 'fixedrange': True,\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig1000, 'layout': layout_global}\n return fig_global\n elif select_global == 'Global Deaths Trend':\n fig100 = []\n anno = []\n for group, dataframe in deaths_100_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 100 Deaths'])\n trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(), y=di\n ['Deaths'].tolist(), mode='lines', line=dict(color=\n colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Deaths: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 100 Deaths'].max() + 1.5), 'y': np\n .log10(int(di['Deaths'].max())), 'xref': 'x', 'yref': 'y',\n 'showarrow': False, 'text': list(di.loc[:, 'Country_Region'\n ])[0], 'xanchor': 'right', 'yanchor': 'middle', 'align':\n 'center', 'font': {'size': 8, 'color': 'black'},\n 'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1,\n 'bgcolor': '#ffffff', 'opacity': 0.6}\n fig100.append(trace)\n anno.append(a)\n 
fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.2310491 *\n i)) for i in list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 'Deaths Double Every 3 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Deaths Double Every 3 Days'], hovertemplate=\n '<b>Deaths Double Every 3 Days</b>', showlegend=True))\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.099021 *\n i)) for i in list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 'Deaths Double Every 7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Deaths Double Every 7 Days'], hovertemplate=\n '<b>Deaths Double Every 7 Days</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Deaths',\n 'range': [np.log10(100), np.log10(cases_1000_start['Deaths'].\n max() * 1.1)], 'type': 'log', 'fixedrange': True, 'linewidth': \n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1,\n 'showline': True, 'mirror': False}, title='Overall Deaths',\n xaxis={'title': 'Days Since First 100 deaths', 'range': [0,\n deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange':\n True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig100, 'layout': layout_global}\n return fig_global\n elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)':\n figmort = []\n anno = []\n m = mort.sort_values(by=['Confirmed'], ascending=False).head(20)\n m = m.sort_values(by=['Mortality_Percent'], ascending=True\n ).reset_index()\n for i in range(len(m)):\n m1 = m.loc[i, 'Country_Region']\n m2 = m.loc[i, 'Mortality_Percent']\n trace = go.Bar(name='Observed Case - Mortality Ratio', x=[m2],\n y=[m1], 
text=[round(m.loc[i, 'Mortality_Percent'], 2)],\n orientation='h', textposition='auto', marker=dict(color=\n '#FFB000', opacity=0.6, line=dict(color=\n 'rgba(255,176,0, 1)', width=1)), hovertemplate=\n '<b>%{y}</b><br>' +\n '<br>Observed Case Mortaility Pct: %{text}%<br>',\n showlegend=False)\n figmort.append(trace)\n layout_global = go.Layout(yaxis={'title': 'Country / Region',\n 'fixedrange': True, 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Observed Case - Mortality Ratio', xaxis={'title':\n '% Mortality by Confirmed Cases (Top 20 Countries)', 'range': [\n 0, m['Mortality_Percent'].max() + 2], 'fixedrange': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=750, hovermode='closest')\n fig_global = {'data': figmort, 'layout': layout_global}\n return fig_global\n elif select_global == 'Recoveries vs. Deaths By Country':\n figscat = []\n rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >= 100)\n ].reset_index()\n for i in range(len(rc)):\n scat = go.Scatter(x=[rc.loc[i, 'Deaths']], y=[rc.loc[i,\n 'Recovered']], mode='markers+text', text=[rc.loc[i,\n 'Country_Region']], marker_color=colors_dict_global[rc.loc[\n i, 'Continent']], showlegend=False, marker=dict(size=12,\n line_width=1, opacity=0.75), hovertemplate=\n '<b>%{text}</b><br>' + '<br>Recoveries: %{y}<br>' +\n 'Deaths: %{x}<br>', textposition='bottom center', textfont=\n dict(size=10, color='rgba(0, 0, 0, 0.6)'))\n figscat.append(scat)\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[i for i in list(np.linspace(100, rc['Deaths'].max(),\n 3))], mode='lines', name='Deaths = Recoveries', opacity=0.25,\n line=dict(color='grey', width=1), text=[\n '# of Deaths = # of Recoveries'], hovertemplate=\n '<b># of Deaths = # of Recoveries</b>', showlegend=True))\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i * 2) for i in 
list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Recoveries for Every Death',\n opacity=0.25, line=dict(color='green', width=3, dash='dash'),\n text=['2 Recoveries for Every Death'], hovertemplate=\n '<b>2 Recoveries for Every Death</b>', showlegend=True))\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i / 2) for i in list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Deaths for Every Recovery',\n opacity=0.25, line=dict(color='firebrick', width=3, dash='dash'\n ), text=['2 Deaths for Every Recovery'], hovertemplate=\n '<b>2 Deaths for Every Recovery</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Recoveries',\n 'fixedrange': True, 'automargin': True, 'range': [np.log10(100),\n np.log10(rc['Recovered'].max() * 1.1)], 'type': 'log',\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'dtick': 1, 'showline': True, 'mirror': False}, title=\n 'Recoveries vs. Deaths, By Country', xaxis={'title':\n 'Number of Deaths', 'fixedrange': True, 'range': [np.log10(100),\n np.log10(rc['Deaths'].max() * 1.1)], 'type': 'log', 'linewidth':\n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1,\n 'showline': True, 'mirror': False}, height=750, hovermode='closest'\n )\n fig_global = {'data': figscat, 'layout': layout_global}\n return fig_global\n\n\n@app.callback([Output(component_id='main-dropdown-2', component_property=\n 'options'), Output(component_id='btext1', component_property='children'\n ), Output(component_id='subplot1', component_property='figure'), Output\n (component_id='btext2', component_property='children'), Output(\n component_id='subplot2', component_property='figure'), Output(\n component_id='btext3', component_property='children'), Output(\n component_id='subplot3', component_property='figure')], [Input(\n component_id='main-dropdown', component_property='value')])\ndef update_country(selected_country):\n if selected_country is None:\n 
selected_country = 'Canada'\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} (Last 45 Days)'.format(\n selected_country), xaxis={'type': 'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': 
True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1\n }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n else:\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 
'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} (Last 45 Days)'.format(\n selected_country), xaxis={'type': 'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), 
marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1\n }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n\n\n@app.callback(Output(component_id='box-1', component_property='figure'), [\n Input(component_id='main-dropdown', component_property='value'), Input(\n component_id='main-dropdown-2', component_property='value')])\ndef update_maingraph(selected_country, selected_graph):\n if selected_graph is None and selected_country is None:\n selected_country = 'Canada'\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, 
final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph is None and selected_country is not None:\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph == 'Total and Daily Confirmed Cases':\n figmain_t = [go.Scatter(name='Total Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], line=dict(color='#1A85FF', width=1.5), mode=\n 'lines'), go.Scatter(name='Daily Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'], line=dict(color='#D41159', width=3), mode=\n 'lines', fill='tozeroy')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, 
final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19 ({0}): {1}'.format(str(\n selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n else:\n cols_dict = {'Confirmed': '#648FFF', 'Deaths': '#DC267F',\n 'Recovered': '#009E73'}\n figmain_t = [go.Bar(name='Total {0}'.format(selected_graph), x=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Date'], y=final_df.loc[final_df['Country_Region'] ==\n selected_country, selected_graph], marker_color=cols_dict[\n selected_graph])]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n selected_graph].max() * 1.1], 'automargin': True, 'showline': \n True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'},\n title='Overall Progression of COVID-19 ({0}): {1}'.format(str(\n selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n\n\nif __name__ == '__main__':\n app.run_server()\n",
"step-3": "<mask token>\nurls = [\n 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'\n ,\n 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'\n ,\n 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'\n ]\nfinal_df = pd.read_csv('C:/Users/Jordan/Documents/COVID19/final_df.csv')\nfinal_df = calc_diff_country(final_df)\nfinal_df['Date'] = pd.to_datetime(final_df['Date'])\nfinal_df['Country_Region'] = final_df['Country_Region'].astype(str)\ncases_1000_start = final_df.loc[(final_df['Confirmed'] >= 1000) & (final_df\n ['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()[\n 'Date']\ncases_1000_start = cases_1000_start.reset_index()\ncases_1000_start = cases_1000_start.rename(columns={'Date': 'Start_Date'})\nfinal_df['Country_Region'] = final_df['Country_Region'].str.strip()\ncases_1000_start = pd.merge(cases_1000_start, final_df, on=[\n 'Country_Region'], how='right')\ncases_1000_start['Start_Date'] = pd.to_datetime(cases_1000_start['Start_Date'])\ncases_1000_start['Date'] = pd.to_datetime(cases_1000_start['Date'])\ncases_1000_start = cases_1000_start[cases_1000_start['Start_Date'].notna()]\ncases_1000_start['Days Since 1000 Cases'] = (cases_1000_start['Date'] -\n cases_1000_start['Start_Date']).dt.days\ndeaths_100_start = final_df.loc[(final_df['Deaths'] >= 100) & (final_df[\n 'Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()[\n 'Date']\ndeaths_100_start = deaths_100_start.reset_index()\ndeaths_100_start = deaths_100_start.rename(columns={'Date': 'Start_Date'})\nfinal_df['Country_Region'] = final_df['Country_Region'].str.strip()\ndeaths_100_start = pd.merge(deaths_100_start, final_df, on=[\n 'Country_Region'], 
how='right')\ndeaths_100_start['Start_Date'] = pd.to_datetime(deaths_100_start['Start_Date'])\ndeaths_100_start['Date'] = pd.to_datetime(deaths_100_start['Date'])\ndeaths_100_start = deaths_100_start[deaths_100_start['Start_Date'].notna()]\ndeaths_100_start['Days Since 100 Deaths'] = (deaths_100_start['Date'] -\n deaths_100_start['Start_Date']).dt.days\nmort = final_df.groupby(['Country_Region'])['Date'].max().reset_index()\nmort = pd.merge(mort, final_df, on=['Country_Region', 'Date'], how='left')\nmort['Mortality_Percent'] = mort['Deaths'] / mort['Confirmed'] * 100.0\ncolors_dict_global = {'Europe': '#1D6996', 'Asia': '#CC503E', 'Africa':\n '#94346E', 'North America': '#38A6A5', 'Middle East': '#EDAD08',\n 'South America': '#E17C05', 'Caribbean & Central America': '#0F8554',\n 'Oceania': '#73AF48'}\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\nserver = app.server\napp.layout = html.Div(children=[html.H2(children='COVID-19 Dashboard'),\n html.H4(children=\n 'A Basic Dashboard to Help Track the COVID-19 Pandemic'), html.Br(),\n html.H5(children='Global View'), html.P(children=\n 'The Global View highlights how Covid-19 is affecting countries across the world, and how the pandemic is expanding on a country by country basis. The Global View includes the following:'\n ), html.Div([html.Ul([html.Li([html.B(\n 'Cumulative Cases by Country Since First 1000 Cases: '),\n 'This allows us to see how cases are spreading since the first 1000 Cases on a country by country basis'\n ]), html.Li([html.B(\n 'Cumulative Cases by Country Since First 100 Deaths: '),\n 'This allows us to see COVID-19 fatalities since the first 100 Deaths on a country by country basis'\n ]), html.Li([html.B(\n 'Observed Case - Mortality Ratio (Top 20 Countries by Confirmed Cases): '\n ),\n 'This allows us to see the percentage of COVID19 fatalities based on reported cases and deaths. 
(Note that reporting standards vary from country to country, so this is for illustrative purposes only)'\n ]), html.Li([html.B(\n 'Recoveries vs. Deaths By Country (Countries with over 100 deaths and 100 recoveries: '\n ),\n 'This plots Recoveries against Deaths on a country by country basis. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)'\n ])])], style={'font-size': 12}), html.Br(), dcc.Dropdown(id=\n 'global-dropdown', options=[{'label': y, 'value': y} for y in [\n 'Global Cases Trend', 'Global Deaths Trend',\n '% Mortality by Confirmed Cases (Top 20 Countries)',\n 'Recoveries vs. Deaths By Country']], placeholder=\n 'Pick Graphs From Here...'), dcc.Graph(id='global-box-1'), html.Br(),\n html.H5(children='Country View'), html.P(\n 'The Country view allows us to see a closer look on how the COVID-19 Pandemic has expanded. As opposed to a high level aggregation, the Country View provides a day by day time series analysis of the effects of COVID-19. The Country View includes the following:'\n ), html.Div(style={'font-size': 12}, children=[html.Ul([html.Li([html.B\n ('Confirmed: '),\n 'Cumulative Confirmed Cases of COVID-19 since January 22nd, 2020']),\n html.Li([html.B('Recovered: '),\n 'Cumulative Recovered Cases of COVID-19 since January 22nd, 2020']),\n html.Li([html.B('Deaths: '),\n 'Cumulative Deaths from COVID-19 since January 22nd, 2020']), html.Li([\n html.B('Total and Daily Confirmed Cases: '),\n 'Cumulative and Daily Cases Since January 22nd, 2020. 
This illustrates the curve of daily cases in relation to the total cases for a country'\n ])])]), dcc.Dropdown(id='main-dropdown', options=[{'label': x, 'value':\n x} for x in list(final_df.Country_Region.unique())], placeholder=\n 'Pick a Country From Here...'), dcc.Dropdown(id='main-dropdown-2',\n placeholder='Pick Graphs From Here...'), dcc.Graph(id='box-1'), html.\n Div([html.Div([html.H6(children='Most Recent New Cases'), html.H1(id=\n 'btext1'), dcc.Graph(id='subplot1')], className='four columns', style={\n 'color': '#648FFF'}), html.Div([html.H6(children=\n 'Most Recent Daily Deaths'), html.H1(id='btext2'), dcc.Graph(id=\n 'subplot2')], className='four columns', style={'color': '#DC267F'}),\n html.Div([html.H6(children='Most Recent Daily Recovered'), html.H1(id=\n 'btext3'), dcc.Graph(id='subplot3')], className='four columns', style={\n 'color': '#009E73', 'layout': 'right'})], className='row')])\n\n\n@app.callback(Output(component_id='global-box-1', component_property=\n 'figure'), [Input(component_id='global-dropdown', component_property=\n 'value')])\ndef global_update(select_global):\n if select_global == 'Global Cases Trend' or select_global is None:\n fig1000 = []\n anno = []\n for group, dataframe in cases_1000_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 1000 Cases'])\n trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(), y=di\n ['Confirmed'].tolist(), mode='lines', line=dict(color=\n colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Confirmed Cases: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 1000 Cases'].max() + 1.5), 'y': np\n .log10(int(di['Confirmed'].max())), 'xref': 'x', 'yref':\n 'y', 'showarrow': False, 'text': list(di.loc[:,\n 'Country_Region'])[0], 'xanchor': 'right', 'yanchor':\n 
'middle', 'align': 'center', 'font': {'size': 8, 'color':\n 'black'}, 'bordercolor': '#ffffff', 'borderwidth': 1,\n 'borderpad': 1, 'bgcolor': '#ffffff', 'opacity': 0.6}\n fig1000.append(trace)\n anno.append(a)\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.2310491 *\n i)) for i in list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 3 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Cases Double Every 3 Days'], hovertemplate=\n '<b>Cases Double Every 3 Days</b>', showlegend=True))\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.099021 *\n i)) for i in list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Cases Double Every 7 Days'], hovertemplate=\n '<b>Cases Double Every 7 Days</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title':\n 'Number of Confirmed Cases', 'range': [np.log10(1000), np.log10\n (cases_1000_start['Confirmed'].max() * 1.1)], 'type': 'log',\n 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black',\n 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': \n False}, title='Overall Confirmed Cases', xaxis={'title':\n 'Days Since First 1000 Cases', 'range': [0, cases_1000_start[\n 'Days Since 1000 Cases'].max()], 'fixedrange': True,\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig1000, 'layout': layout_global}\n return fig_global\n elif select_global == 'Global Deaths Trend':\n fig100 = []\n anno = []\n for group, dataframe in deaths_100_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 100 
Deaths'])\n trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(), y=di\n ['Deaths'].tolist(), mode='lines', line=dict(color=\n colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Deaths: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 100 Deaths'].max() + 1.5), 'y': np\n .log10(int(di['Deaths'].max())), 'xref': 'x', 'yref': 'y',\n 'showarrow': False, 'text': list(di.loc[:, 'Country_Region'\n ])[0], 'xanchor': 'right', 'yanchor': 'middle', 'align':\n 'center', 'font': {'size': 8, 'color': 'black'},\n 'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1,\n 'bgcolor': '#ffffff', 'opacity': 0.6}\n fig100.append(trace)\n anno.append(a)\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.2310491 *\n i)) for i in list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 'Deaths Double Every 3 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Deaths Double Every 3 Days'], hovertemplate=\n '<b>Deaths Double Every 3 Days</b>', showlegend=True))\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.099021 *\n i)) for i in list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 'Deaths Double Every 7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Deaths Double Every 7 Days'], hovertemplate=\n '<b>Deaths Double Every 7 Days</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Deaths',\n 'range': [np.log10(100), np.log10(cases_1000_start['Deaths'].\n max() * 1.1)], 'type': 'log', 'fixedrange': True, 'linewidth': \n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 
1,\n 'showline': True, 'mirror': False}, title='Overall Deaths',\n xaxis={'title': 'Days Since First 100 deaths', 'range': [0,\n deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange':\n True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig100, 'layout': layout_global}\n return fig_global\n elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)':\n figmort = []\n anno = []\n m = mort.sort_values(by=['Confirmed'], ascending=False).head(20)\n m = m.sort_values(by=['Mortality_Percent'], ascending=True\n ).reset_index()\n for i in range(len(m)):\n m1 = m.loc[i, 'Country_Region']\n m2 = m.loc[i, 'Mortality_Percent']\n trace = go.Bar(name='Observed Case - Mortality Ratio', x=[m2],\n y=[m1], text=[round(m.loc[i, 'Mortality_Percent'], 2)],\n orientation='h', textposition='auto', marker=dict(color=\n '#FFB000', opacity=0.6, line=dict(color=\n 'rgba(255,176,0, 1)', width=1)), hovertemplate=\n '<b>%{y}</b><br>' +\n '<br>Observed Case Mortaility Pct: %{text}%<br>',\n showlegend=False)\n figmort.append(trace)\n layout_global = go.Layout(yaxis={'title': 'Country / Region',\n 'fixedrange': True, 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Observed Case - Mortality Ratio', xaxis={'title':\n '% Mortality by Confirmed Cases (Top 20 Countries)', 'range': [\n 0, m['Mortality_Percent'].max() + 2], 'fixedrange': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=750, hovermode='closest')\n fig_global = {'data': figmort, 'layout': layout_global}\n return fig_global\n elif select_global == 'Recoveries vs. 
Deaths By Country':\n figscat = []\n rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >= 100)\n ].reset_index()\n for i in range(len(rc)):\n scat = go.Scatter(x=[rc.loc[i, 'Deaths']], y=[rc.loc[i,\n 'Recovered']], mode='markers+text', text=[rc.loc[i,\n 'Country_Region']], marker_color=colors_dict_global[rc.loc[\n i, 'Continent']], showlegend=False, marker=dict(size=12,\n line_width=1, opacity=0.75), hovertemplate=\n '<b>%{text}</b><br>' + '<br>Recoveries: %{y}<br>' +\n 'Deaths: %{x}<br>', textposition='bottom center', textfont=\n dict(size=10, color='rgba(0, 0, 0, 0.6)'))\n figscat.append(scat)\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[i for i in list(np.linspace(100, rc['Deaths'].max(),\n 3))], mode='lines', name='Deaths = Recoveries', opacity=0.25,\n line=dict(color='grey', width=1), text=[\n '# of Deaths = # of Recoveries'], hovertemplate=\n '<b># of Deaths = # of Recoveries</b>', showlegend=True))\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i * 2) for i in list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Recoveries for Every Death',\n opacity=0.25, line=dict(color='green', width=3, dash='dash'),\n text=['2 Recoveries for Every Death'], hovertemplate=\n '<b>2 Recoveries for Every Death</b>', showlegend=True))\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i / 2) for i in list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Deaths for Every Recovery',\n opacity=0.25, line=dict(color='firebrick', width=3, dash='dash'\n ), text=['2 Deaths for Every Recovery'], hovertemplate=\n '<b>2 Deaths for Every Recovery</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Recoveries',\n 'fixedrange': True, 'automargin': True, 'range': [np.log10(100),\n np.log10(rc['Recovered'].max() * 1.1)], 'type': 'log',\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'dtick': 1, 
'showline': True, 'mirror': False}, title=\n 'Recoveries vs. Deaths, By Country', xaxis={'title':\n 'Number of Deaths', 'fixedrange': True, 'range': [np.log10(100),\n np.log10(rc['Deaths'].max() * 1.1)], 'type': 'log', 'linewidth':\n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1,\n 'showline': True, 'mirror': False}, height=750, hovermode='closest'\n )\n fig_global = {'data': figscat, 'layout': layout_global}\n return fig_global\n\n\n@app.callback([Output(component_id='main-dropdown-2', component_property=\n 'options'), Output(component_id='btext1', component_property='children'\n ), Output(component_id='subplot1', component_property='figure'), Output\n (component_id='btext2', component_property='children'), Output(\n component_id='subplot2', component_property='figure'), Output(\n component_id='btext3', component_property='children'), Output(\n component_id='subplot3', component_property='figure')], [Input(\n component_id='main-dropdown', component_property='value')])\ndef update_country(selected_country):\n if selected_country is None:\n selected_country = 'Canada'\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} 
(Last 45 Days)'.format(\n selected_country), xaxis={'type': 'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n 
xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1\n }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n else:\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} (Last 45 Days)'.format(\n selected_country), xaxis={'type': 'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n 
final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': 
layout_t1\n }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n\n\n@app.callback(Output(component_id='box-1', component_property='figure'), [\n Input(component_id='main-dropdown', component_property='value'), Input(\n component_id='main-dropdown-2', component_property='value')])\ndef update_maingraph(selected_country, selected_graph):\n if selected_graph is None and selected_country is None:\n selected_country = 'Canada'\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph is None and selected_country is not None:\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': 
True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph == 'Total and Daily Confirmed Cases':\n figmain_t = [go.Scatter(name='Total Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], line=dict(color='#1A85FF', width=1.5), mode=\n 'lines'), go.Scatter(name='Daily Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'], line=dict(color='#D41159', width=3), mode=\n 'lines', fill='tozeroy')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19 ({0}): {1}'.format(str(\n selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n else:\n cols_dict = {'Confirmed': '#648FFF', 'Deaths': '#DC267F',\n 'Recovered': '#009E73'}\n figmain_t = [go.Bar(name='Total {0}'.format(selected_graph), x=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Date'], y=final_df.loc[final_df['Country_Region'] ==\n selected_country, selected_graph], marker_color=cols_dict[\n selected_graph])]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == 
selected_country,\n selected_graph].max() * 1.1], 'automargin': True, 'showline': \n True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'},\n title='Overall Progression of COVID-19 ({0}): {1}'.format(str(\n selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n\n\nif __name__ == '__main__':\n app.run_server()\n",
"step-4": "<mask token>\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import date\nfrom COVID19_Diff import calc_diff_country\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport plotly.graph_objects as go\nimport math\nurls = [\n 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'\n ,\n 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'\n ,\n 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'\n ]\nfinal_df = pd.read_csv('C:/Users/Jordan/Documents/COVID19/final_df.csv')\nfinal_df = calc_diff_country(final_df)\nfinal_df['Date'] = pd.to_datetime(final_df['Date'])\nfinal_df['Country_Region'] = final_df['Country_Region'].astype(str)\ncases_1000_start = final_df.loc[(final_df['Confirmed'] >= 1000) & (final_df\n ['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()[\n 'Date']\ncases_1000_start = cases_1000_start.reset_index()\ncases_1000_start = cases_1000_start.rename(columns={'Date': 'Start_Date'})\nfinal_df['Country_Region'] = final_df['Country_Region'].str.strip()\ncases_1000_start = pd.merge(cases_1000_start, final_df, on=[\n 'Country_Region'], how='right')\ncases_1000_start['Start_Date'] = pd.to_datetime(cases_1000_start['Start_Date'])\ncases_1000_start['Date'] = pd.to_datetime(cases_1000_start['Date'])\ncases_1000_start = cases_1000_start[cases_1000_start['Start_Date'].notna()]\ncases_1000_start['Days Since 1000 Cases'] = (cases_1000_start['Date'] -\n cases_1000_start['Start_Date']).dt.days\ndeaths_100_start = final_df.loc[(final_df['Deaths'] >= 100) & (final_df[\n 'Country_Region'] != 'Cruise 
Ship')].groupby(['Country_Region']).min()[\n 'Date']\ndeaths_100_start = deaths_100_start.reset_index()\ndeaths_100_start = deaths_100_start.rename(columns={'Date': 'Start_Date'})\nfinal_df['Country_Region'] = final_df['Country_Region'].str.strip()\ndeaths_100_start = pd.merge(deaths_100_start, final_df, on=[\n 'Country_Region'], how='right')\ndeaths_100_start['Start_Date'] = pd.to_datetime(deaths_100_start['Start_Date'])\ndeaths_100_start['Date'] = pd.to_datetime(deaths_100_start['Date'])\ndeaths_100_start = deaths_100_start[deaths_100_start['Start_Date'].notna()]\ndeaths_100_start['Days Since 100 Deaths'] = (deaths_100_start['Date'] -\n deaths_100_start['Start_Date']).dt.days\nmort = final_df.groupby(['Country_Region'])['Date'].max().reset_index()\nmort = pd.merge(mort, final_df, on=['Country_Region', 'Date'], how='left')\nmort['Mortality_Percent'] = mort['Deaths'] / mort['Confirmed'] * 100.0\ncolors_dict_global = {'Europe': '#1D6996', 'Asia': '#CC503E', 'Africa':\n '#94346E', 'North America': '#38A6A5', 'Middle East': '#EDAD08',\n 'South America': '#E17C05', 'Caribbean & Central America': '#0F8554',\n 'Oceania': '#73AF48'}\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\nserver = app.server\napp.layout = html.Div(children=[html.H2(children='COVID-19 Dashboard'),\n html.H4(children=\n 'A Basic Dashboard to Help Track the COVID-19 Pandemic'), html.Br(),\n html.H5(children='Global View'), html.P(children=\n 'The Global View highlights how Covid-19 is affecting countries across the world, and how the pandemic is expanding on a country by country basis. 
The Global View includes the following:'\n ), html.Div([html.Ul([html.Li([html.B(\n 'Cumulative Cases by Country Since First 1000 Cases: '),\n 'This allows us to see how cases are spreading since the first 1000 Cases on a country by country basis'\n ]), html.Li([html.B(\n 'Cumulative Cases by Country Since First 100 Deaths: '),\n 'This allows us to see COVID-19 fatalities since the first 100 Deaths on a country by country basis'\n ]), html.Li([html.B(\n 'Observed Case - Mortality Ratio (Top 20 Countries by Confirmed Cases): '\n ),\n 'This allows us to see the percentage of COVID19 fatalities based on reported cases and deaths. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)'\n ]), html.Li([html.B(\n 'Recoveries vs. Deaths By Country (Countries with over 100 deaths and 100 recoveries: '\n ),\n 'This plots Recoveries against Deaths on a country by country basis. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)'\n ])])], style={'font-size': 12}), html.Br(), dcc.Dropdown(id=\n 'global-dropdown', options=[{'label': y, 'value': y} for y in [\n 'Global Cases Trend', 'Global Deaths Trend',\n '% Mortality by Confirmed Cases (Top 20 Countries)',\n 'Recoveries vs. Deaths By Country']], placeholder=\n 'Pick Graphs From Here...'), dcc.Graph(id='global-box-1'), html.Br(),\n html.H5(children='Country View'), html.P(\n 'The Country view allows us to see a closer look on how the COVID-19 Pandemic has expanded. As opposed to a high level aggregation, the Country View provides a day by day time series analysis of the effects of COVID-19. 
The Country View includes the following:'\n ), html.Div(style={'font-size': 12}, children=[html.Ul([html.Li([html.B\n ('Confirmed: '),\n 'Cumulative Confirmed Cases of COVID-19 since January 22nd, 2020']),\n html.Li([html.B('Recovered: '),\n 'Cumulative Recovered Cases of COVID-19 since January 22nd, 2020']),\n html.Li([html.B('Deaths: '),\n 'Cumulative Deaths from COVID-19 since January 22nd, 2020']), html.Li([\n html.B('Total and Daily Confirmed Cases: '),\n 'Cumulative and Daily Cases Since January 22nd, 2020. This illustrates the curve of daily cases in relation to the total cases for a country'\n ])])]), dcc.Dropdown(id='main-dropdown', options=[{'label': x, 'value':\n x} for x in list(final_df.Country_Region.unique())], placeholder=\n 'Pick a Country From Here...'), dcc.Dropdown(id='main-dropdown-2',\n placeholder='Pick Graphs From Here...'), dcc.Graph(id='box-1'), html.\n Div([html.Div([html.H6(children='Most Recent New Cases'), html.H1(id=\n 'btext1'), dcc.Graph(id='subplot1')], className='four columns', style={\n 'color': '#648FFF'}), html.Div([html.H6(children=\n 'Most Recent Daily Deaths'), html.H1(id='btext2'), dcc.Graph(id=\n 'subplot2')], className='four columns', style={'color': '#DC267F'}),\n html.Div([html.H6(children='Most Recent Daily Recovered'), html.H1(id=\n 'btext3'), dcc.Graph(id='subplot3')], className='four columns', style={\n 'color': '#009E73', 'layout': 'right'})], className='row')])\n\n\n@app.callback(Output(component_id='global-box-1', component_property=\n 'figure'), [Input(component_id='global-dropdown', component_property=\n 'value')])\ndef global_update(select_global):\n if select_global == 'Global Cases Trend' or select_global is None:\n fig1000 = []\n anno = []\n for group, dataframe in cases_1000_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 1000 Cases'])\n trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(), y=di\n ['Confirmed'].tolist(), mode='lines', line=dict(color=\n 
colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Confirmed Cases: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 1000 Cases'].max() + 1.5), 'y': np\n .log10(int(di['Confirmed'].max())), 'xref': 'x', 'yref':\n 'y', 'showarrow': False, 'text': list(di.loc[:,\n 'Country_Region'])[0], 'xanchor': 'right', 'yanchor':\n 'middle', 'align': 'center', 'font': {'size': 8, 'color':\n 'black'}, 'bordercolor': '#ffffff', 'borderwidth': 1,\n 'borderpad': 1, 'bgcolor': '#ffffff', 'opacity': 0.6}\n fig1000.append(trace)\n anno.append(a)\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.2310491 *\n i)) for i in list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 3 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Cases Double Every 3 Days'], hovertemplate=\n '<b>Cases Double Every 3 Days</b>', showlegend=True))\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max())), y=[(1000 * math.exp(0.099021 *\n i)) for i in list(np.arange(cases_1000_start[\n 'Days Since 1000 Cases'].max()))], name=\n 'Cases Double Every 7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Cases Double Every 7 Days'], hovertemplate=\n '<b>Cases Double Every 7 Days</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title':\n 'Number of Confirmed Cases', 'range': [np.log10(1000), np.log10\n (cases_1000_start['Confirmed'].max() * 1.1)], 'type': 'log',\n 'fixedrange': True, 'linewidth': 2, 'linecolor': 'black',\n 'showgrid': False, 'dtick': 1, 'showline': True, 'mirror': \n False}, title='Overall Confirmed Cases', xaxis={'title':\n 'Days Since 
First 1000 Cases', 'range': [0, cases_1000_start[\n 'Days Since 1000 Cases'].max()], 'fixedrange': True,\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig1000, 'layout': layout_global}\n return fig_global\n elif select_global == 'Global Deaths Trend':\n fig100 = []\n anno = []\n for group, dataframe in deaths_100_start.groupby(by='Country_Region'):\n di = dataframe.sort_values(by=['Days Since 100 Deaths'])\n trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(), y=di\n ['Deaths'].tolist(), mode='lines', line=dict(color=\n colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=\n 1), opacity=0.6, text=di.Country_Region.tolist(),\n legendgroup=list(di.loc[:, 'Continent'])[0], hovertemplate=\n '<b>%{text}</b><br>' + '<br>Deaths: %{y}<br>' +\n 'Days Since First 1000 Cases: %{x}<br>', showlegend=False)\n a = {'x': int(di['Days Since 100 Deaths'].max() + 1.5), 'y': np\n .log10(int(di['Deaths'].max())), 'xref': 'x', 'yref': 'y',\n 'showarrow': False, 'text': list(di.loc[:, 'Country_Region'\n ])[0], 'xanchor': 'right', 'yanchor': 'middle', 'align':\n 'center', 'font': {'size': 8, 'color': 'black'},\n 'bordercolor': '#ffffff', 'borderwidth': 1, 'borderpad': 1,\n 'bgcolor': '#ffffff', 'opacity': 0.6}\n fig100.append(trace)\n anno.append(a)\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.2310491 *\n i)) for i in list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 'Deaths Double Every 3 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dash'), text=[\n '# of Deaths Double Every 3 Days'], hovertemplate=\n '<b>Deaths Double Every 3 Days</b>', showlegend=True))\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max())), y=[(100 * math.exp(0.099021 *\n i)) for i in 
list(np.arange(deaths_100_start[\n 'Days Since 100 Deaths'].max()))], name=\n 'Deaths Double Every 7 Days', mode='lines', opacity=0.25, line=\n dict(color='grey', width=3, dash='dot'), text=[\n '# of Deaths Double Every 7 Days'], hovertemplate=\n '<b>Deaths Double Every 7 Days</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Deaths',\n 'range': [np.log10(100), np.log10(cases_1000_start['Deaths'].\n max() * 1.1)], 'type': 'log', 'fixedrange': True, 'linewidth': \n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1,\n 'showline': True, 'mirror': False}, title='Overall Deaths',\n xaxis={'title': 'Days Since First 100 deaths', 'range': [0,\n deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange':\n True, 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'showline': True, 'mirror': False}, height=750, hovermode=\n 'closest', annotations=anno)\n fig_global = {'data': fig100, 'layout': layout_global}\n return fig_global\n elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)':\n figmort = []\n anno = []\n m = mort.sort_values(by=['Confirmed'], ascending=False).head(20)\n m = m.sort_values(by=['Mortality_Percent'], ascending=True\n ).reset_index()\n for i in range(len(m)):\n m1 = m.loc[i, 'Country_Region']\n m2 = m.loc[i, 'Mortality_Percent']\n trace = go.Bar(name='Observed Case - Mortality Ratio', x=[m2],\n y=[m1], text=[round(m.loc[i, 'Mortality_Percent'], 2)],\n orientation='h', textposition='auto', marker=dict(color=\n '#FFB000', opacity=0.6, line=dict(color=\n 'rgba(255,176,0, 1)', width=1)), hovertemplate=\n '<b>%{y}</b><br>' +\n '<br>Observed Case Mortaility Pct: %{text}%<br>',\n showlegend=False)\n figmort.append(trace)\n layout_global = go.Layout(yaxis={'title': 'Country / Region',\n 'fixedrange': True, 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Observed Case - Mortality Ratio', xaxis={'title':\n '% Mortality by Confirmed Cases (Top 
20 Countries)', 'range': [\n 0, m['Mortality_Percent'].max() + 2], 'fixedrange': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=750, hovermode='closest')\n fig_global = {'data': figmort, 'layout': layout_global}\n return fig_global\n elif select_global == 'Recoveries vs. Deaths By Country':\n figscat = []\n rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >= 100)\n ].reset_index()\n for i in range(len(rc)):\n scat = go.Scatter(x=[rc.loc[i, 'Deaths']], y=[rc.loc[i,\n 'Recovered']], mode='markers+text', text=[rc.loc[i,\n 'Country_Region']], marker_color=colors_dict_global[rc.loc[\n i, 'Continent']], showlegend=False, marker=dict(size=12,\n line_width=1, opacity=0.75), hovertemplate=\n '<b>%{text}</b><br>' + '<br>Recoveries: %{y}<br>' +\n 'Deaths: %{x}<br>', textposition='bottom center', textfont=\n dict(size=10, color='rgba(0, 0, 0, 0.6)'))\n figscat.append(scat)\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[i for i in list(np.linspace(100, rc['Deaths'].max(),\n 3))], mode='lines', name='Deaths = Recoveries', opacity=0.25,\n line=dict(color='grey', width=1), text=[\n '# of Deaths = # of Recoveries'], hovertemplate=\n '<b># of Deaths = # of Recoveries</b>', showlegend=True))\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i * 2) for i in list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Recoveries for Every Death',\n opacity=0.25, line=dict(color='green', width=3, dash='dash'),\n text=['2 Recoveries for Every Death'], hovertemplate=\n '<b>2 Recoveries for Every Death</b>', showlegend=True))\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(\n ), 3)), y=[(i / 2) for i in list(np.linspace(100, rc['Deaths'].\n max(), 3))], mode='lines', name='2 Deaths for Every Recovery',\n opacity=0.25, line=dict(color='firebrick', width=3, dash='dash'\n ), text=['2 Deaths for Every Recovery'], hovertemplate=\n '<b>2 
Deaths for Every Recovery</b>', showlegend=True))\n layout_global = go.Layout(yaxis={'title': 'Number of Recoveries',\n 'fixedrange': True, 'automargin': True, 'range': [np.log10(100),\n np.log10(rc['Recovered'].max() * 1.1)], 'type': 'log',\n 'linewidth': 2, 'linecolor': 'black', 'showgrid': False,\n 'dtick': 1, 'showline': True, 'mirror': False}, title=\n 'Recoveries vs. Deaths, By Country', xaxis={'title':\n 'Number of Deaths', 'fixedrange': True, 'range': [np.log10(100),\n np.log10(rc['Deaths'].max() * 1.1)], 'type': 'log', 'linewidth':\n 2, 'linecolor': 'black', 'showgrid': False, 'dtick': 1,\n 'showline': True, 'mirror': False}, height=750, hovermode='closest'\n )\n fig_global = {'data': figscat, 'layout': layout_global}\n return fig_global\n\n\n@app.callback([Output(component_id='main-dropdown-2', component_property=\n 'options'), Output(component_id='btext1', component_property='children'\n ), Output(component_id='subplot1', component_property='figure'), Output\n (component_id='btext2', component_property='children'), Output(\n component_id='subplot2', component_property='figure'), Output(\n component_id='btext3', component_property='children'), Output(\n component_id='subplot3', component_property='figure')], [Input(\n component_id='main-dropdown', component_property='value')])\ndef update_country(selected_country):\n if selected_country is None:\n selected_country = 'Canada'\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 
'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} (Last 45 Days)'.format(\n selected_country), xaxis={'type': 'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 
'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1\n }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n else:\n options = ['Confirmed', 'Recovered', 'Deaths',\n 'Total and Daily Confirmed Cases']\n vals = [{'label': i, 'value': i} for i in options]\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Confirmed_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#648FFF', width=3))]\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Confirmed Cases: {0} (Last 45 
Days)'.format(\n selected_country), xaxis={'type': 'date', 'automargin': True,\n 'showline': True, 'mirror': False, 'linewidth': 2, 'linecolor':\n 'black'}, height=300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[final_df\n ['Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines',\n line=dict(color='#DC267F', width=3))]\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[final_df[\n 'Country_Region'] == selected_country, 'Date'].tail(45), y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6\n ), go.Scatter(name='5 Day Moving Average', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'].tail(45\n ), y=final_df.loc[final_df['Country_Region'] ==\n selected_country, 'Recovered_Diff'].tail(45).rolling(window=5).\n mean(), mode='lines', line=dict(color='#009E73', width=3))]\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered',\n 'automargin': True, 'showline': True, 'mirror': False,\n 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\n xaxis={'type': 
'date', 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, height=\n 300, legend=dict(x=0.2, y=-0.15, orientation='h'))\n return vals, final_df.loc[(final_df['Date'] == final_df['Date'].max\n ()) & (final_df['Country_Region'] == selected_country),\n 'Confirmed_Diff'], {'data': trace_1, 'layout': layout_t1\n }, final_df.loc[(final_df['Date'] == final_df['Date'].max()) &\n (final_df['Country_Region'] == selected_country), 'Deaths_Diff'], {\n 'data': trace_2, 'layout': layout_t2}, final_df.loc[(final_df[\n 'Date'] == final_df['Date'].max()) & (final_df['Country_Region'\n ] == selected_country), 'Recovered_Diff'], {'data': trace_3,\n 'layout': layout_t3}\n\n\n@app.callback(Output(component_id='box-1', component_property='figure'), [\n Input(component_id='main-dropdown', component_property='value'), Input(\n component_id='main-dropdown-2', component_property='value')])\ndef update_maingraph(selected_country, selected_graph):\n if selected_graph is None and selected_country is None:\n selected_country = 'Canada'\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph is None and selected_country is not None:\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[\n 
final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], marker_color='#648FFF')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19: {0}'.format(str(\n selected_country)), hovermode='x unified', xaxis=dict(title=\n 'Date', fixedrange=True, automargin=True, showline=True, mirror\n =False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n elif selected_graph == 'Total and Daily Confirmed Cases':\n figmain_t = [go.Scatter(name='Total Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'], line=dict(color='#1A85FF', width=1.5), mode=\n 'lines'), go.Scatter(name='Daily Confirmed Cases', x=final_df.\n loc[final_df['Country_Region'] == selected_country, 'Date'], y=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed_Diff'], line=dict(color='#D41159', width=3), mode=\n 'lines', fill='tozeroy')]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n 'Confirmed'].max() * 1.1], 'automargin': True, 'showline': True,\n 'mirror': False, 'linewidth': 2, 'linecolor': 'black'}, title=\n 'Overall Progression of COVID-19 ({0}): {1}'.format(str(\n selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n else:\n cols_dict = {'Confirmed': '#648FFF', 'Deaths': '#DC267F',\n 'Recovered': '#009E73'}\n figmain_t = 
[go.Bar(name='Total {0}'.format(selected_graph), x=\n final_df.loc[final_df['Country_Region'] == selected_country,\n 'Date'], y=final_df.loc[final_df['Country_Region'] ==\n selected_country, selected_graph], marker_color=cols_dict[\n selected_graph])]\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range': [\n 0, final_df.loc[final_df['Country_Region'] == selected_country,\n selected_graph].max() * 1.1], 'automargin': True, 'showline': \n True, 'mirror': False, 'linewidth': 2, 'linecolor': 'black'},\n title='Overall Progression of COVID-19 ({0}): {1}'.format(str(\n selected_country), str(selected_graph)), hovermode='x unified',\n xaxis=dict(title='Date', fixedrange=True, automargin=True,\n showline=True, mirror=False, linewidth=2, linecolor='black'))\n return {'data': figmain_t, 'layout': figmain_l}\n\n\nif __name__ == '__main__':\n app.run_server()\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 10 01:03:35 2020\r\n\r\n@author: Jordan\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom datetime import date\r\n## from COVID19_Simple import *\r\nfrom COVID19_Diff import calc_diff_country\r\n### Dash Stuff ###\r\nimport dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\nimport plotly.graph_objects as go\r\nimport math\r\n\r\n### Initial Code Block; Set Up Data ###\r\nurls = ['https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv',\r\n 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv',\r\n 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv']\r\n\r\n\r\n### Base Country Data (and Transformations)\r\nfinal_df = pd.read_csv('C:/Users/Jordan/Documents/COVID19/final_df.csv')\r\nfinal_df = calc_diff_country(final_df)\r\nfinal_df['Date'] = pd.to_datetime(final_df['Date'])\r\n\r\nfinal_df['Country_Region'] = final_df['Country_Region'].astype(str)\r\n\r\n\r\n### 1000 Cases, 10 Deaths, 10 Recovered ### (Global)\r\n## 1000 Cases ##\r\ncases_1000_start = final_df.loc[(final_df['Confirmed'] >= 1000) & (final_df['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()['Date']\r\ncases_1000_start = cases_1000_start.reset_index()\r\ncases_1000_start = cases_1000_start.rename(columns={\"Date\":\"Start_Date\"})\r\nfinal_df['Country_Region'] = final_df['Country_Region'].str.strip()\r\ncases_1000_start = pd.merge(cases_1000_start,final_df, on = ['Country_Region'],how='right')\r\ncases_1000_start['Start_Date'] = 
pd.to_datetime(cases_1000_start['Start_Date'])\r\ncases_1000_start['Date'] = pd.to_datetime(cases_1000_start['Date'])\r\ncases_1000_start = cases_1000_start[cases_1000_start['Start_Date'].notna()]\r\ncases_1000_start['Days Since 1000 Cases'] = (cases_1000_start['Date'] - cases_1000_start['Start_Date']).dt.days\r\n\r\n\r\n## 100 Deaths ##\r\ndeaths_100_start = final_df.loc[(final_df['Deaths'] >= 100) & (final_df['Country_Region'] != 'Cruise Ship')].groupby(['Country_Region']).min()['Date']\r\ndeaths_100_start = deaths_100_start.reset_index()\r\ndeaths_100_start = deaths_100_start.rename(columns={\"Date\":\"Start_Date\"})\r\nfinal_df['Country_Region'] = final_df['Country_Region'].str.strip()\r\ndeaths_100_start = pd.merge(deaths_100_start,final_df, on = ['Country_Region'],how='right')\r\ndeaths_100_start['Start_Date'] = pd.to_datetime(deaths_100_start['Start_Date'])\r\ndeaths_100_start['Date'] = pd.to_datetime(deaths_100_start['Date'])\r\ndeaths_100_start = deaths_100_start[deaths_100_start['Start_Date'].notna()]\r\ndeaths_100_start['Days Since 100 Deaths'] = (deaths_100_start['Date'] - deaths_100_start['Start_Date']).dt.days\r\n\r\n## Mortality Ratios ##\r\nmort = final_df.groupby(['Country_Region'])['Date'].max().reset_index()\r\nmort = pd.merge(mort, final_df, on=['Country_Region', 'Date'], how='left')\r\nmort['Mortality_Percent'] = (mort['Deaths'] / mort['Confirmed'])*100.00\r\n\r\n\r\ncolors_dict_global = {'Europe':'#1D6996','Asia':'#CC503E','Africa':'#94346E', 'North America':'#38A6A5', 'Middle East': '#EDAD08', 'South America':'#E17C05', 'Caribbean & Central America':'#0F8554', 'Oceania':'#73AF48'}\r\n\r\n### Dash Portion of the Script ###\r\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\r\n\r\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\nserver=app.server\r\n\r\napp.layout = html.Div(children=[\r\n html.H2(children='COVID-19 Dashboard'),\r\n html.H4(children='A Basic Dashboard to Help Track the COVID-19 
Pandemic'),\r\n html.Br(),\r\n html.H5(children='Global View'),\r\n html.P(children='The Global View highlights how Covid-19 is affecting countries across the world, and how the pandemic is expanding on a country by country basis. The Global View includes the following:'),\r\n html.Div([html.Ul([html.Li([html.B('Cumulative Cases by Country Since First 1000 Cases: '),'This allows us to see how cases are spreading since the first 1000 Cases on a country by country basis']),\r\n html.Li([html.B('Cumulative Cases by Country Since First 100 Deaths: '),'This allows us to see COVID-19 fatalities since the first 100 Deaths on a country by country basis']),\r\n html.Li([html.B('Observed Case - Mortality Ratio (Top 20 Countries by Confirmed Cases): '), 'This allows us to see the percentage of COVID19 fatalities based on reported cases and deaths. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)']),\r\n html.Li([html.B('Recoveries vs. Deaths By Country (Countries with over 100 deaths and 100 recoveries: '), 'This plots Recoveries against Deaths on a country by country basis. (Note that reporting standards vary from country to country, so this is for illustrative purposes only)'])])], style={'font-size': 12}),\r\n html.Br(),\r\n dcc.Dropdown(id='global-dropdown', options=[{'label':y, 'value':y} for y in ['Global Cases Trend', 'Global Deaths Trend', '% Mortality by Confirmed Cases (Top 20 Countries)','Recoveries vs. Deaths By Country']], placeholder = 'Pick Graphs From Here...'),\r\n dcc.Graph(id='global-box-1'),\r\n html.Br(),\r\n html.H5(children='Country View'),\r\n html.P('The Country view allows us to see a closer look on how the COVID-19 Pandemic has expanded. As opposed to a high level aggregation, the Country View provides a day by day time series analysis of the effects of COVID-19. 
The Country View includes the following:'),\r\n html.Div(style={'font-size': 12}, children=[html.Ul([html.Li([html.B('Confirmed: '), 'Cumulative Confirmed Cases of COVID-19 since January 22nd, 2020']),\r\n html.Li([html.B('Recovered: '), 'Cumulative Recovered Cases of COVID-19 since January 22nd, 2020']),\r\n html.Li([html.B('Deaths: '),'Cumulative Deaths from COVID-19 since January 22nd, 2020']),\r\n html.Li([html.B('Total and Daily Confirmed Cases: '), 'Cumulative and Daily Cases Since January 22nd, 2020. This illustrates the curve of daily cases in relation to the total cases for a country'])])]),\r\n dcc.Dropdown(id='main-dropdown', options=[{'label': x, 'value': x} for x in list(final_df.Country_Region.unique())], placeholder = 'Pick a Country From Here...'),\r\n dcc.Dropdown(id='main-dropdown-2', placeholder = 'Pick Graphs From Here...'),\r\n dcc.Graph(id='box-1'),\r\n html.Div([html.Div([html.H6(children='Most Recent New Cases'), html.H1(id='btext1'), dcc.Graph(id='subplot1')], className = 'four columns', style={'color': '#648FFF'}),\r\n html.Div([html.H6(children='Most Recent Daily Deaths'), html.H1(id='btext2'), dcc.Graph(id='subplot2')], className = 'four columns', style={'color': '#DC267F'}),\r\n html.Div([html.H6(children='Most Recent Daily Recovered'), html.H1(id='btext3'), dcc.Graph(id='subplot3')], className = 'four columns', style={'color': '#009E73', 'layout':'right'})], className=\"row\")\r\n])\r\n\r\n## Callback Functionality ## \r\n@app.callback(\r\n Output(component_id='global-box-1', component_property='figure'),\r\n [Input(component_id='global-dropdown', component_property='value')])\r\n\r\ndef global_update(select_global):\r\n if select_global == 'Global Cases Trend' or select_global is None:\r\n\r\n fig1000 = []\r\n anno = []\r\n\r\n for group, dataframe in cases_1000_start.groupby(by='Country_Region'):\r\n di = dataframe.sort_values(by=['Days Since 1000 Cases'])\r\n trace = go.Scatter(x=di['Days Since 1000 Cases'].tolist(),\r\n 
y=di['Confirmed'].tolist(),\r\n mode='lines',\r\n line=dict(color=colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=1),\r\n opacity=0.6,\r\n text= di.Country_Region.tolist(),\r\n legendgroup=list(di.loc[:, 'Continent'])[0],\r\n hovertemplate='<b>%{text}</b><br>'+'<br>Confirmed Cases: %{y}<br>'+'Days Since First 1000 Cases: %{x}<br>',\r\n showlegend=False)\r\n\r\n a = {'x': int(di['Days Since 1000 Cases'].max()+1.5),\r\n 'y':np.log10(int(di['Confirmed'].max())),\r\n 'xref':'x', 'yref':'y',\r\n 'showarrow':False,\r\n 'text':list(di.loc[:, 'Country_Region'])[0],\r\n 'xanchor':'right', \r\n 'yanchor':'middle',\r\n 'align':'center',\r\n 'font':{'size':8, 'color':'black'},\r\n 'bordercolor':\"#ffffff\",\r\n 'borderwidth':1,\r\n 'borderpad':1,\r\n 'bgcolor':\"#ffffff\",\r\n 'opacity':0.6}\r\n\r\n fig1000.append(trace)\r\n anno.append(a)\r\n\r\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start['Days Since 1000 Cases'].max())),\r\n y = [1000 * (math.exp(0.2310491 * i)) for i in list(np.arange(cases_1000_start['Days Since 1000 Cases'].max()))],\r\n name='Cases Double Every 3 Days',\r\n mode='lines',\r\n opacity=.25,\r\n line = dict(color='grey', width=3, dash='dash'),\r\n text=['# of Cases Double Every 3 Days'],\r\n hovertemplate='<b>Cases Double Every 3 Days</b>',\r\n showlegend=True))\r\n\r\n fig1000.append(go.Scatter(x=list(np.arange(cases_1000_start['Days Since 1000 Cases'].max())),\r\n y = [1000 * (math.exp(0.099021 * i)) for i in list(np.arange(cases_1000_start['Days Since 1000 Cases'].max()))],\r\n name='Cases Double Every 7 Days',\r\n mode='lines',\r\n opacity=.25,\r\n line = dict(color='grey', width=3, dash='dot'),\r\n text=['# of Cases Double Every 7 Days'],\r\n hovertemplate='<b>Cases Double Every 7 Days</b>',\r\n showlegend=True))\r\n\r\n layout_global = go.Layout(yaxis={'title':'Number of Confirmed Cases', 'range':[np.log10(1000), np.log10(cases_1000_start['Confirmed'].max() * 1.10)], 'type':'log', 'fixedrange':True, 'linewidth':2, 
'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False},\r\n title='Overall Confirmed Cases',\r\n xaxis={'title': 'Days Since First 1000 Cases', 'range': [0, cases_1000_start['Days Since 1000 Cases'].max()], 'fixedrange':True, 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'showline':True, 'mirror':False}, height=750, hovermode='closest', annotations=anno)\r\n\r\n fig_global={'data':fig1000, 'layout': layout_global}\r\n return fig_global\r\n\r\n elif select_global == 'Global Deaths Trend':\r\n fig100 = []\r\n anno = []\r\n\r\n for group, dataframe in deaths_100_start.groupby(by='Country_Region'):\r\n di = dataframe.sort_values(by=['Days Since 100 Deaths'])\r\n trace = go.Scatter(x=di['Days Since 100 Deaths'].tolist(),\r\n y=di['Deaths'].tolist(),\r\n mode='lines',\r\n line=dict(color=colors_dict_global[list(di.loc[:, 'Continent'])[0]], width=1),\r\n opacity=0.6,\r\n text= di.Country_Region.tolist(),\r\n legendgroup=list(di.loc[:, 'Continent'])[0],\r\n hovertemplate='<b>%{text}</b><br>'+'<br>Deaths: %{y}<br>'+'Days Since First 1000 Cases: %{x}<br>',\r\n showlegend=False)\r\n\r\n a={'x': int(di['Days Since 100 Deaths'].max()+1.5),\r\n 'y':np.log10(int(di['Deaths'].max())),\r\n 'xref':'x', 'yref':'y',\r\n 'showarrow':False,\r\n 'text':list(di.loc[:, 'Country_Region'])[0],\r\n 'xanchor':'right', \r\n 'yanchor':'middle',\r\n 'align':'center',\r\n 'font':{'size':8, 'color':'black'},\r\n 'bordercolor':\"#ffffff\",\r\n 'borderwidth':1,\r\n 'borderpad':1,\r\n 'bgcolor':\"#ffffff\",\r\n 'opacity':0.6}\r\n\r\n fig100.append(trace)\r\n anno.append(a)\r\n\r\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start['Days Since 100 Deaths'].max())),\r\n y = [100 * (math.exp(0.2310491 * i)) for i in list(np.arange(deaths_100_start['Days Since 100 Deaths'].max()))],\r\n name='Deaths Double Every 3 Days',\r\n mode='lines',\r\n opacity=.25,\r\n line = dict(color='grey', width=3, dash='dash'),\r\n text=['# of Deaths Double Every 3 Days'],\r\n 
hovertemplate='<b>Deaths Double Every 3 Days</b>',\r\n showlegend=True))\r\n\r\n fig100.append(go.Scatter(x=list(np.arange(deaths_100_start['Days Since 100 Deaths'].max())),\r\n y = [100 * (math.exp(0.099021 * i)) for i in list(np.arange(deaths_100_start['Days Since 100 Deaths'].max()))],\r\n name='Deaths Double Every 7 Days',\r\n mode='lines',\r\n opacity=.25,\r\n line = dict(color='grey', width=3, dash='dot'),\r\n text=['# of Deaths Double Every 7 Days'],\r\n hovertemplate='<b>Deaths Double Every 7 Days</b>',\r\n showlegend=True))\r\n\r\n layout_global = go.Layout(yaxis={'title':'Number of Deaths', 'range':[np.log10(100), np.log10(cases_1000_start['Deaths'].max() * 1.10)], 'type':'log', 'fixedrange':True, 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False},\r\n title='Overall Deaths',\r\n xaxis={'title': 'Days Since First 100 deaths', 'range': [0, deaths_100_start['Days Since 100 Deaths'].max()], 'fixedrange':True, 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'showline':True, 'mirror':False}, height=750, hovermode='closest', annotations=anno)\r\n\r\n fig_global={'data':fig100, 'layout': layout_global}\r\n return fig_global\r\n\r\n\r\n elif select_global == '% Mortality by Confirmed Cases (Top 20 Countries)':\r\n figmort = []\r\n anno =[]\r\n\r\n m = mort.sort_values(by=['Confirmed'], ascending=False).head(20)\r\n m = m.sort_values(by=['Mortality_Percent'], ascending=True).reset_index()\r\n\r\n for i in range(len(m)):\r\n\r\n m1 = m.loc[i, 'Country_Region']\r\n #m1 = [str(i) for i in m1]\r\n m2 = m.loc[i, 'Mortality_Percent']\r\n #m2 = [str(round(i, 2)) for i in m2]\r\n trace = go.Bar(name='Observed Case - Mortality Ratio',\r\n x = [m2],\r\n y= [m1],\r\n text = [round(m.loc[i, 'Mortality_Percent'], 2)],\r\n orientation ='h',\r\n textposition='auto',\r\n marker = dict(color='#FFB000', opacity=0.6, line=dict(color='rgba(255,176,0, 1)', width=1)),\r\n hovertemplate='<b>%{y}</b><br>'+'<br>Observed Case 
Mortaility Pct: %{text}%<br>',\r\n showlegend=False)\r\n\r\n figmort.append(trace)\r\n\r\n layout_global = go.Layout(yaxis={'title':'Country / Region','fixedrange':True, 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Observed Case - Mortality Ratio',\r\n xaxis={'title': '% Mortality by Confirmed Cases (Top 20 Countries)', 'range': [0, m['Mortality_Percent'].max() + 2], 'fixedrange':True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=750, hovermode='closest')\r\n fig_global={'data':figmort, 'layout': layout_global}\r\n return fig_global\r\n\r\n\r\n elif select_global == 'Recoveries vs. Deaths By Country':\r\n figscat = []\r\n rc = mort.loc[(mort['Deaths'] >= 100) & (mort['Recovered'] >=100)].reset_index()\r\n\r\n for i in range(len(rc)):\r\n scat = go.Scatter(\r\n x=[rc.loc[i, 'Deaths']],\r\n y=[rc.loc[i, 'Recovered']],\r\n mode='markers+text',\r\n text=[rc.loc[i, 'Country_Region']],\r\n marker_color=(colors_dict_global[rc.loc[i, 'Continent']]),\r\n showlegend=False,\r\n marker=dict(size=12,line_width=1, opacity=0.75),\r\n hovertemplate='<b>%{text}</b><br>'+'<br>Recoveries: %{y}<br>'+'Deaths: %{x}<br>',\r\n textposition='bottom center',\r\n textfont=dict(size=10, color='rgba(0, 0, 0, 0.6)')\r\n )\r\n\r\n figscat.append(scat)\r\n\r\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(), 3)),\r\n y = [i for i in list(np.linspace(100, rc['Deaths'].max(), 3))],\r\n mode='lines',\r\n name='Deaths = Recoveries',\r\n opacity=.25,\r\n line = dict(color='grey', width=1),\r\n text=['# of Deaths = # of Recoveries'],\r\n hovertemplate='<b># of Deaths = # of Recoveries</b>',\r\n showlegend=True))\r\n\r\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(), 3)),\r\n y = [i*2 for i in list(np.linspace(100, rc['Deaths'].max(), 3))],\r\n mode='lines',\r\n name='2 Recoveries for Every Death',\r\n opacity=.25,\r\n line = dict(color='green', width=3, dash='dash'),\r\n 
text=['2 Recoveries for Every Death'],\r\n hovertemplate='<b>2 Recoveries for Every Death</b>',\r\n showlegend=True))\r\n\r\n figscat.append(go.Scatter(x=list(np.linspace(100, rc['Deaths'].max(), 3)),\r\n y = [i/2 for i in list(np.linspace(100, rc['Deaths'].max(), 3))],\r\n mode='lines',\r\n name='2 Deaths for Every Recovery',\r\n opacity=.25,\r\n line = dict(color='firebrick', width=3, dash='dash'),\r\n text=['2 Deaths for Every Recovery'],\r\n hovertemplate='<b>2 Deaths for Every Recovery</b>',\r\n showlegend=True))\r\n\r\n layout_global = go.Layout(yaxis={'title':'Number of Recoveries','fixedrange':True, 'automargin': True, 'range':[np.log10(100), np.log10(rc['Recovered'].max() * 1.10)], 'type':'log', 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False},\r\n title='Recoveries vs. Deaths, By Country',\r\n xaxis={'title': 'Number of Deaths','fixedrange':True, 'range':[np.log10(100), np.log10(rc['Deaths'].max() * 1.10)], 'type':'log', 'linewidth':2, 'linecolor':'black', 'showgrid': False, 'dtick': 1, 'showline':True, 'mirror':False}, height=750, hovermode='closest')\r\n\r\n fig_global={'data':figscat, 'layout': layout_global}\r\n return fig_global\r\n\r\n\r\n@app.callback(\r\n [Output(component_id='main-dropdown-2', component_property = 'options'),\r\n Output(component_id='btext1', component_property='children'),\r\n Output(component_id='subplot1', component_property = 'figure'),\r\n Output(component_id='btext2', component_property='children'),\r\n Output(component_id='subplot2', component_property = 'figure'),\r\n Output(component_id='btext3', component_property='children'),\r\n Output(component_id='subplot3', component_property = 'figure')],\r\n [Input(component_id='main-dropdown', component_property = 'value')])\r\n\r\ndef update_country(selected_country):\r\n\r\n if selected_country is None:\r\n selected_country = 'Canada'\r\n\r\n options = ['Confirmed','Recovered','Deaths', 'Total and Daily Confirmed 
Cases']\r\n\r\n vals = [{'label': i, 'value': i} for i in options]\r\n\r\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6),\r\n go.Scatter(name='5 Day Moving Average', x = final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Confirmed_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#648FFF', width = 3))]\r\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Daily Confirmed Cases: {0} (Last 45 Days)'.format(selected_country),\r\n xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h'))\r\n\r\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\r\n go.Scatter(name='5 Day Moving Average', x = final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#DC267F', width = 3))]\r\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\r\n xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h'))\r\n\r\n 
trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6),\r\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Recovered_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#009E73', width = 3))]\r\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\r\n xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h'))\r\n\r\n return vals,final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Confirmed_Diff'],{'data':trace_1, 'layout': layout_t1},final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Deaths_Diff'],{'data':trace_2, 'layout':layout_t2},final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Recovered_Diff'],{'data':trace_3, 'layout':layout_t3}\r\n \r\n\r\n else:\r\n options = ['Confirmed','Recovered','Deaths', 'Total and Daily Confirmed Cases']\r\n\r\n vals = [{'label': i, 'value': i} for i in options]\r\n\r\n trace_1 = [go.Bar(name='Daily Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Confirmed_Diff'].tail(45), marker_color='#648FFF', opacity=0.6),\r\n go.Scatter(name='5 Day Moving Average', x = final_df.loc[(final_df['Country_Region'] == 
selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Confirmed_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#648FFF', width = 3))]\r\n layout_t1 = go.Layout(yaxis={'title': 'Number of Confirmed Cases', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Daily Confirmed Cases: {0} (Last 45 Days)'.format(selected_country),\r\n xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h'))\r\n\r\n trace_2 = [go.Bar(name='Daily Deaths', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Deaths_Diff'].tail(45), marker_color='#DC267F', opacity=0.6),\r\n go.Scatter(name='5 Day Moving Average', x = final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Deaths_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#DC267F', width = 3))]\r\n layout_t2 = go.Layout(yaxis={'title': 'Number of Deaths', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Daily Deaths: {0} (Last 45 Days)'.format(selected_country),\r\n xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h'))\r\n\r\n trace_3 = [go.Bar(name='Daily Recoveries', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), y=final_df.loc[(final_df['Country_Region'] == selected_country),'Recovered_Diff'].tail(45), marker_color='#009E73', opacity=0.6),\r\n go.Scatter(name='5 Day Moving Average', x=final_df.loc[(final_df['Country_Region'] == selected_country),'Date'].tail(45), 
y=final_df.loc[(final_df['Country_Region'] == selected_country),'Recovered_Diff'].tail(45).rolling(window=5).mean(), mode='lines', line=dict(color='#009E73', width = 3))]\r\n layout_t3 = go.Layout(yaxis={'title': 'Number of Recovered', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Daily Recovered: {0} (Last 45 Days)'.format(selected_country),\r\n xaxis={'type': 'date', 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'}, height=300, legend=dict(x=.2, y=-.15, orientation='h'))\r\n\r\n return vals,final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Confirmed_Diff'],{'data':trace_1, 'layout': layout_t1},final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Deaths_Diff'],{'data':trace_2, 'layout':layout_t2},final_df.loc[(final_df['Date'] == final_df['Date'].max()) & (final_df['Country_Region'] == selected_country), 'Recovered_Diff'],{'data':trace_3, 'layout':layout_t3}\r\n\r\n\r\n\r\n@app.callback(\r\n Output(component_id='box-1',component_property='figure'),\r\n [Input(component_id='main-dropdown', component_property = 'value'),\r\n Input(component_id='main-dropdown-2', component_property = 'value')])\r\n\r\ndef update_maingraph(selected_country, selected_graph):\r\n if selected_graph is None and selected_country is None:\r\n\r\n selected_country = 'Canada'\r\n\r\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country) ,'Date'], y = final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'], marker_color='#648FFF')]\r\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range':[0, (final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'].max() * 1.10)], 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Overall 
Progression of COVID-19: {0}'.format(str(selected_country)),\r\n hovermode='x unified', xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black'))\r\n\r\n return {'data':figmain_t, 'layout': figmain_l}\r\n\r\n elif selected_graph is None and selected_country is not None:\r\n\r\n figmain_t = [go.Bar(name='Total Confirmed Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country) ,'Date'], y = final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'], marker_color='#648FFF')]\r\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range':[0, (final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'].max() * 1.10)], 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Overall Progression of COVID-19: {0}'.format(str(selected_country)),\r\n hovermode='x unified', xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black'))\r\n\r\n return {'data':figmain_t, 'layout': figmain_l}\r\n\r\n elif selected_graph == 'Total and Daily Confirmed Cases':\r\n figmain_t = [go.Scatter(name='Total Confirmed Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country) ,'Date'], y = final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'], line=dict(color='#1A85FF', width = 1.5), mode='lines'),\r\n go.Scatter(name='Daily Confirmed Cases', x=final_df.loc[(final_df['Country_Region'] == selected_country) ,'Date'], y=final_df.loc[(final_df['Country_Region'] == selected_country),'Confirmed_Diff'], line=dict(color='#D41159', width = 3), mode='lines', fill='tozeroy')]\r\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range':[0, (final_df.loc[(final_df['Country_Region'] == selected_country) ,'Confirmed'].max() * 1.10)], 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Overall 
Progression of COVID-19 ({0}): {1}'.format(str(selected_country), str(selected_graph)),\r\n hovermode='x unified', xaxis=dict(title='Date',fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black'))\r\n\r\n return {'data':figmain_t, 'layout': figmain_l}\r\n\r\n else:\r\n cols_dict = {'Confirmed':'#648FFF', 'Deaths':'#DC267F', 'Recovered':'#009E73'}\r\n\r\n figmain_t = [go.Bar(name='Total {0}'.format(selected_graph), x=final_df.loc[(final_df['Country_Region'] == selected_country) ,'Date'], y = final_df.loc[(final_df['Country_Region'] == selected_country) ,selected_graph], marker_color=cols_dict[selected_graph])]\r\n figmain_l = go.Layout(yaxis={'title': 'Number of Cases', 'range':[0, (final_df.loc[(final_df['Country_Region'] == selected_country) ,selected_graph].max() * 1.10)], 'automargin': True, 'showline':True, 'mirror':False, 'linewidth':2, 'linecolor':'black'},\r\n title='Overall Progression of COVID-19 ({0}): {1}'.format(str(selected_country), str(selected_graph)),\r\n hovermode='x unified', xaxis=dict(title='Date', fixedrange=True, automargin=True, showline=True, mirror=False, linewidth=2, linecolor='black'))\r\n\r\n return {'data':figmain_t, 'layout': figmain_l}\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import torch
from torchelie.data_learning import *
def test_pixel_image():
    """Smoke-test PixelImage and check that init_img round-trips.

    PixelImage stores the image shifted by -0.5, so the forward output
    plus 0.5 must reproduce the initialisation image.
    """
    shape = (1, 3, 128, 128)
    # Random-init path: just make sure the forward pass runs.
    PixelImage(shape, 0.01)()
    # Explicit-init path: the starting image must be recovered exactly.
    target = torch.randn(3, 128, 128)
    img = PixelImage(shape, init_img=target)
    recovered = img() + 0.5
    assert target.allclose(recovered, atol=1e-7)
def test_spectral_image():
pi = SpectralImage((1, 3, 128, 128), 0.01)
pi()
start = torch.randn(1, 3, 128, 128)
pi = SpectralImage((1, 3, 128, 128), init_img=start)
def test_correlate_colors():
corr = CorrelateColors()
start = torch.randn(1, 3, 64, 64)
assert start.allclose(corr.invert(corr(start)), atol=1e-5)
def test_parameterized_img():
start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()
ParameterizedImg(1, 3,
128,
128,
space='spectral',
colors='uncorr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()
start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)
ParameterizedImg(1, 3,
128,
129,
space='spectral',
colors='uncorr',
init_img=start)()
start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()
ParameterizedImg(1, 3,
128,
128,
space='pixel',
colors='uncorr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()
ParameterizedImg(1, 3,
128,
128,
space='spectral',
colors='corr',
init_img=start)()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()
ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr',
init_img=start)()
|
normal
|
{
"blob_id": "73cacc1317c8624b45c017144bc7449bc99bd045",
"index": 9542,
"step-1": "<mask token>\n\n\ndef test_pixel_image():\n pi = PixelImage((1, 3, 128, 128), 0.01)\n pi()\n start = torch.randn(3, 128, 128)\n pi = PixelImage((1, 3, 128, 128), init_img=start)\n assert start.allclose(pi() + 0.5, atol=1e-07)\n\n\n<mask token>\n\n\ndef test_parameterized_img():\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 129, space='spectral', colors='uncorr',\n init_img=start)()\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr', init_img\n =start)()\n",
"step-2": "<mask token>\n\n\ndef test_pixel_image():\n pi = PixelImage((1, 3, 128, 128), 0.01)\n pi()\n start = torch.randn(3, 128, 128)\n pi = PixelImage((1, 3, 128, 128), init_img=start)\n assert start.allclose(pi() + 0.5, atol=1e-07)\n\n\ndef test_spectral_image():\n pi = SpectralImage((1, 3, 128, 128), 0.01)\n pi()\n start = torch.randn(1, 3, 128, 128)\n pi = SpectralImage((1, 3, 128, 128), init_img=start)\n\n\n<mask token>\n\n\ndef test_parameterized_img():\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 129, space='spectral', colors='uncorr',\n init_img=start)()\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr', init_img\n =start)()\n",
"step-3": "<mask token>\n\n\ndef test_pixel_image():\n pi = PixelImage((1, 3, 128, 128), 0.01)\n pi()\n start = torch.randn(3, 128, 128)\n pi = PixelImage((1, 3, 128, 128), init_img=start)\n assert start.allclose(pi() + 0.5, atol=1e-07)\n\n\ndef test_spectral_image():\n pi = SpectralImage((1, 3, 128, 128), 0.01)\n pi()\n start = torch.randn(1, 3, 128, 128)\n pi = SpectralImage((1, 3, 128, 128), init_img=start)\n\n\ndef test_correlate_colors():\n corr = CorrelateColors()\n start = torch.randn(1, 3, 64, 64)\n assert start.allclose(corr.invert(corr(start)), atol=1e-05)\n\n\ndef test_parameterized_img():\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 129, space='spectral', colors='uncorr',\n init_img=start)()\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr', init_img\n =start)()\n",
"step-4": "import torch\nfrom torchelie.data_learning import *\n\n\ndef test_pixel_image():\n pi = PixelImage((1, 3, 128, 128), 0.01)\n pi()\n start = torch.randn(3, 128, 128)\n pi = PixelImage((1, 3, 128, 128), init_img=start)\n assert start.allclose(pi() + 0.5, atol=1e-07)\n\n\ndef test_spectral_image():\n pi = SpectralImage((1, 3, 128, 128), 0.01)\n pi()\n start = torch.randn(1, 3, 128, 128)\n pi = SpectralImage((1, 3, 128, 128), init_img=start)\n\n\ndef test_correlate_colors():\n corr = CorrelateColors()\n start = torch.randn(1, 3, 64, 64)\n assert start.allclose(corr.invert(corr(start)), atol=1e-05)\n\n\ndef test_parameterized_img():\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 129, space='spectral', colors='uncorr',\n init_img=start)()\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr',\n init_img=start)()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr', init_img\n =start)()\n",
"step-5": "import torch\nfrom torchelie.data_learning import *\n\n\ndef test_pixel_image():\n pi = PixelImage((1, 3, 128, 128), 0.01)\n pi()\n\n start = torch.randn(3, 128, 128)\n pi = PixelImage((1, 3, 128, 128), init_img=start)\n\n assert start.allclose(pi() + 0.5, atol=1e-7)\n\n\ndef test_spectral_image():\n pi = SpectralImage((1, 3, 128, 128), 0.01)\n pi()\n\n start = torch.randn(1, 3, 128, 128)\n pi = SpectralImage((1, 3, 128, 128), init_img=start)\n\n\ndef test_correlate_colors():\n corr = CorrelateColors()\n start = torch.randn(1, 3, 64, 64)\n assert start.allclose(corr.invert(corr(start)), atol=1e-5)\n\n\ndef test_parameterized_img():\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n ParameterizedImg(1, 3,\n 128,\n 128,\n space='spectral',\n colors='uncorr',\n init_img=start)()\n\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()\n\n start = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3,\n 128,\n 129,\n space='spectral',\n colors='uncorr',\n init_img=start)()\n start = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()\n ParameterizedImg(1, 3,\n 128,\n 128,\n space='pixel',\n colors='uncorr',\n init_img=start)()\n\n ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()\n ParameterizedImg(1, 3,\n 128,\n 128,\n space='spectral',\n colors='corr',\n init_img=start)()\n\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()\n ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr',\n init_img=start)()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def corner(data, bins=20, *, range=None, axes_scale='linear', weights=None,
color=None, hist_bin_factor=1, smooth=None, smooth1d=None, labels=None,
label_kwargs=None, titles=None, show_titles=False, title_quantiles=None,
title_fmt='.2f', title_kwargs=None, truths=None, truth_color='#4682b4',
scale_hist=False, quantiles=None, verbose=False, fig=None, max_n_ticks=
5, top_ticks=False, use_math_text=False, reverse=False, labelpad=0.0,
hist_kwargs=None, group='posterior', var_names=None, filter_vars=None,
coords=None, divergences=False, divergences_kwargs=None, labeller=None,
**hist2d_kwargs):
"""
Make a *sick* corner plot showing the projections of a data set in a
multi-dimensional space. kwargs are passed to hist2d() or used for
`matplotlib` styling.
Parameters
----------
data : obj
Any object that can be converted to an ``arviz.InferenceData`` object.
Refer to documentation of ``arviz.convert_to_dataset`` for details.
bins : int or array_like[ndim,]
The number of bins to use in histograms, either as a fixed value for
all dimensions, or as a list of integers for each dimension.
group : str
Specifies which InferenceData group should be plotted. Defaults to
``'posterior'``.
var_names : list
Variables to be plotted, if ``None`` all variable are plotted. Prefix
the variables by `~` when you want to exclude them from the plot.
filter_vars : {``None``, ``"like"``, ``"regex"``}
If ``None`` (default), interpret ``var_names`` as the real variables
names. If ``"like"``, interpret ``var_names`` as substrings of the real
variables names. If ``"regex"``, interpret ``var_names`` as regular
expressions on the real variables names. A la ``pandas.filter``.
coords : mapping
Coordinates of ``var_names`` to be plotted. Passed to
``arviz.Dataset.sel``.
divergences : bool
If ``True`` divergences will be plotted in a different color, only if
``group`` is either ``'prior'`` or ``'posterior'``.
divergences_kwargs : dict
Any extra keyword arguments to send to the ``overplot_points`` when
plotting the divergences.
labeller : arviz.Labeller
Class providing the method ``make_label_vert`` to generate the labels
in the plot. Read the ArviZ label guide for more details and usage
examples.
weights : array_like[nsamples,]
The weight of each sample. If `None` (default), samples are given
equal weight.
color : str
A ``matplotlib`` style color for all histograms.
hist_bin_factor : float or array_like[ndim,]
This is a factor (or list of factors, one for each dimension) that
will multiply the bin specifications when making the 1-D histograms.
This is generally used to increase the number of bins in the 1-D plots
to provide more resolution.
smooth, smooth1d : float
The standard deviation for Gaussian kernel passed to
`scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
respectively. If `None` (default), no smoothing is applied.
labels : iterable (ndim,)
A list of names for the dimensions.
.. deprecated:: 2.2.1
If a ``xs`` is a ``pandas.DataFrame`` *and* ArviZ is installed,
labels will default to column names.
This behavior will be removed in version 3;
either use ArviZ data structures instead or pass
``labels=dataframe.columns`` manually.
label_kwargs : dict
Any extra keyword arguments to send to the `set_xlabel` and
`set_ylabel` methods. Note that passing the `labelpad` keyword
in this dictionary will not have the desired effect. Use the
`labelpad` keyword in this function instead.
titles : iterable (ndim,)
A list of titles for the dimensions. If `None` (default),
uses labels as titles.
show_titles : bool
Displays a title above each 1-D histogram showing the 0.5 quantile
with the upper and lower errors supplied by the quantiles argument.
title_quantiles : iterable
A list of 3 fractional quantiles to show as the the upper and lower
errors. If `None` (default), inherit the values from quantiles, unless
quantiles is `None`, in which case it defaults to [0.16,0.5,0.84]
title_fmt : string
The format string for the quantiles given in titles. If you explicitly
set ``show_titles=True`` and ``title_fmt=None``, the labels will be
shown as the titles. (default: ``.2f``)
title_kwargs : dict
Any extra keyword arguments to send to the `set_title` command.
range : iterable (ndim,)
A list where each element is either a length 2 tuple containing
lower and upper bounds or a float in range (0., 1.)
giving the fraction of samples to include in bounds, e.g.,
[(0.,10.), (1.,5), 0.999, etc.].
If a fraction, the bounds are chosen to be equal-tailed.
axes_scale : str or iterable (ndim,)
Scale (``"linear"``, ``"log"``) to use for each data dimension. If only
one scale is specified, use that for all dimensions.
truths : iterable (ndim,)
A list of reference values to indicate on the plots. Individual
values can be omitted by using ``None``.
truth_color : str
A ``matplotlib`` style color for the ``truths`` makers.
scale_hist : bool
Should the 1-D histograms be scaled in such a way that the zero line
is visible?
quantiles : iterable
A list of fractional quantiles to show on the 1-D histograms as
vertical dashed lines.
verbose : bool
If true, print the values of the computed quantiles.
plot_contours : bool
Draw contours for dense regions of the plot.
use_math_text : bool
If true, then axis tick labels for very large or small exponents will
be displayed as powers of 10 rather than using `e`.
reverse : bool
If true, plot the corner plot starting in the upper-right corner
instead of the usual bottom-left corner
labelpad : float
Padding between the axis and the x- and y-labels in units of the
fraction of the axis from the lower left
max_n_ticks: int
Maximum number of ticks to try to use
top_ticks : bool
If true, label the top ticks of each axis
fig : `~matplotlib.figure.Figure`
Overplot onto the provided figure object, which must either have no
axes yet, or ``ndim * ndim`` axes already present. If not set, the
plot will be drawn on a newly created figure.
hist_kwargs : dict
Any extra keyword arguments to send to the 1-D histogram plots.
**hist2d_kwargs
Any remaining keyword arguments are sent to :func:`corner.hist2d` to
generate the 2-D histogram plots.
Returns
-------
fig : `~matplotlib.figure.Figure`
The ``matplotlib`` figure instance for the corner plot.
"""
if arviz_corner is None:
if not (isinstance(data, np.ndarray) or data.__class__.__name__ ==
'DataFrame'):
raise ImportError(
'Please install arviz or use a numpy array as input')
if (var_names is not None or filter_vars is not None or coords is not
None or divergences or divergences_kwargs is not None or
labeller is not None):
logging.warning(
'Please install arviz to use the advanced features of corner')
return corner_impl(data, bins=bins, range=range, axes_scale=
axes_scale, weights=weights, color=color, hist_bin_factor=
hist_bin_factor, smooth=smooth, smooth1d=smooth1d, labels=
labels, label_kwargs=label_kwargs, titles=titles, show_titles=
show_titles, title_quantiles=title_quantiles, title_fmt=
title_fmt, title_kwargs=title_kwargs, truths=truths,
truth_color=truth_color, scale_hist=scale_hist, quantiles=
quantiles, verbose=verbose, fig=fig, max_n_ticks=max_n_ticks,
top_ticks=top_ticks, use_math_text=use_math_text, reverse=
reverse, labelpad=labelpad, hist_kwargs=hist_kwargs, **
hist2d_kwargs)
return arviz_corner(data, bins=bins, range=range, axes_scale=axes_scale,
weights=weights, color=color, hist_bin_factor=hist_bin_factor,
smooth=smooth, smooth1d=smooth1d, labels=labels, label_kwargs=
label_kwargs, titles=titles, show_titles=show_titles,
title_quantiles=title_quantiles, title_fmt=title_fmt, title_kwargs=
title_kwargs, truths=truths, truth_color=truth_color, scale_hist=
scale_hist, quantiles=quantiles, verbose=verbose, fig=fig,
max_n_ticks=max_n_ticks, top_ticks=top_ticks, use_math_text=
use_math_text, reverse=reverse, labelpad=labelpad, hist_kwargs=
hist_kwargs, group=group, var_names=var_names, filter_vars=
filter_vars, coords=coords, divergences=divergences,
divergences_kwargs=divergences_kwargs, labeller=labeller, **
hist2d_kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
from corner.arviz_corner import arviz_corner
except ImportError:
arviz_corner = None
def corner(data, bins=20, *, range=None, axes_scale='linear', weights=None,
color=None, hist_bin_factor=1, smooth=None, smooth1d=None, labels=None,
label_kwargs=None, titles=None, show_titles=False, title_quantiles=None,
title_fmt='.2f', title_kwargs=None, truths=None, truth_color='#4682b4',
scale_hist=False, quantiles=None, verbose=False, fig=None, max_n_ticks=
5, top_ticks=False, use_math_text=False, reverse=False, labelpad=0.0,
hist_kwargs=None, group='posterior', var_names=None, filter_vars=None,
coords=None, divergences=False, divergences_kwargs=None, labeller=None,
**hist2d_kwargs):
"""
Make a *sick* corner plot showing the projections of a data set in a
multi-dimensional space. kwargs are passed to hist2d() or used for
`matplotlib` styling.
Parameters
----------
data : obj
Any object that can be converted to an ``arviz.InferenceData`` object.
Refer to documentation of ``arviz.convert_to_dataset`` for details.
bins : int or array_like[ndim,]
The number of bins to use in histograms, either as a fixed value for
all dimensions, or as a list of integers for each dimension.
group : str
Specifies which InferenceData group should be plotted. Defaults to
``'posterior'``.
var_names : list
Variables to be plotted, if ``None`` all variable are plotted. Prefix
the variables by `~` when you want to exclude them from the plot.
filter_vars : {``None``, ``"like"``, ``"regex"``}
If ``None`` (default), interpret ``var_names`` as the real variables
names. If ``"like"``, interpret ``var_names`` as substrings of the real
variables names. If ``"regex"``, interpret ``var_names`` as regular
expressions on the real variables names. A la ``pandas.filter``.
coords : mapping
Coordinates of ``var_names`` to be plotted. Passed to
``arviz.Dataset.sel``.
divergences : bool
If ``True`` divergences will be plotted in a different color, only if
``group`` is either ``'prior'`` or ``'posterior'``.
divergences_kwargs : dict
Any extra keyword arguments to send to the ``overplot_points`` when
plotting the divergences.
labeller : arviz.Labeller
Class providing the method ``make_label_vert`` to generate the labels
in the plot. Read the ArviZ label guide for more details and usage
examples.
weights : array_like[nsamples,]
The weight of each sample. If `None` (default), samples are given
equal weight.
color : str
A ``matplotlib`` style color for all histograms.
hist_bin_factor : float or array_like[ndim,]
This is a factor (or list of factors, one for each dimension) that
will multiply the bin specifications when making the 1-D histograms.
This is generally used to increase the number of bins in the 1-D plots
to provide more resolution.
smooth, smooth1d : float
The standard deviation for Gaussian kernel passed to
`scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
respectively. If `None` (default), no smoothing is applied.
labels : iterable (ndim,)
A list of names for the dimensions.
.. deprecated:: 2.2.1
If a ``xs`` is a ``pandas.DataFrame`` *and* ArviZ is installed,
labels will default to column names.
This behavior will be removed in version 3;
either use ArviZ data structures instead or pass
``labels=dataframe.columns`` manually.
label_kwargs : dict
Any extra keyword arguments to send to the `set_xlabel` and
`set_ylabel` methods. Note that passing the `labelpad` keyword
in this dictionary will not have the desired effect. Use the
`labelpad` keyword in this function instead.
titles : iterable (ndim,)
A list of titles for the dimensions. If `None` (default),
uses labels as titles.
show_titles : bool
Displays a title above each 1-D histogram showing the 0.5 quantile
with the upper and lower errors supplied by the quantiles argument.
title_quantiles : iterable
A list of 3 fractional quantiles to show as the the upper and lower
errors. If `None` (default), inherit the values from quantiles, unless
quantiles is `None`, in which case it defaults to [0.16,0.5,0.84]
title_fmt : string
The format string for the quantiles given in titles. If you explicitly
set ``show_titles=True`` and ``title_fmt=None``, the labels will be
shown as the titles. (default: ``.2f``)
title_kwargs : dict
Any extra keyword arguments to send to the `set_title` command.
range : iterable (ndim,)
A list where each element is either a length 2 tuple containing
lower and upper bounds or a float in range (0., 1.)
giving the fraction of samples to include in bounds, e.g.,
[(0.,10.), (1.,5), 0.999, etc.].
If a fraction, the bounds are chosen to be equal-tailed.
axes_scale : str or iterable (ndim,)
Scale (``"linear"``, ``"log"``) to use for each data dimension. If only
one scale is specified, use that for all dimensions.
truths : iterable (ndim,)
A list of reference values to indicate on the plots. Individual
values can be omitted by using ``None``.
truth_color : str
A ``matplotlib`` style color for the ``truths`` makers.
scale_hist : bool
Should the 1-D histograms be scaled in such a way that the zero line
is visible?
quantiles : iterable
A list of fractional quantiles to show on the 1-D histograms as
vertical dashed lines.
verbose : bool
If true, print the values of the computed quantiles.
plot_contours : bool
Draw contours for dense regions of the plot.
use_math_text : bool
If true, then axis tick labels for very large or small exponents will
be displayed as powers of 10 rather than using `e`.
reverse : bool
If true, plot the corner plot starting in the upper-right corner
instead of the usual bottom-left corner
labelpad : float
Padding between the axis and the x- and y-labels in units of the
fraction of the axis from the lower left
max_n_ticks: int
Maximum number of ticks to try to use
top_ticks : bool
If true, label the top ticks of each axis
fig : `~matplotlib.figure.Figure`
Overplot onto the provided figure object, which must either have no
axes yet, or ``ndim * ndim`` axes already present. If not set, the
plot will be drawn on a newly created figure.
hist_kwargs : dict
Any extra keyword arguments to send to the 1-D histogram plots.
**hist2d_kwargs
Any remaining keyword arguments are sent to :func:`corner.hist2d` to
generate the 2-D histogram plots.
Returns
-------
fig : `~matplotlib.figure.Figure`
The ``matplotlib`` figure instance for the corner plot.
"""
if arviz_corner is None:
if not (isinstance(data, np.ndarray) or data.__class__.__name__ ==
'DataFrame'):
raise ImportError(
'Please install arviz or use a numpy array as input')
if (var_names is not None or filter_vars is not None or coords is not
None or divergences or divergences_kwargs is not None or
labeller is not None):
logging.warning(
'Please install arviz to use the advanced features of corner')
return corner_impl(data, bins=bins, range=range, axes_scale=
axes_scale, weights=weights, color=color, hist_bin_factor=
hist_bin_factor, smooth=smooth, smooth1d=smooth1d, labels=
labels, label_kwargs=label_kwargs, titles=titles, show_titles=
show_titles, title_quantiles=title_quantiles, title_fmt=
title_fmt, title_kwargs=title_kwargs, truths=truths,
truth_color=truth_color, scale_hist=scale_hist, quantiles=
quantiles, verbose=verbose, fig=fig, max_n_ticks=max_n_ticks,
top_ticks=top_ticks, use_math_text=use_math_text, reverse=
reverse, labelpad=labelpad, hist_kwargs=hist_kwargs, **
hist2d_kwargs)
return arviz_corner(data, bins=bins, range=range, axes_scale=axes_scale,
weights=weights, color=color, hist_bin_factor=hist_bin_factor,
smooth=smooth, smooth1d=smooth1d, labels=labels, label_kwargs=
label_kwargs, titles=titles, show_titles=show_titles,
title_quantiles=title_quantiles, title_fmt=title_fmt, title_kwargs=
title_kwargs, truths=truths, truth_color=truth_color, scale_hist=
scale_hist, quantiles=quantiles, verbose=verbose, fig=fig,
max_n_ticks=max_n_ticks, top_ticks=top_ticks, use_math_text=
use_math_text, reverse=reverse, labelpad=labelpad, hist_kwargs=
hist_kwargs, group=group, var_names=var_names, filter_vars=
filter_vars, coords=coords, divergences=divergences,
divergences_kwargs=divergences_kwargs, labeller=labeller, **
hist2d_kwargs)
<|reserved_special_token_1|>
__all__ = 'corner'
<|reserved_special_token_0|>
try:
from corner.arviz_corner import arviz_corner
except ImportError:
arviz_corner = None
def corner(data, bins=20, *, range=None, axes_scale='linear', weights=None,
color=None, hist_bin_factor=1, smooth=None, smooth1d=None, labels=None,
label_kwargs=None, titles=None, show_titles=False, title_quantiles=None,
title_fmt='.2f', title_kwargs=None, truths=None, truth_color='#4682b4',
scale_hist=False, quantiles=None, verbose=False, fig=None, max_n_ticks=
5, top_ticks=False, use_math_text=False, reverse=False, labelpad=0.0,
hist_kwargs=None, group='posterior', var_names=None, filter_vars=None,
coords=None, divergences=False, divergences_kwargs=None, labeller=None,
**hist2d_kwargs):
"""
Make a *sick* corner plot showing the projections of a data set in a
multi-dimensional space. kwargs are passed to hist2d() or used for
`matplotlib` styling.
Parameters
----------
data : obj
Any object that can be converted to an ``arviz.InferenceData`` object.
Refer to documentation of ``arviz.convert_to_dataset`` for details.
bins : int or array_like[ndim,]
The number of bins to use in histograms, either as a fixed value for
all dimensions, or as a list of integers for each dimension.
group : str
Specifies which InferenceData group should be plotted. Defaults to
``'posterior'``.
var_names : list
Variables to be plotted, if ``None`` all variable are plotted. Prefix
the variables by `~` when you want to exclude them from the plot.
filter_vars : {``None``, ``"like"``, ``"regex"``}
If ``None`` (default), interpret ``var_names`` as the real variables
names. If ``"like"``, interpret ``var_names`` as substrings of the real
variables names. If ``"regex"``, interpret ``var_names`` as regular
expressions on the real variables names. A la ``pandas.filter``.
coords : mapping
Coordinates of ``var_names`` to be plotted. Passed to
``arviz.Dataset.sel``.
divergences : bool
If ``True`` divergences will be plotted in a different color, only if
``group`` is either ``'prior'`` or ``'posterior'``.
divergences_kwargs : dict
Any extra keyword arguments to send to the ``overplot_points`` when
plotting the divergences.
labeller : arviz.Labeller
Class providing the method ``make_label_vert`` to generate the labels
in the plot. Read the ArviZ label guide for more details and usage
examples.
weights : array_like[nsamples,]
The weight of each sample. If `None` (default), samples are given
equal weight.
color : str
A ``matplotlib`` style color for all histograms.
hist_bin_factor : float or array_like[ndim,]
This is a factor (or list of factors, one for each dimension) that
will multiply the bin specifications when making the 1-D histograms.
This is generally used to increase the number of bins in the 1-D plots
to provide more resolution.
smooth, smooth1d : float
The standard deviation for Gaussian kernel passed to
`scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
respectively. If `None` (default), no smoothing is applied.
labels : iterable (ndim,)
A list of names for the dimensions.
.. deprecated:: 2.2.1
If a ``xs`` is a ``pandas.DataFrame`` *and* ArviZ is installed,
labels will default to column names.
This behavior will be removed in version 3;
either use ArviZ data structures instead or pass
``labels=dataframe.columns`` manually.
label_kwargs : dict
Any extra keyword arguments to send to the `set_xlabel` and
`set_ylabel` methods. Note that passing the `labelpad` keyword
in this dictionary will not have the desired effect. Use the
`labelpad` keyword in this function instead.
titles : iterable (ndim,)
A list of titles for the dimensions. If `None` (default),
uses labels as titles.
show_titles : bool
Displays a title above each 1-D histogram showing the 0.5 quantile
with the upper and lower errors supplied by the quantiles argument.
title_quantiles : iterable
A list of 3 fractional quantiles to show as the the upper and lower
errors. If `None` (default), inherit the values from quantiles, unless
quantiles is `None`, in which case it defaults to [0.16,0.5,0.84]
title_fmt : string
The format string for the quantiles given in titles. If you explicitly
set ``show_titles=True`` and ``title_fmt=None``, the labels will be
shown as the titles. (default: ``.2f``)
title_kwargs : dict
Any extra keyword arguments to send to the `set_title` command.
range : iterable (ndim,)
A list where each element is either a length 2 tuple containing
lower and upper bounds or a float in range (0., 1.)
giving the fraction of samples to include in bounds, e.g.,
[(0.,10.), (1.,5), 0.999, etc.].
If a fraction, the bounds are chosen to be equal-tailed.
axes_scale : str or iterable (ndim,)
Scale (``"linear"``, ``"log"``) to use for each data dimension. If only
one scale is specified, use that for all dimensions.
truths : iterable (ndim,)
A list of reference values to indicate on the plots. Individual
values can be omitted by using ``None``.
truth_color : str
A ``matplotlib`` style color for the ``truths`` makers.
scale_hist : bool
Should the 1-D histograms be scaled in such a way that the zero line
is visible?
quantiles : iterable
A list of fractional quantiles to show on the 1-D histograms as
vertical dashed lines.
verbose : bool
If true, print the values of the computed quantiles.
plot_contours : bool
Draw contours for dense regions of the plot.
use_math_text : bool
If true, then axis tick labels for very large or small exponents will
be displayed as powers of 10 rather than using `e`.
reverse : bool
If true, plot the corner plot starting in the upper-right corner
instead of the usual bottom-left corner
labelpad : float
Padding between the axis and the x- and y-labels in units of the
fraction of the axis from the lower left
max_n_ticks: int
Maximum number of ticks to try to use
top_ticks : bool
If true, label the top ticks of each axis
fig : `~matplotlib.figure.Figure`
Overplot onto the provided figure object, which must either have no
axes yet, or ``ndim * ndim`` axes already present. If not set, the
plot will be drawn on a newly created figure.
hist_kwargs : dict
Any extra keyword arguments to send to the 1-D histogram plots.
**hist2d_kwargs
Any remaining keyword arguments are sent to :func:`corner.hist2d` to
generate the 2-D histogram plots.
Returns
-------
fig : `~matplotlib.figure.Figure`
The ``matplotlib`` figure instance for the corner plot.
"""
if arviz_corner is None:
if not (isinstance(data, np.ndarray) or data.__class__.__name__ ==
'DataFrame'):
raise ImportError(
'Please install arviz or use a numpy array as input')
if (var_names is not None or filter_vars is not None or coords is not
None or divergences or divergences_kwargs is not None or
labeller is not None):
logging.warning(
'Please install arviz to use the advanced features of corner')
return corner_impl(data, bins=bins, range=range, axes_scale=
axes_scale, weights=weights, color=color, hist_bin_factor=
hist_bin_factor, smooth=smooth, smooth1d=smooth1d, labels=
labels, label_kwargs=label_kwargs, titles=titles, show_titles=
show_titles, title_quantiles=title_quantiles, title_fmt=
title_fmt, title_kwargs=title_kwargs, truths=truths,
truth_color=truth_color, scale_hist=scale_hist, quantiles=
quantiles, verbose=verbose, fig=fig, max_n_ticks=max_n_ticks,
top_ticks=top_ticks, use_math_text=use_math_text, reverse=
reverse, labelpad=labelpad, hist_kwargs=hist_kwargs, **
hist2d_kwargs)
return arviz_corner(data, bins=bins, range=range, axes_scale=axes_scale,
weights=weights, color=color, hist_bin_factor=hist_bin_factor,
smooth=smooth, smooth1d=smooth1d, labels=labels, label_kwargs=
label_kwargs, titles=titles, show_titles=show_titles,
title_quantiles=title_quantiles, title_fmt=title_fmt, title_kwargs=
title_kwargs, truths=truths, truth_color=truth_color, scale_hist=
scale_hist, quantiles=quantiles, verbose=verbose, fig=fig,
max_n_ticks=max_n_ticks, top_ticks=top_ticks, use_math_text=
use_math_text, reverse=reverse, labelpad=labelpad, hist_kwargs=
hist_kwargs, group=group, var_names=var_names, filter_vars=
filter_vars, coords=coords, divergences=divergences,
divergences_kwargs=divergences_kwargs, labeller=labeller, **
hist2d_kwargs)
<|reserved_special_token_1|>
__all__ = 'corner'
import logging
import numpy as np
from corner.core import corner_impl
# ArviZ support is optional: when the arviz-backed frontend cannot be
# imported, record that with a ``None`` sentinel so corner() below can
# fall back to the plain numpy/pandas implementation.
try:
    from corner.arviz_corner import arviz_corner
except ImportError:
    arviz_corner = None
def corner(data, bins=20, *, range=None, axes_scale='linear', weights=None,
    color=None, hist_bin_factor=1, smooth=None, smooth1d=None, labels=None,
    label_kwargs=None, titles=None, show_titles=False, title_quantiles=None,
    title_fmt='.2f', title_kwargs=None, truths=None, truth_color='#4682b4',
    scale_hist=False, quantiles=None, verbose=False, fig=None, max_n_ticks=
    5, top_ticks=False, use_math_text=False, reverse=False, labelpad=0.0,
    hist_kwargs=None, group='posterior', var_names=None, filter_vars=None,
    coords=None, divergences=False, divergences_kwargs=None, labeller=None,
    **hist2d_kwargs):
    """Draw a corner plot: every 1-D and 2-D projection of a sample set.

    When ArviZ is installed the call is delegated to ``arviz_corner`` and
    ``data`` may be anything convertible to an ``arviz.InferenceData``
    (see ``arviz.convert_to_dataset``). Without ArviZ, only ``numpy``
    arrays and ``pandas.DataFrame`` inputs are accepted and the plot is
    produced by ``corner_impl``.

    Parameters
    ----------
    data : obj
        Samples to plot: an array, a DataFrame, or (with ArviZ) anything
        ``InferenceData``-convertible.
    bins : int or array_like[ndim,]
        Histogram bin count, global or per dimension.
    range, axes_scale, weights, color, hist_bin_factor, smooth, smooth1d :
        Histogramming options forwarded unchanged to the backend.
    labels, label_kwargs, titles, show_titles, title_quantiles, title_fmt,
    title_kwargs : Labeling/title options forwarded to the backend.
    truths, truth_color :
        Reference values to overplot and their color.
    scale_hist, quantiles, verbose :
        1-D histogram scaling, quantile lines, and quantile printing.
    fig : `~matplotlib.figure.Figure`
        Existing figure to draw on (must be empty or hold ndim*ndim axes).
    max_n_ticks, top_ticks, use_math_text, reverse, labelpad :
        Axis/tick/layout cosmetics forwarded to the backend.
    hist_kwargs : dict
        Extra keyword arguments for the 1-D histogram plots.
    group, var_names, filter_vars, coords, divergences,
    divergences_kwargs, labeller :
        ArviZ-only selection/labeling options; ignored (with a warning)
        when ArviZ is unavailable.
    **hist2d_kwargs
        Remaining keyword arguments for :func:`corner.hist2d`.

    Returns
    -------
    fig : `~matplotlib.figure.Figure`
        The figure containing the corner plot.

    Raises
    ------
    ImportError
        If ArviZ is missing and ``data`` is neither a numpy array nor a
        ``pandas.DataFrame``.
    """
    # Keyword arguments understood by both backends, gathered once.
    # Duplicate keys smuggled in through ``hist2d_kwargs`` still raise
    # TypeError at the call site, exactly as explicit keywords would.
    shared = dict(
        bins=bins, range=range, axes_scale=axes_scale, weights=weights,
        color=color, hist_bin_factor=hist_bin_factor, smooth=smooth,
        smooth1d=smooth1d, labels=labels, label_kwargs=label_kwargs,
        titles=titles, show_titles=show_titles,
        title_quantiles=title_quantiles, title_fmt=title_fmt,
        title_kwargs=title_kwargs, truths=truths, truth_color=truth_color,
        scale_hist=scale_hist, quantiles=quantiles, verbose=verbose,
        fig=fig, max_n_ticks=max_n_ticks, top_ticks=top_ticks,
        use_math_text=use_math_text, reverse=reverse, labelpad=labelpad,
        hist_kwargs=hist_kwargs,
    )

    if arviz_corner is None:
        # Fallback path: only raw arrays or DataFrames can be handled.
        is_array = isinstance(data, np.ndarray)
        is_frame = data.__class__.__name__ == 'DataFrame'
        if not (is_array or is_frame):
            raise ImportError(
                'Please install arviz or use a numpy array as input')
        # ArviZ-only options are silently dropped by corner_impl; warn so
        # the caller knows they had no effect.
        uses_arviz_options = (
            var_names is not None
            or filter_vars is not None
            or coords is not None
            or divergences
            or divergences_kwargs is not None
            or labeller is not None
        )
        if uses_arviz_options:
            logging.warning(
                'Please install arviz to use the advanced features of corner')
        return corner_impl(data, **shared, **hist2d_kwargs)

    # ArviZ is available: forward everything, including ArviZ-only options.
    return arviz_corner(
        data,
        **shared,
        group=group,
        var_names=var_names,
        filter_vars=filter_vars,
        coords=coords,
        divergences=divergences,
        divergences_kwargs=divergences_kwargs,
        labeller=labeller,
        **hist2d_kwargs,
    )
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Public star-import surface. Must be a sequence of names: a bare string
# here would make ``from corner import *`` iterate its characters and fail.
__all__ = ["corner"]
import logging
import numpy as np
from corner.core import corner_impl
# ArviZ support is optional: when the arviz-backed frontend cannot be
# imported, record that with a ``None`` sentinel so corner() below can
# fall back to the plain numpy/pandas implementation.
try:
    from corner.arviz_corner import arviz_corner
except ImportError:
    arviz_corner = None
def corner(
    data,
    bins=20,
    *,
    # Original corner parameters
    range=None,
    axes_scale="linear",
    weights=None,
    color=None,
    hist_bin_factor=1,
    smooth=None,
    smooth1d=None,
    labels=None,
    label_kwargs=None,
    titles=None,
    show_titles=False,
    title_quantiles=None,
    title_fmt=".2f",
    title_kwargs=None,
    truths=None,
    truth_color="#4682b4",
    scale_hist=False,
    quantiles=None,
    verbose=False,
    fig=None,
    max_n_ticks=5,
    top_ticks=False,
    use_math_text=False,
    reverse=False,
    labelpad=0.0,
    hist_kwargs=None,
    # Arviz parameters
    group="posterior",
    var_names=None,
    filter_vars=None,
    coords=None,
    divergences=False,
    divergences_kwargs=None,
    labeller=None,
    **hist2d_kwargs,
):
    """Plot all 1-D and 2-D marginal projections of a multi-dimensional
    sample set (a "corner plot").

    This is a thin dispatcher. With ArviZ installed, ``data`` may be any
    object convertible to ``arviz.InferenceData`` and the work is done by
    ``arviz_corner``; otherwise only ``numpy.ndarray`` and
    ``pandas.DataFrame`` inputs are supported via ``corner_impl``.

    Parameters
    ----------
    data : obj
        The samples: array, DataFrame, or (with ArviZ) anything that
        ``arviz.convert_to_dataset`` accepts.
    bins : int or array_like[ndim,]
        Histogram bin count, shared or per dimension.
    range, axes_scale, weights, color, hist_bin_factor, smooth, smooth1d :
        Histogramming and smoothing options, passed through unchanged.
    labels, label_kwargs, titles, show_titles, title_quantiles, title_fmt,
    title_kwargs : Axis-label and title options, passed through unchanged.
    truths, truth_color :
        Reference values to mark on each panel and their color.
    scale_hist, quantiles, verbose :
        1-D histogram scaling, quantile overlays, and quantile printing.
    fig : `~matplotlib.figure.Figure`
        Figure to draw onto; must be empty or contain ndim*ndim axes.
    max_n_ticks, top_ticks, use_math_text, reverse, labelpad :
        Tick and layout cosmetics, passed through unchanged.
    hist_kwargs : dict
        Extra keyword arguments for the 1-D histograms.
    group, var_names, filter_vars, coords, divergences,
    divergences_kwargs, labeller :
        ArviZ-only selection and labeling options; when ArviZ is absent
        these are dropped and a warning is logged.
    **hist2d_kwargs
        Remaining keyword arguments for :func:`corner.hist2d`.

    Returns
    -------
    fig : `~matplotlib.figure.Figure`
        The figure instance holding the corner plot.

    Raises
    ------
    ImportError
        When ArviZ is missing and ``data`` is neither an ndarray nor a
        DataFrame.
    """
    # Options accepted by both backends, collected once so each call site
    # below stays readable. A clash with a key in ``hist2d_kwargs`` still
    # raises TypeError at call time, same as spelling the keywords out.
    forwarded = {
        "bins": bins,
        "range": range,
        "axes_scale": axes_scale,
        "weights": weights,
        "color": color,
        "hist_bin_factor": hist_bin_factor,
        "smooth": smooth,
        "smooth1d": smooth1d,
        "labels": labels,
        "label_kwargs": label_kwargs,
        "titles": titles,
        "show_titles": show_titles,
        "title_quantiles": title_quantiles,
        "title_fmt": title_fmt,
        "title_kwargs": title_kwargs,
        "truths": truths,
        "truth_color": truth_color,
        "scale_hist": scale_hist,
        "quantiles": quantiles,
        "verbose": verbose,
        "fig": fig,
        "max_n_ticks": max_n_ticks,
        "top_ticks": top_ticks,
        "use_math_text": use_math_text,
        "reverse": reverse,
        "labelpad": labelpad,
        "hist_kwargs": hist_kwargs,
    }

    if arviz_corner is not None:
        # Preferred path: ArviZ handles data conversion and the extra
        # selection/labeling options.
        return arviz_corner(
            data,
            **forwarded,
            group=group,
            var_names=var_names,
            filter_vars=filter_vars,
            coords=coords,
            divergences=divergences,
            divergences_kwargs=divergences_kwargs,
            labeller=labeller,
            **hist2d_kwargs,
        )

    # Fallback path without ArviZ: restrict input types up front.
    acceptable = (
        isinstance(data, np.ndarray)
        or data.__class__.__name__ == "DataFrame"
    )
    if not acceptable:
        raise ImportError(
            "Please install arviz or use a numpy array as input"
        )
    # Any ArviZ-specific request cannot be honored here; say so loudly.
    if (
        var_names is not None
        or filter_vars is not None
        or coords is not None
        or divergences
        or divergences_kwargs is not None
        or labeller is not None
    ):
        logging.warning(
            "Please install arviz to use the advanced features of corner"
        )
    return corner_impl(data, **forwarded, **hist2d_kwargs)
|
flexible
|
{
"blob_id": "ae998fb17b8d6f4f5c8871a0ebe86a039501ec99",
"index": 5959,
"step-1": "<mask token>\n\n\ndef corner(data, bins=20, *, range=None, axes_scale='linear', weights=None,\n color=None, hist_bin_factor=1, smooth=None, smooth1d=None, labels=None,\n label_kwargs=None, titles=None, show_titles=False, title_quantiles=None,\n title_fmt='.2f', title_kwargs=None, truths=None, truth_color='#4682b4',\n scale_hist=False, quantiles=None, verbose=False, fig=None, max_n_ticks=\n 5, top_ticks=False, use_math_text=False, reverse=False, labelpad=0.0,\n hist_kwargs=None, group='posterior', var_names=None, filter_vars=None,\n coords=None, divergences=False, divergences_kwargs=None, labeller=None,\n **hist2d_kwargs):\n \"\"\"\n Make a *sick* corner plot showing the projections of a data set in a\n multi-dimensional space. kwargs are passed to hist2d() or used for\n `matplotlib` styling.\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an ``arviz.InferenceData`` object.\n Refer to documentation of ``arviz.convert_to_dataset`` for details.\n\n bins : int or array_like[ndim,]\n The number of bins to use in histograms, either as a fixed value for\n all dimensions, or as a list of integers for each dimension.\n\n group : str\n Specifies which InferenceData group should be plotted. Defaults to\n ``'posterior'``.\n\n var_names : list\n Variables to be plotted, if ``None`` all variable are plotted. Prefix\n the variables by `~` when you want to exclude them from the plot.\n\n filter_vars : {``None``, ``\"like\"``, ``\"regex\"``}\n If ``None`` (default), interpret ``var_names`` as the real variables\n names. If ``\"like\"``, interpret ``var_names`` as substrings of the real\n variables names. If ``\"regex\"``, interpret ``var_names`` as regular\n expressions on the real variables names. A la ``pandas.filter``.\n\n coords : mapping\n Coordinates of ``var_names`` to be plotted. 
Passed to\n ``arviz.Dataset.sel``.\n\n divergences : bool\n If ``True`` divergences will be plotted in a different color, only if\n ``group`` is either ``'prior'`` or ``'posterior'``.\n\n divergences_kwargs : dict\n Any extra keyword arguments to send to the ``overplot_points`` when\n plotting the divergences.\n\n labeller : arviz.Labeller\n Class providing the method ``make_label_vert`` to generate the labels\n in the plot. Read the ArviZ label guide for more details and usage\n examples.\n\n weights : array_like[nsamples,]\n The weight of each sample. If `None` (default), samples are given\n equal weight.\n\n color : str\n A ``matplotlib`` style color for all histograms.\n\n hist_bin_factor : float or array_like[ndim,]\n This is a factor (or list of factors, one for each dimension) that\n will multiply the bin specifications when making the 1-D histograms.\n This is generally used to increase the number of bins in the 1-D plots\n to provide more resolution.\n\n smooth, smooth1d : float\n The standard deviation for Gaussian kernel passed to\n `scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms\n respectively. If `None` (default), no smoothing is applied.\n\n labels : iterable (ndim,)\n A list of names for the dimensions.\n\n .. deprecated:: 2.2.1\n If a ``xs`` is a ``pandas.DataFrame`` *and* ArviZ is installed,\n labels will default to column names.\n This behavior will be removed in version 3;\n either use ArviZ data structures instead or pass\n ``labels=dataframe.columns`` manually.\n\n label_kwargs : dict\n Any extra keyword arguments to send to the `set_xlabel` and\n `set_ylabel` methods. Note that passing the `labelpad` keyword\n in this dictionary will not have the desired effect. Use the\n `labelpad` keyword in this function instead.\n\n titles : iterable (ndim,)\n A list of titles for the dimensions. 
If `None` (default),\n uses labels as titles.\n\n show_titles : bool\n Displays a title above each 1-D histogram showing the 0.5 quantile\n with the upper and lower errors supplied by the quantiles argument.\n\n title_quantiles : iterable\n A list of 3 fractional quantiles to show as the the upper and lower\n errors. If `None` (default), inherit the values from quantiles, unless\n quantiles is `None`, in which case it defaults to [0.16,0.5,0.84]\n\n title_fmt : string\n The format string for the quantiles given in titles. If you explicitly\n set ``show_titles=True`` and ``title_fmt=None``, the labels will be\n shown as the titles. (default: ``.2f``)\n\n title_kwargs : dict\n Any extra keyword arguments to send to the `set_title` command.\n\n range : iterable (ndim,)\n A list where each element is either a length 2 tuple containing\n lower and upper bounds or a float in range (0., 1.)\n giving the fraction of samples to include in bounds, e.g.,\n [(0.,10.), (1.,5), 0.999, etc.].\n If a fraction, the bounds are chosen to be equal-tailed.\n\n axes_scale : str or iterable (ndim,)\n Scale (``\"linear\"``, ``\"log\"``) to use for each data dimension. If only\n one scale is specified, use that for all dimensions.\n\n truths : iterable (ndim,)\n A list of reference values to indicate on the plots. 
Individual\n values can be omitted by using ``None``.\n\n truth_color : str\n A ``matplotlib`` style color for the ``truths`` makers.\n\n scale_hist : bool\n Should the 1-D histograms be scaled in such a way that the zero line\n is visible?\n\n quantiles : iterable\n A list of fractional quantiles to show on the 1-D histograms as\n vertical dashed lines.\n\n verbose : bool\n If true, print the values of the computed quantiles.\n\n plot_contours : bool\n Draw contours for dense regions of the plot.\n\n use_math_text : bool\n If true, then axis tick labels for very large or small exponents will\n be displayed as powers of 10 rather than using `e`.\n\n reverse : bool\n If true, plot the corner plot starting in the upper-right corner\n instead of the usual bottom-left corner\n\n labelpad : float\n Padding between the axis and the x- and y-labels in units of the\n fraction of the axis from the lower left\n\n max_n_ticks: int\n Maximum number of ticks to try to use\n\n top_ticks : bool\n If true, label the top ticks of each axis\n\n fig : `~matplotlib.figure.Figure`\n Overplot onto the provided figure object, which must either have no\n axes yet, or ``ndim * ndim`` axes already present. 
If not set, the\n plot will be drawn on a newly created figure.\n\n hist_kwargs : dict\n Any extra keyword arguments to send to the 1-D histogram plots.\n\n **hist2d_kwargs\n Any remaining keyword arguments are sent to :func:`corner.hist2d` to\n generate the 2-D histogram plots.\n\n Returns\n -------\n fig : `~matplotlib.figure.Figure`\n The ``matplotlib`` figure instance for the corner plot.\n\n \"\"\"\n if arviz_corner is None:\n if not (isinstance(data, np.ndarray) or data.__class__.__name__ ==\n 'DataFrame'):\n raise ImportError(\n 'Please install arviz or use a numpy array as input')\n if (var_names is not None or filter_vars is not None or coords is not\n None or divergences or divergences_kwargs is not None or \n labeller is not None):\n logging.warning(\n 'Please install arviz to use the advanced features of corner')\n return corner_impl(data, bins=bins, range=range, axes_scale=\n axes_scale, weights=weights, color=color, hist_bin_factor=\n hist_bin_factor, smooth=smooth, smooth1d=smooth1d, labels=\n labels, label_kwargs=label_kwargs, titles=titles, show_titles=\n show_titles, title_quantiles=title_quantiles, title_fmt=\n title_fmt, title_kwargs=title_kwargs, truths=truths,\n truth_color=truth_color, scale_hist=scale_hist, quantiles=\n quantiles, verbose=verbose, fig=fig, max_n_ticks=max_n_ticks,\n top_ticks=top_ticks, use_math_text=use_math_text, reverse=\n reverse, labelpad=labelpad, hist_kwargs=hist_kwargs, **\n hist2d_kwargs)\n return arviz_corner(data, bins=bins, range=range, axes_scale=axes_scale,\n weights=weights, color=color, hist_bin_factor=hist_bin_factor,\n smooth=smooth, smooth1d=smooth1d, labels=labels, label_kwargs=\n label_kwargs, titles=titles, show_titles=show_titles,\n title_quantiles=title_quantiles, title_fmt=title_fmt, title_kwargs=\n title_kwargs, truths=truths, truth_color=truth_color, scale_hist=\n scale_hist, quantiles=quantiles, verbose=verbose, fig=fig,\n max_n_ticks=max_n_ticks, top_ticks=top_ticks, use_math_text=\n 
use_math_text, reverse=reverse, labelpad=labelpad, hist_kwargs=\n hist_kwargs, group=group, var_names=var_names, filter_vars=\n filter_vars, coords=coords, divergences=divergences,\n divergences_kwargs=divergences_kwargs, labeller=labeller, **\n hist2d_kwargs)\n",
"step-2": "<mask token>\ntry:\n from corner.arviz_corner import arviz_corner\nexcept ImportError:\n arviz_corner = None\n\n\ndef corner(data, bins=20, *, range=None, axes_scale='linear', weights=None,\n color=None, hist_bin_factor=1, smooth=None, smooth1d=None, labels=None,\n label_kwargs=None, titles=None, show_titles=False, title_quantiles=None,\n title_fmt='.2f', title_kwargs=None, truths=None, truth_color='#4682b4',\n scale_hist=False, quantiles=None, verbose=False, fig=None, max_n_ticks=\n 5, top_ticks=False, use_math_text=False, reverse=False, labelpad=0.0,\n hist_kwargs=None, group='posterior', var_names=None, filter_vars=None,\n coords=None, divergences=False, divergences_kwargs=None, labeller=None,\n **hist2d_kwargs):\n \"\"\"\n Make a *sick* corner plot showing the projections of a data set in a\n multi-dimensional space. kwargs are passed to hist2d() or used for\n `matplotlib` styling.\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an ``arviz.InferenceData`` object.\n Refer to documentation of ``arviz.convert_to_dataset`` for details.\n\n bins : int or array_like[ndim,]\n The number of bins to use in histograms, either as a fixed value for\n all dimensions, or as a list of integers for each dimension.\n\n group : str\n Specifies which InferenceData group should be plotted. Defaults to\n ``'posterior'``.\n\n var_names : list\n Variables to be plotted, if ``None`` all variable are plotted. Prefix\n the variables by `~` when you want to exclude them from the plot.\n\n filter_vars : {``None``, ``\"like\"``, ``\"regex\"``}\n If ``None`` (default), interpret ``var_names`` as the real variables\n names. If ``\"like\"``, interpret ``var_names`` as substrings of the real\n variables names. If ``\"regex\"``, interpret ``var_names`` as regular\n expressions on the real variables names. A la ``pandas.filter``.\n\n coords : mapping\n Coordinates of ``var_names`` to be plotted. 
Passed to\n ``arviz.Dataset.sel``.\n\n divergences : bool\n If ``True`` divergences will be plotted in a different color, only if\n ``group`` is either ``'prior'`` or ``'posterior'``.\n\n divergences_kwargs : dict\n Any extra keyword arguments to send to the ``overplot_points`` when\n plotting the divergences.\n\n labeller : arviz.Labeller\n Class providing the method ``make_label_vert`` to generate the labels\n in the plot. Read the ArviZ label guide for more details and usage\n examples.\n\n weights : array_like[nsamples,]\n The weight of each sample. If `None` (default), samples are given\n equal weight.\n\n color : str\n A ``matplotlib`` style color for all histograms.\n\n hist_bin_factor : float or array_like[ndim,]\n This is a factor (or list of factors, one for each dimension) that\n will multiply the bin specifications when making the 1-D histograms.\n This is generally used to increase the number of bins in the 1-D plots\n to provide more resolution.\n\n smooth, smooth1d : float\n The standard deviation for Gaussian kernel passed to\n `scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms\n respectively. If `None` (default), no smoothing is applied.\n\n labels : iterable (ndim,)\n A list of names for the dimensions.\n\n .. deprecated:: 2.2.1\n If a ``xs`` is a ``pandas.DataFrame`` *and* ArviZ is installed,\n labels will default to column names.\n This behavior will be removed in version 3;\n either use ArviZ data structures instead or pass\n ``labels=dataframe.columns`` manually.\n\n label_kwargs : dict\n Any extra keyword arguments to send to the `set_xlabel` and\n `set_ylabel` methods. Note that passing the `labelpad` keyword\n in this dictionary will not have the desired effect. Use the\n `labelpad` keyword in this function instead.\n\n titles : iterable (ndim,)\n A list of titles for the dimensions. 
If `None` (default),\n uses labels as titles.\n\n show_titles : bool\n Displays a title above each 1-D histogram showing the 0.5 quantile\n with the upper and lower errors supplied by the quantiles argument.\n\n title_quantiles : iterable\n A list of 3 fractional quantiles to show as the the upper and lower\n errors. If `None` (default), inherit the values from quantiles, unless\n quantiles is `None`, in which case it defaults to [0.16,0.5,0.84]\n\n title_fmt : string\n The format string for the quantiles given in titles. If you explicitly\n set ``show_titles=True`` and ``title_fmt=None``, the labels will be\n shown as the titles. (default: ``.2f``)\n\n title_kwargs : dict\n Any extra keyword arguments to send to the `set_title` command.\n\n range : iterable (ndim,)\n A list where each element is either a length 2 tuple containing\n lower and upper bounds or a float in range (0., 1.)\n giving the fraction of samples to include in bounds, e.g.,\n [(0.,10.), (1.,5), 0.999, etc.].\n If a fraction, the bounds are chosen to be equal-tailed.\n\n axes_scale : str or iterable (ndim,)\n Scale (``\"linear\"``, ``\"log\"``) to use for each data dimension. If only\n one scale is specified, use that for all dimensions.\n\n truths : iterable (ndim,)\n A list of reference values to indicate on the plots. 
Individual\n values can be omitted by using ``None``.\n\n truth_color : str\n A ``matplotlib`` style color for the ``truths`` makers.\n\n scale_hist : bool\n Should the 1-D histograms be scaled in such a way that the zero line\n is visible?\n\n quantiles : iterable\n A list of fractional quantiles to show on the 1-D histograms as\n vertical dashed lines.\n\n verbose : bool\n If true, print the values of the computed quantiles.\n\n plot_contours : bool\n Draw contours for dense regions of the plot.\n\n use_math_text : bool\n If true, then axis tick labels for very large or small exponents will\n be displayed as powers of 10 rather than using `e`.\n\n reverse : bool\n If true, plot the corner plot starting in the upper-right corner\n instead of the usual bottom-left corner\n\n labelpad : float\n Padding between the axis and the x- and y-labels in units of the\n fraction of the axis from the lower left\n\n max_n_ticks: int\n Maximum number of ticks to try to use\n\n top_ticks : bool\n If true, label the top ticks of each axis\n\n fig : `~matplotlib.figure.Figure`\n Overplot onto the provided figure object, which must either have no\n axes yet, or ``ndim * ndim`` axes already present. 
If not set, the\n plot will be drawn on a newly created figure.\n\n hist_kwargs : dict\n Any extra keyword arguments to send to the 1-D histogram plots.\n\n **hist2d_kwargs\n Any remaining keyword arguments are sent to :func:`corner.hist2d` to\n generate the 2-D histogram plots.\n\n Returns\n -------\n fig : `~matplotlib.figure.Figure`\n The ``matplotlib`` figure instance for the corner plot.\n\n \"\"\"\n if arviz_corner is None:\n if not (isinstance(data, np.ndarray) or data.__class__.__name__ ==\n 'DataFrame'):\n raise ImportError(\n 'Please install arviz or use a numpy array as input')\n if (var_names is not None or filter_vars is not None or coords is not\n None or divergences or divergences_kwargs is not None or \n labeller is not None):\n logging.warning(\n 'Please install arviz to use the advanced features of corner')\n return corner_impl(data, bins=bins, range=range, axes_scale=\n axes_scale, weights=weights, color=color, hist_bin_factor=\n hist_bin_factor, smooth=smooth, smooth1d=smooth1d, labels=\n labels, label_kwargs=label_kwargs, titles=titles, show_titles=\n show_titles, title_quantiles=title_quantiles, title_fmt=\n title_fmt, title_kwargs=title_kwargs, truths=truths,\n truth_color=truth_color, scale_hist=scale_hist, quantiles=\n quantiles, verbose=verbose, fig=fig, max_n_ticks=max_n_ticks,\n top_ticks=top_ticks, use_math_text=use_math_text, reverse=\n reverse, labelpad=labelpad, hist_kwargs=hist_kwargs, **\n hist2d_kwargs)\n return arviz_corner(data, bins=bins, range=range, axes_scale=axes_scale,\n weights=weights, color=color, hist_bin_factor=hist_bin_factor,\n smooth=smooth, smooth1d=smooth1d, labels=labels, label_kwargs=\n label_kwargs, titles=titles, show_titles=show_titles,\n title_quantiles=title_quantiles, title_fmt=title_fmt, title_kwargs=\n title_kwargs, truths=truths, truth_color=truth_color, scale_hist=\n scale_hist, quantiles=quantiles, verbose=verbose, fig=fig,\n max_n_ticks=max_n_ticks, top_ticks=top_ticks, use_math_text=\n 
use_math_text, reverse=reverse, labelpad=labelpad, hist_kwargs=\n hist_kwargs, group=group, var_names=var_names, filter_vars=\n filter_vars, coords=coords, divergences=divergences,\n divergences_kwargs=divergences_kwargs, labeller=labeller, **\n hist2d_kwargs)\n",
"step-3": "__all__ = 'corner'\n<mask token>\ntry:\n from corner.arviz_corner import arviz_corner\nexcept ImportError:\n arviz_corner = None\n\n\ndef corner(data, bins=20, *, range=None, axes_scale='linear', weights=None,\n color=None, hist_bin_factor=1, smooth=None, smooth1d=None, labels=None,\n label_kwargs=None, titles=None, show_titles=False, title_quantiles=None,\n title_fmt='.2f', title_kwargs=None, truths=None, truth_color='#4682b4',\n scale_hist=False, quantiles=None, verbose=False, fig=None, max_n_ticks=\n 5, top_ticks=False, use_math_text=False, reverse=False, labelpad=0.0,\n hist_kwargs=None, group='posterior', var_names=None, filter_vars=None,\n coords=None, divergences=False, divergences_kwargs=None, labeller=None,\n **hist2d_kwargs):\n \"\"\"\n Make a *sick* corner plot showing the projections of a data set in a\n multi-dimensional space. kwargs are passed to hist2d() or used for\n `matplotlib` styling.\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an ``arviz.InferenceData`` object.\n Refer to documentation of ``arviz.convert_to_dataset`` for details.\n\n bins : int or array_like[ndim,]\n The number of bins to use in histograms, either as a fixed value for\n all dimensions, or as a list of integers for each dimension.\n\n group : str\n Specifies which InferenceData group should be plotted. Defaults to\n ``'posterior'``.\n\n var_names : list\n Variables to be plotted, if ``None`` all variable are plotted. Prefix\n the variables by `~` when you want to exclude them from the plot.\n\n filter_vars : {``None``, ``\"like\"``, ``\"regex\"``}\n If ``None`` (default), interpret ``var_names`` as the real variables\n names. If ``\"like\"``, interpret ``var_names`` as substrings of the real\n variables names. If ``\"regex\"``, interpret ``var_names`` as regular\n expressions on the real variables names. A la ``pandas.filter``.\n\n coords : mapping\n Coordinates of ``var_names`` to be plotted. 
Passed to\n ``arviz.Dataset.sel``.\n\n divergences : bool\n If ``True`` divergences will be plotted in a different color, only if\n ``group`` is either ``'prior'`` or ``'posterior'``.\n\n divergences_kwargs : dict\n Any extra keyword arguments to send to the ``overplot_points`` when\n plotting the divergences.\n\n labeller : arviz.Labeller\n Class providing the method ``make_label_vert`` to generate the labels\n in the plot. Read the ArviZ label guide for more details and usage\n examples.\n\n weights : array_like[nsamples,]\n The weight of each sample. If `None` (default), samples are given\n equal weight.\n\n color : str\n A ``matplotlib`` style color for all histograms.\n\n hist_bin_factor : float or array_like[ndim,]\n This is a factor (or list of factors, one for each dimension) that\n will multiply the bin specifications when making the 1-D histograms.\n This is generally used to increase the number of bins in the 1-D plots\n to provide more resolution.\n\n smooth, smooth1d : float\n The standard deviation for Gaussian kernel passed to\n `scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms\n respectively. If `None` (default), no smoothing is applied.\n\n labels : iterable (ndim,)\n A list of names for the dimensions.\n\n .. deprecated:: 2.2.1\n If a ``xs`` is a ``pandas.DataFrame`` *and* ArviZ is installed,\n labels will default to column names.\n This behavior will be removed in version 3;\n either use ArviZ data structures instead or pass\n ``labels=dataframe.columns`` manually.\n\n label_kwargs : dict\n Any extra keyword arguments to send to the `set_xlabel` and\n `set_ylabel` methods. Note that passing the `labelpad` keyword\n in this dictionary will not have the desired effect. Use the\n `labelpad` keyword in this function instead.\n\n titles : iterable (ndim,)\n A list of titles for the dimensions. 
If `None` (default),\n uses labels as titles.\n\n show_titles : bool\n Displays a title above each 1-D histogram showing the 0.5 quantile\n with the upper and lower errors supplied by the quantiles argument.\n\n title_quantiles : iterable\n A list of 3 fractional quantiles to show as the the upper and lower\n errors. If `None` (default), inherit the values from quantiles, unless\n quantiles is `None`, in which case it defaults to [0.16,0.5,0.84]\n\n title_fmt : string\n The format string for the quantiles given in titles. If you explicitly\n set ``show_titles=True`` and ``title_fmt=None``, the labels will be\n shown as the titles. (default: ``.2f``)\n\n title_kwargs : dict\n Any extra keyword arguments to send to the `set_title` command.\n\n range : iterable (ndim,)\n A list where each element is either a length 2 tuple containing\n lower and upper bounds or a float in range (0., 1.)\n giving the fraction of samples to include in bounds, e.g.,\n [(0.,10.), (1.,5), 0.999, etc.].\n If a fraction, the bounds are chosen to be equal-tailed.\n\n axes_scale : str or iterable (ndim,)\n Scale (``\"linear\"``, ``\"log\"``) to use for each data dimension. If only\n one scale is specified, use that for all dimensions.\n\n truths : iterable (ndim,)\n A list of reference values to indicate on the plots. 
Individual\n values can be omitted by using ``None``.\n\n truth_color : str\n A ``matplotlib`` style color for the ``truths`` makers.\n\n scale_hist : bool\n Should the 1-D histograms be scaled in such a way that the zero line\n is visible?\n\n quantiles : iterable\n A list of fractional quantiles to show on the 1-D histograms as\n vertical dashed lines.\n\n verbose : bool\n If true, print the values of the computed quantiles.\n\n plot_contours : bool\n Draw contours for dense regions of the plot.\n\n use_math_text : bool\n If true, then axis tick labels for very large or small exponents will\n be displayed as powers of 10 rather than using `e`.\n\n reverse : bool\n If true, plot the corner plot starting in the upper-right corner\n instead of the usual bottom-left corner\n\n labelpad : float\n Padding between the axis and the x- and y-labels in units of the\n fraction of the axis from the lower left\n\n max_n_ticks: int\n Maximum number of ticks to try to use\n\n top_ticks : bool\n If true, label the top ticks of each axis\n\n fig : `~matplotlib.figure.Figure`\n Overplot onto the provided figure object, which must either have no\n axes yet, or ``ndim * ndim`` axes already present. 
If not set, the\n plot will be drawn on a newly created figure.\n\n hist_kwargs : dict\n Any extra keyword arguments to send to the 1-D histogram plots.\n\n **hist2d_kwargs\n Any remaining keyword arguments are sent to :func:`corner.hist2d` to\n generate the 2-D histogram plots.\n\n Returns\n -------\n fig : `~matplotlib.figure.Figure`\n The ``matplotlib`` figure instance for the corner plot.\n\n \"\"\"\n if arviz_corner is None:\n if not (isinstance(data, np.ndarray) or data.__class__.__name__ ==\n 'DataFrame'):\n raise ImportError(\n 'Please install arviz or use a numpy array as input')\n if (var_names is not None or filter_vars is not None or coords is not\n None or divergences or divergences_kwargs is not None or \n labeller is not None):\n logging.warning(\n 'Please install arviz to use the advanced features of corner')\n return corner_impl(data, bins=bins, range=range, axes_scale=\n axes_scale, weights=weights, color=color, hist_bin_factor=\n hist_bin_factor, smooth=smooth, smooth1d=smooth1d, labels=\n labels, label_kwargs=label_kwargs, titles=titles, show_titles=\n show_titles, title_quantiles=title_quantiles, title_fmt=\n title_fmt, title_kwargs=title_kwargs, truths=truths,\n truth_color=truth_color, scale_hist=scale_hist, quantiles=\n quantiles, verbose=verbose, fig=fig, max_n_ticks=max_n_ticks,\n top_ticks=top_ticks, use_math_text=use_math_text, reverse=\n reverse, labelpad=labelpad, hist_kwargs=hist_kwargs, **\n hist2d_kwargs)\n return arviz_corner(data, bins=bins, range=range, axes_scale=axes_scale,\n weights=weights, color=color, hist_bin_factor=hist_bin_factor,\n smooth=smooth, smooth1d=smooth1d, labels=labels, label_kwargs=\n label_kwargs, titles=titles, show_titles=show_titles,\n title_quantiles=title_quantiles, title_fmt=title_fmt, title_kwargs=\n title_kwargs, truths=truths, truth_color=truth_color, scale_hist=\n scale_hist, quantiles=quantiles, verbose=verbose, fig=fig,\n max_n_ticks=max_n_ticks, top_ticks=top_ticks, use_math_text=\n 
use_math_text, reverse=reverse, labelpad=labelpad, hist_kwargs=\n hist_kwargs, group=group, var_names=var_names, filter_vars=\n filter_vars, coords=coords, divergences=divergences,\n divergences_kwargs=divergences_kwargs, labeller=labeller, **\n hist2d_kwargs)\n",
"step-4": "__all__ = 'corner'\nimport logging\nimport numpy as np\nfrom corner.core import corner_impl\ntry:\n from corner.arviz_corner import arviz_corner\nexcept ImportError:\n arviz_corner = None\n\n\ndef corner(data, bins=20, *, range=None, axes_scale='linear', weights=None,\n color=None, hist_bin_factor=1, smooth=None, smooth1d=None, labels=None,\n label_kwargs=None, titles=None, show_titles=False, title_quantiles=None,\n title_fmt='.2f', title_kwargs=None, truths=None, truth_color='#4682b4',\n scale_hist=False, quantiles=None, verbose=False, fig=None, max_n_ticks=\n 5, top_ticks=False, use_math_text=False, reverse=False, labelpad=0.0,\n hist_kwargs=None, group='posterior', var_names=None, filter_vars=None,\n coords=None, divergences=False, divergences_kwargs=None, labeller=None,\n **hist2d_kwargs):\n \"\"\"\n Make a *sick* corner plot showing the projections of a data set in a\n multi-dimensional space. kwargs are passed to hist2d() or used for\n `matplotlib` styling.\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an ``arviz.InferenceData`` object.\n Refer to documentation of ``arviz.convert_to_dataset`` for details.\n\n bins : int or array_like[ndim,]\n The number of bins to use in histograms, either as a fixed value for\n all dimensions, or as a list of integers for each dimension.\n\n group : str\n Specifies which InferenceData group should be plotted. Defaults to\n ``'posterior'``.\n\n var_names : list\n Variables to be plotted, if ``None`` all variable are plotted. Prefix\n the variables by `~` when you want to exclude them from the plot.\n\n filter_vars : {``None``, ``\"like\"``, ``\"regex\"``}\n If ``None`` (default), interpret ``var_names`` as the real variables\n names. If ``\"like\"``, interpret ``var_names`` as substrings of the real\n variables names. If ``\"regex\"``, interpret ``var_names`` as regular\n expressions on the real variables names. 
A la ``pandas.filter``.\n\n coords : mapping\n Coordinates of ``var_names`` to be plotted. Passed to\n ``arviz.Dataset.sel``.\n\n divergences : bool\n If ``True`` divergences will be plotted in a different color, only if\n ``group`` is either ``'prior'`` or ``'posterior'``.\n\n divergences_kwargs : dict\n Any extra keyword arguments to send to the ``overplot_points`` when\n plotting the divergences.\n\n labeller : arviz.Labeller\n Class providing the method ``make_label_vert`` to generate the labels\n in the plot. Read the ArviZ label guide for more details and usage\n examples.\n\n weights : array_like[nsamples,]\n The weight of each sample. If `None` (default), samples are given\n equal weight.\n\n color : str\n A ``matplotlib`` style color for all histograms.\n\n hist_bin_factor : float or array_like[ndim,]\n This is a factor (or list of factors, one for each dimension) that\n will multiply the bin specifications when making the 1-D histograms.\n This is generally used to increase the number of bins in the 1-D plots\n to provide more resolution.\n\n smooth, smooth1d : float\n The standard deviation for Gaussian kernel passed to\n `scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms\n respectively. If `None` (default), no smoothing is applied.\n\n labels : iterable (ndim,)\n A list of names for the dimensions.\n\n .. deprecated:: 2.2.1\n If a ``xs`` is a ``pandas.DataFrame`` *and* ArviZ is installed,\n labels will default to column names.\n This behavior will be removed in version 3;\n either use ArviZ data structures instead or pass\n ``labels=dataframe.columns`` manually.\n\n label_kwargs : dict\n Any extra keyword arguments to send to the `set_xlabel` and\n `set_ylabel` methods. Note that passing the `labelpad` keyword\n in this dictionary will not have the desired effect. Use the\n `labelpad` keyword in this function instead.\n\n titles : iterable (ndim,)\n A list of titles for the dimensions. 
If `None` (default),\n uses labels as titles.\n\n show_titles : bool\n Displays a title above each 1-D histogram showing the 0.5 quantile\n with the upper and lower errors supplied by the quantiles argument.\n\n title_quantiles : iterable\n A list of 3 fractional quantiles to show as the the upper and lower\n errors. If `None` (default), inherit the values from quantiles, unless\n quantiles is `None`, in which case it defaults to [0.16,0.5,0.84]\n\n title_fmt : string\n The format string for the quantiles given in titles. If you explicitly\n set ``show_titles=True`` and ``title_fmt=None``, the labels will be\n shown as the titles. (default: ``.2f``)\n\n title_kwargs : dict\n Any extra keyword arguments to send to the `set_title` command.\n\n range : iterable (ndim,)\n A list where each element is either a length 2 tuple containing\n lower and upper bounds or a float in range (0., 1.)\n giving the fraction of samples to include in bounds, e.g.,\n [(0.,10.), (1.,5), 0.999, etc.].\n If a fraction, the bounds are chosen to be equal-tailed.\n\n axes_scale : str or iterable (ndim,)\n Scale (``\"linear\"``, ``\"log\"``) to use for each data dimension. If only\n one scale is specified, use that for all dimensions.\n\n truths : iterable (ndim,)\n A list of reference values to indicate on the plots. 
Individual\n values can be omitted by using ``None``.\n\n truth_color : str\n A ``matplotlib`` style color for the ``truths`` makers.\n\n scale_hist : bool\n Should the 1-D histograms be scaled in such a way that the zero line\n is visible?\n\n quantiles : iterable\n A list of fractional quantiles to show on the 1-D histograms as\n vertical dashed lines.\n\n verbose : bool\n If true, print the values of the computed quantiles.\n\n plot_contours : bool\n Draw contours for dense regions of the plot.\n\n use_math_text : bool\n If true, then axis tick labels for very large or small exponents will\n be displayed as powers of 10 rather than using `e`.\n\n reverse : bool\n If true, plot the corner plot starting in the upper-right corner\n instead of the usual bottom-left corner\n\n labelpad : float\n Padding between the axis and the x- and y-labels in units of the\n fraction of the axis from the lower left\n\n max_n_ticks: int\n Maximum number of ticks to try to use\n\n top_ticks : bool\n If true, label the top ticks of each axis\n\n fig : `~matplotlib.figure.Figure`\n Overplot onto the provided figure object, which must either have no\n axes yet, or ``ndim * ndim`` axes already present. 
If not set, the\n plot will be drawn on a newly created figure.\n\n hist_kwargs : dict\n Any extra keyword arguments to send to the 1-D histogram plots.\n\n **hist2d_kwargs\n Any remaining keyword arguments are sent to :func:`corner.hist2d` to\n generate the 2-D histogram plots.\n\n Returns\n -------\n fig : `~matplotlib.figure.Figure`\n The ``matplotlib`` figure instance for the corner plot.\n\n \"\"\"\n if arviz_corner is None:\n if not (isinstance(data, np.ndarray) or data.__class__.__name__ ==\n 'DataFrame'):\n raise ImportError(\n 'Please install arviz or use a numpy array as input')\n if (var_names is not None or filter_vars is not None or coords is not\n None or divergences or divergences_kwargs is not None or \n labeller is not None):\n logging.warning(\n 'Please install arviz to use the advanced features of corner')\n return corner_impl(data, bins=bins, range=range, axes_scale=\n axes_scale, weights=weights, color=color, hist_bin_factor=\n hist_bin_factor, smooth=smooth, smooth1d=smooth1d, labels=\n labels, label_kwargs=label_kwargs, titles=titles, show_titles=\n show_titles, title_quantiles=title_quantiles, title_fmt=\n title_fmt, title_kwargs=title_kwargs, truths=truths,\n truth_color=truth_color, scale_hist=scale_hist, quantiles=\n quantiles, verbose=verbose, fig=fig, max_n_ticks=max_n_ticks,\n top_ticks=top_ticks, use_math_text=use_math_text, reverse=\n reverse, labelpad=labelpad, hist_kwargs=hist_kwargs, **\n hist2d_kwargs)\n return arviz_corner(data, bins=bins, range=range, axes_scale=axes_scale,\n weights=weights, color=color, hist_bin_factor=hist_bin_factor,\n smooth=smooth, smooth1d=smooth1d, labels=labels, label_kwargs=\n label_kwargs, titles=titles, show_titles=show_titles,\n title_quantiles=title_quantiles, title_fmt=title_fmt, title_kwargs=\n title_kwargs, truths=truths, truth_color=truth_color, scale_hist=\n scale_hist, quantiles=quantiles, verbose=verbose, fig=fig,\n max_n_ticks=max_n_ticks, top_ticks=top_ticks, use_math_text=\n 
use_math_text, reverse=reverse, labelpad=labelpad, hist_kwargs=\n hist_kwargs, group=group, var_names=var_names, filter_vars=\n filter_vars, coords=coords, divergences=divergences,\n divergences_kwargs=divergences_kwargs, labeller=labeller, **\n hist2d_kwargs)\n",
"step-5": "# -*- coding: utf-8 -*-\n\n__all__ = \"corner\"\n\nimport logging\n\nimport numpy as np\n\nfrom corner.core import corner_impl\n\ntry:\n from corner.arviz_corner import arviz_corner\nexcept ImportError:\n arviz_corner = None\n\n\ndef corner(\n data,\n bins=20,\n *,\n # Original corner parameters\n range=None,\n axes_scale=\"linear\",\n weights=None,\n color=None,\n hist_bin_factor=1,\n smooth=None,\n smooth1d=None,\n labels=None,\n label_kwargs=None,\n titles=None,\n show_titles=False,\n title_quantiles=None,\n title_fmt=\".2f\",\n title_kwargs=None,\n truths=None,\n truth_color=\"#4682b4\",\n scale_hist=False,\n quantiles=None,\n verbose=False,\n fig=None,\n max_n_ticks=5,\n top_ticks=False,\n use_math_text=False,\n reverse=False,\n labelpad=0.0,\n hist_kwargs=None,\n # Arviz parameters\n group=\"posterior\",\n var_names=None,\n filter_vars=None,\n coords=None,\n divergences=False,\n divergences_kwargs=None,\n labeller=None,\n **hist2d_kwargs,\n):\n \"\"\"\n Make a *sick* corner plot showing the projections of a data set in a\n multi-dimensional space. kwargs are passed to hist2d() or used for\n `matplotlib` styling.\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an ``arviz.InferenceData`` object.\n Refer to documentation of ``arviz.convert_to_dataset`` for details.\n\n bins : int or array_like[ndim,]\n The number of bins to use in histograms, either as a fixed value for\n all dimensions, or as a list of integers for each dimension.\n\n group : str\n Specifies which InferenceData group should be plotted. Defaults to\n ``'posterior'``.\n\n var_names : list\n Variables to be plotted, if ``None`` all variable are plotted. Prefix\n the variables by `~` when you want to exclude them from the plot.\n\n filter_vars : {``None``, ``\"like\"``, ``\"regex\"``}\n If ``None`` (default), interpret ``var_names`` as the real variables\n names. If ``\"like\"``, interpret ``var_names`` as substrings of the real\n variables names. 
If ``\"regex\"``, interpret ``var_names`` as regular\n expressions on the real variables names. A la ``pandas.filter``.\n\n coords : mapping\n Coordinates of ``var_names`` to be plotted. Passed to\n ``arviz.Dataset.sel``.\n\n divergences : bool\n If ``True`` divergences will be plotted in a different color, only if\n ``group`` is either ``'prior'`` or ``'posterior'``.\n\n divergences_kwargs : dict\n Any extra keyword arguments to send to the ``overplot_points`` when\n plotting the divergences.\n\n labeller : arviz.Labeller\n Class providing the method ``make_label_vert`` to generate the labels\n in the plot. Read the ArviZ label guide for more details and usage\n examples.\n\n weights : array_like[nsamples,]\n The weight of each sample. If `None` (default), samples are given\n equal weight.\n\n color : str\n A ``matplotlib`` style color for all histograms.\n\n hist_bin_factor : float or array_like[ndim,]\n This is a factor (or list of factors, one for each dimension) that\n will multiply the bin specifications when making the 1-D histograms.\n This is generally used to increase the number of bins in the 1-D plots\n to provide more resolution.\n\n smooth, smooth1d : float\n The standard deviation for Gaussian kernel passed to\n `scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms\n respectively. If `None` (default), no smoothing is applied.\n\n labels : iterable (ndim,)\n A list of names for the dimensions.\n\n .. deprecated:: 2.2.1\n If a ``xs`` is a ``pandas.DataFrame`` *and* ArviZ is installed,\n labels will default to column names.\n This behavior will be removed in version 3;\n either use ArviZ data structures instead or pass\n ``labels=dataframe.columns`` manually.\n\n label_kwargs : dict\n Any extra keyword arguments to send to the `set_xlabel` and\n `set_ylabel` methods. Note that passing the `labelpad` keyword\n in this dictionary will not have the desired effect. 
Use the\n `labelpad` keyword in this function instead.\n\n titles : iterable (ndim,)\n A list of titles for the dimensions. If `None` (default),\n uses labels as titles.\n\n show_titles : bool\n Displays a title above each 1-D histogram showing the 0.5 quantile\n with the upper and lower errors supplied by the quantiles argument.\n\n title_quantiles : iterable\n A list of 3 fractional quantiles to show as the the upper and lower\n errors. If `None` (default), inherit the values from quantiles, unless\n quantiles is `None`, in which case it defaults to [0.16,0.5,0.84]\n\n title_fmt : string\n The format string for the quantiles given in titles. If you explicitly\n set ``show_titles=True`` and ``title_fmt=None``, the labels will be\n shown as the titles. (default: ``.2f``)\n\n title_kwargs : dict\n Any extra keyword arguments to send to the `set_title` command.\n\n range : iterable (ndim,)\n A list where each element is either a length 2 tuple containing\n lower and upper bounds or a float in range (0., 1.)\n giving the fraction of samples to include in bounds, e.g.,\n [(0.,10.), (1.,5), 0.999, etc.].\n If a fraction, the bounds are chosen to be equal-tailed.\n\n axes_scale : str or iterable (ndim,)\n Scale (``\"linear\"``, ``\"log\"``) to use for each data dimension. If only\n one scale is specified, use that for all dimensions.\n\n truths : iterable (ndim,)\n A list of reference values to indicate on the plots. 
Individual\n values can be omitted by using ``None``.\n\n truth_color : str\n A ``matplotlib`` style color for the ``truths`` makers.\n\n scale_hist : bool\n Should the 1-D histograms be scaled in such a way that the zero line\n is visible?\n\n quantiles : iterable\n A list of fractional quantiles to show on the 1-D histograms as\n vertical dashed lines.\n\n verbose : bool\n If true, print the values of the computed quantiles.\n\n plot_contours : bool\n Draw contours for dense regions of the plot.\n\n use_math_text : bool\n If true, then axis tick labels for very large or small exponents will\n be displayed as powers of 10 rather than using `e`.\n\n reverse : bool\n If true, plot the corner plot starting in the upper-right corner\n instead of the usual bottom-left corner\n\n labelpad : float\n Padding between the axis and the x- and y-labels in units of the\n fraction of the axis from the lower left\n\n max_n_ticks: int\n Maximum number of ticks to try to use\n\n top_ticks : bool\n If true, label the top ticks of each axis\n\n fig : `~matplotlib.figure.Figure`\n Overplot onto the provided figure object, which must either have no\n axes yet, or ``ndim * ndim`` axes already present. 
If not set, the\n plot will be drawn on a newly created figure.\n\n hist_kwargs : dict\n Any extra keyword arguments to send to the 1-D histogram plots.\n\n **hist2d_kwargs\n Any remaining keyword arguments are sent to :func:`corner.hist2d` to\n generate the 2-D histogram plots.\n\n Returns\n -------\n fig : `~matplotlib.figure.Figure`\n The ``matplotlib`` figure instance for the corner plot.\n\n \"\"\"\n if arviz_corner is None:\n if not (\n isinstance(data, np.ndarray)\n or data.__class__.__name__ == \"DataFrame\"\n ):\n raise ImportError(\n \"Please install arviz or use a numpy array as input\"\n )\n\n if (\n var_names is not None\n or filter_vars is not None\n or coords is not None\n or divergences\n or divergences_kwargs is not None\n or labeller is not None\n ):\n logging.warning(\n \"Please install arviz to use the advanced features of corner\"\n )\n\n return corner_impl(\n data,\n bins=bins,\n range=range,\n axes_scale=axes_scale,\n weights=weights,\n color=color,\n hist_bin_factor=hist_bin_factor,\n smooth=smooth,\n smooth1d=smooth1d,\n labels=labels,\n label_kwargs=label_kwargs,\n titles=titles,\n show_titles=show_titles,\n title_quantiles=title_quantiles,\n title_fmt=title_fmt,\n title_kwargs=title_kwargs,\n truths=truths,\n truth_color=truth_color,\n scale_hist=scale_hist,\n quantiles=quantiles,\n verbose=verbose,\n fig=fig,\n max_n_ticks=max_n_ticks,\n top_ticks=top_ticks,\n use_math_text=use_math_text,\n reverse=reverse,\n labelpad=labelpad,\n hist_kwargs=hist_kwargs,\n **hist2d_kwargs,\n )\n\n return arviz_corner(\n data,\n bins=bins,\n range=range,\n axes_scale=axes_scale,\n weights=weights,\n color=color,\n hist_bin_factor=hist_bin_factor,\n smooth=smooth,\n smooth1d=smooth1d,\n labels=labels,\n label_kwargs=label_kwargs,\n titles=titles,\n show_titles=show_titles,\n title_quantiles=title_quantiles,\n title_fmt=title_fmt,\n title_kwargs=title_kwargs,\n truths=truths,\n truth_color=truth_color,\n scale_hist=scale_hist,\n quantiles=quantiles,\n 
verbose=verbose,\n fig=fig,\n max_n_ticks=max_n_ticks,\n top_ticks=top_ticks,\n use_math_text=use_math_text,\n reverse=reverse,\n labelpad=labelpad,\n hist_kwargs=hist_kwargs,\n group=group,\n var_names=var_names,\n filter_vars=filter_vars,\n coords=coords,\n divergences=divergences,\n divergences_kwargs=divergences_kwargs,\n labeller=labeller,\n **hist2d_kwargs,\n )\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from flask import Blueprint, render_template, request, session, url_for, redirect
from flask_socketio import join_room, leave_room, send, emit
from models.game.game import Game
from models.games.games import Games
from decorators.req_login import requires_login
# Blueprint grouping the game-related routes; registered by the app factory.
game_blueprint = Blueprint('game', __name__)


@game_blueprint.route('/<string:game_id>')
@requires_login
def game_index(game_id):
    """Render the main game page for a logged-in user.

    NOTE(review): ``game_id`` is captured from the URL but not used here —
    presumably the page resolves the game elsewhere; confirm with the template.
    """
    template = 'game/game.html'
    return render_template(template)
|
normal
|
{
"blob_id": "1ccb23435d8501ed82debf91bd6bf856830d01cb",
"index": 6063,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@game_blueprint.route('/<string:game_id>')\n@requires_login\ndef game_index(game_id):\n return render_template('game/game.html')\n",
"step-3": "<mask token>\ngame_blueprint = Blueprint('game', __name__)\n\n\n@game_blueprint.route('/<string:game_id>')\n@requires_login\ndef game_index(game_id):\n return render_template('game/game.html')\n",
"step-4": "from flask import Blueprint, render_template, request, session, url_for, redirect\nfrom flask_socketio import join_room, leave_room, send, emit\nfrom models.game.game import Game\nfrom models.games.games import Games\nfrom decorators.req_login import requires_login\ngame_blueprint = Blueprint('game', __name__)\n\n\n@game_blueprint.route('/<string:game_id>')\n@requires_login\ndef game_index(game_id):\n return render_template('game/game.html')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
def tanh(x):
    """Hyperbolic-tangent activation; thin wrapper around ``np.tanh``."""
    return np.tanh(x)
def tanh_deriv(x):
    """Derivative of tanh: ``1 - tanh(x)**2``.

    Evaluates ``np.tanh`` only once (the original computed it twice).
    """
    t = np.tanh(x)
    return 1.0 - t * t
def logistic(x):
    """Logistic (sigmoid) activation: ``1 / (1 + e**-x)``."""
    return 1.0 / (1.0 + np.exp(-x))
def logistic_derivative(x):
    """Derivative of the sigmoid: ``s * (1 - s)`` with ``s = logistic(x)``.

    Evaluates ``logistic`` only once (the original called it twice).
    """
    s = logistic(x)
    return s * (1 - s)
class NeuralNetwork:
    """A small fully-connected feed-forward network trained with
    single-sample (stochastic) backpropagation."""

    def __init__(self, layers, activation='tanh'):
        """
        :param layers: A list containing the number of units in each layer.
            Should be at least two values.
        :param activation: The activation function to be used. Can be
            "logistic" or "tanh".
        :raises ValueError: if *activation* is not a recognised name.
        """
        if activation == 'logistic':
            self.activation = logistic
            self.activation_deriv = logistic_derivative
        elif activation == 'tanh':
            self.activation = tanh
            self.activation_deriv = tanh_deriv
        else:
            # The original silently left the attributes unset, which crashed
            # later inside fit(); fail fast with a clear message instead.
            raise ValueError("activation must be 'logistic' or 'tanh'")

        # One weight matrix per pair of consecutive layers, drawn uniformly
        # from (-0.25, 0.25).  Hidden layers carry an extra bias unit (+1);
        # the output layer does not.
        self.weights = []
        for i in range(1, len(layers) - 1):
            self.weights.append(
                (2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1)
                * 0.25)
        # Output-layer weights are appended once, AFTER the loop.  The
        # original appended them on every loop iteration, which produced
        # incompatible matrix shapes for networks with more than one hidden
        # layer (and no weights at all for a two-layer net).  For the common
        # three-layer case the draw order and shapes are identical.
        self.weights.append(
            (2 * np.random.random((layers[-2] + 1, layers[-1])) - 1) * 0.25)

    def fit(self, X, y, learning_rate=0.2, epochs=10000):
        """Train the network with stochastic gradient descent.

        :param X: 2-D array-like of training samples, one row per sample.
        :param y: target value(s), one entry per sample.
        :param learning_rate: step size for each weight update.
        :param epochs: number of single-sample update steps to perform.
        """
        # Append a constant-1 column so the bias is folded into the weights.
        X = np.atleast_2d(X)
        temp = np.ones([X.shape[0], X.shape[1] + 1])
        temp[:, 0:-1] = X
        X = temp
        y = np.array(y)

        for _ in range(epochs):
            # Pick one random sample per step (stochastic update).
            i = np.random.randint(X.shape[0])
            a = [X[i]]  # a[l] holds the activations of layer l

            # Forward pass through every weight matrix.
            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l], self.weights[l])))

            # Backward pass: delta for the output layer first ...
            error = y[i] - a[-1]
            deltas = [error * self.activation_deriv(a[-1])]
            # ... then propagate the error back through the hidden layers.
            for l in range(len(a) - 2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T)
                              * self.activation_deriv(a[l]))
            deltas.reverse()

            # Gradient-descent weight update for every layer.
            for j in range(len(self.weights)):
                layer = np.atleast_2d(a[j])
                delta = np.atleast_2d(deltas[j])
                self.weights[j] += learning_rate * layer.T.dot(delta)

    def predict(self, x):
        """Return the network output for a single input sample *x*."""
        x = np.array(x)
        temp = np.ones(x.shape[0] + 1)
        temp[0:-1] = x  # append the bias unit
        a = temp
        for l in range(0, len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a
# Demo: train a tiny 2-2-1 network on the XOR truth table and print its
# prediction for each of the four inputs.
nn = NeuralNetwork([2, 2, 1], 'tanh')
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])
nn.fit(X, y)
for sample in X.tolist():
    print(sample, nn.predict(sample))
# digits = load_digits()
# X = digits.data
# y = digits.target
# X -= X.min() # normalize the values to bring them into the range 0-1
# X /= X.max()
#
# nn = NeuralNetwork([64, 100, 10], 'logistic')
# X_train, X_test, y_train, y_test = train_test_split(X, y)
# labels_train = LabelBinarizer().fit_transform(y_train)
# labels_test = LabelBinarizer().fit_transform(y_test)
# print("start fitting")
# nn.fit(X_train, labels_train, epochs=3000)
# predictions = []
# for i in range(X_test.shape[0]):
# o = nn.predict(X_test[i])
# predictions.append(np.argmax(o))
# print(confusion_matrix(y_test, predictions))
# print(classification_report(y_test, predictions))
|
normal
|
{
"blob_id": "a6a5fddb8e1eda4cc8e9c79ad83019f55d149a80",
"index": 2988,
"step-1": "<mask token>\n\n\ndef tanh(x):\n return np.tanh(x)\n\n\ndef tanh_deriv(x):\n return 1.0 - np.tanh(x) * np.tanh(x)\n\n\n<mask token>\n\n\nclass NeuralNetwork:\n\n def __init__(self, layers, activation='tanh'):\n \"\"\"\n :param layers: A list containing the number of units in each layer.\n Should be at least two values\n :param activation: The activation function to be used. Can be\n \"logistic\" or \"tanh\"\n \"\"\"\n if activation == 'logistic':\n self.activation = logistic\n self.activation_deriv = logistic_derivative\n elif activation == 'tanh':\n self.activation = tanh\n self.activation_deriv = tanh_deriv\n self.weights = []\n for i in range(1, len(layers) - 1):\n print(i)\n self.weights.append((2 * np.random.random((layers[i - 1] + 1, \n layers[i] + 1)) - 1) * 0.25)\n self.weights.append((2 * np.random.random((layers[i] + 1,\n layers[i + 1])) - 1) * 0.25)\n\n def fit(self, X, y, learning_rate=0.2, epochs=10000):\n X = np.atleast_2d(X)\n print(X)\n print(X.shape)\n temp = np.ones([X.shape[0], X.shape[1] + 1])\n temp[:, 0:-1] = X\n X = temp\n print(X)\n y = np.array(y)\n print(y)\n for k in range(epochs):\n i = np.random.randint(X.shape[0])\n a = [X[i]]\n for l in range(len(self.weights)):\n a.append(self.activation(np.dot(a[l], self.weights[l])))\n error = y[i] - a[-1]\n deltas = [error * self.activation_deriv(a[-1])]\n for l in range(len(a) - 2, 0, -1):\n deltas.append(deltas[-1].dot(self.weights[l].T) * self.\n activation_deriv(a[l]))\n deltas.reverse()\n for i in range(len(self.weights)):\n layer = np.atleast_2d(a[i])\n delta = np.atleast_2d(deltas[i])\n self.weights[i] += learning_rate * layer.T.dot(delta)\n\n def predict(self, x):\n x = np.array(x)\n temp = np.ones(x.shape[0] + 1)\n temp[0:-1] = x\n a = temp\n for l in range(0, len(self.weights)):\n a = self.activation(np.dot(a, self.weights[l]))\n return a\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef tanh(x):\n return np.tanh(x)\n\n\ndef tanh_deriv(x):\n return 1.0 - np.tanh(x) * np.tanh(x)\n\n\ndef logistic(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef logistic_derivative(x):\n return logistic(x) * (1 - logistic(x))\n\n\nclass NeuralNetwork:\n\n def __init__(self, layers, activation='tanh'):\n \"\"\"\n :param layers: A list containing the number of units in each layer.\n Should be at least two values\n :param activation: The activation function to be used. Can be\n \"logistic\" or \"tanh\"\n \"\"\"\n if activation == 'logistic':\n self.activation = logistic\n self.activation_deriv = logistic_derivative\n elif activation == 'tanh':\n self.activation = tanh\n self.activation_deriv = tanh_deriv\n self.weights = []\n for i in range(1, len(layers) - 1):\n print(i)\n self.weights.append((2 * np.random.random((layers[i - 1] + 1, \n layers[i] + 1)) - 1) * 0.25)\n self.weights.append((2 * np.random.random((layers[i] + 1,\n layers[i + 1])) - 1) * 0.25)\n\n def fit(self, X, y, learning_rate=0.2, epochs=10000):\n X = np.atleast_2d(X)\n print(X)\n print(X.shape)\n temp = np.ones([X.shape[0], X.shape[1] + 1])\n temp[:, 0:-1] = X\n X = temp\n print(X)\n y = np.array(y)\n print(y)\n for k in range(epochs):\n i = np.random.randint(X.shape[0])\n a = [X[i]]\n for l in range(len(self.weights)):\n a.append(self.activation(np.dot(a[l], self.weights[l])))\n error = y[i] - a[-1]\n deltas = [error * self.activation_deriv(a[-1])]\n for l in range(len(a) - 2, 0, -1):\n deltas.append(deltas[-1].dot(self.weights[l].T) * self.\n activation_deriv(a[l]))\n deltas.reverse()\n for i in range(len(self.weights)):\n layer = np.atleast_2d(a[i])\n delta = np.atleast_2d(deltas[i])\n self.weights[i] += learning_rate * layer.T.dot(delta)\n\n def predict(self, x):\n x = np.array(x)\n temp = np.ones(x.shape[0] + 1)\n temp[0:-1] = x\n a = temp\n for l in range(0, len(self.weights)):\n a = self.activation(np.dot(a, self.weights[l]))\n return a\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef tanh(x):\n return np.tanh(x)\n\n\ndef tanh_deriv(x):\n return 1.0 - np.tanh(x) * np.tanh(x)\n\n\ndef logistic(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef logistic_derivative(x):\n return logistic(x) * (1 - logistic(x))\n\n\nclass NeuralNetwork:\n\n def __init__(self, layers, activation='tanh'):\n \"\"\"\n :param layers: A list containing the number of units in each layer.\n Should be at least two values\n :param activation: The activation function to be used. Can be\n \"logistic\" or \"tanh\"\n \"\"\"\n if activation == 'logistic':\n self.activation = logistic\n self.activation_deriv = logistic_derivative\n elif activation == 'tanh':\n self.activation = tanh\n self.activation_deriv = tanh_deriv\n self.weights = []\n for i in range(1, len(layers) - 1):\n print(i)\n self.weights.append((2 * np.random.random((layers[i - 1] + 1, \n layers[i] + 1)) - 1) * 0.25)\n self.weights.append((2 * np.random.random((layers[i] + 1,\n layers[i + 1])) - 1) * 0.25)\n\n def fit(self, X, y, learning_rate=0.2, epochs=10000):\n X = np.atleast_2d(X)\n print(X)\n print(X.shape)\n temp = np.ones([X.shape[0], X.shape[1] + 1])\n temp[:, 0:-1] = X\n X = temp\n print(X)\n y = np.array(y)\n print(y)\n for k in range(epochs):\n i = np.random.randint(X.shape[0])\n a = [X[i]]\n for l in range(len(self.weights)):\n a.append(self.activation(np.dot(a[l], self.weights[l])))\n error = y[i] - a[-1]\n deltas = [error * self.activation_deriv(a[-1])]\n for l in range(len(a) - 2, 0, -1):\n deltas.append(deltas[-1].dot(self.weights[l].T) * self.\n activation_deriv(a[l]))\n deltas.reverse()\n for i in range(len(self.weights)):\n layer = np.atleast_2d(a[i])\n delta = np.atleast_2d(deltas[i])\n self.weights[i] += learning_rate * layer.T.dot(delta)\n\n def predict(self, x):\n x = np.array(x)\n temp = np.ones(x.shape[0] + 1)\n temp[0:-1] = x\n a = temp\n for l in range(0, len(self.weights)):\n a = self.activation(np.dot(a, self.weights[l]))\n return a\n\n\n<mask token>\nnn.fit(X, 
y)\nfor i in [[0, 0], [0, 1], [1, 0], [1, 1]]:\n print(i, nn.predict(i))\n",
"step-4": "import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_digits\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom sklearn.preprocessing import LabelBinarizer\n\n\ndef tanh(x):\n return np.tanh(x)\n\n\ndef tanh_deriv(x):\n return 1.0 - np.tanh(x) * np.tanh(x)\n\n\ndef logistic(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef logistic_derivative(x):\n return logistic(x) * (1 - logistic(x))\n\n\nclass NeuralNetwork:\n\n def __init__(self, layers, activation='tanh'):\n \"\"\"\n :param layers: A list containing the number of units in each layer.\n Should be at least two values\n :param activation: The activation function to be used. Can be\n \"logistic\" or \"tanh\"\n \"\"\"\n if activation == 'logistic':\n self.activation = logistic\n self.activation_deriv = logistic_derivative\n elif activation == 'tanh':\n self.activation = tanh\n self.activation_deriv = tanh_deriv\n self.weights = []\n for i in range(1, len(layers) - 1):\n print(i)\n self.weights.append((2 * np.random.random((layers[i - 1] + 1, \n layers[i] + 1)) - 1) * 0.25)\n self.weights.append((2 * np.random.random((layers[i] + 1,\n layers[i + 1])) - 1) * 0.25)\n\n def fit(self, X, y, learning_rate=0.2, epochs=10000):\n X = np.atleast_2d(X)\n print(X)\n print(X.shape)\n temp = np.ones([X.shape[0], X.shape[1] + 1])\n temp[:, 0:-1] = X\n X = temp\n print(X)\n y = np.array(y)\n print(y)\n for k in range(epochs):\n i = np.random.randint(X.shape[0])\n a = [X[i]]\n for l in range(len(self.weights)):\n a.append(self.activation(np.dot(a[l], self.weights[l])))\n error = y[i] - a[-1]\n deltas = [error * self.activation_deriv(a[-1])]\n for l in range(len(a) - 2, 0, -1):\n deltas.append(deltas[-1].dot(self.weights[l].T) * self.\n activation_deriv(a[l]))\n deltas.reverse()\n for i in range(len(self.weights)):\n layer = np.atleast_2d(a[i])\n delta = np.atleast_2d(deltas[i])\n self.weights[i] += learning_rate * layer.T.dot(delta)\n\n def 
predict(self, x):\n x = np.array(x)\n temp = np.ones(x.shape[0] + 1)\n temp[0:-1] = x\n a = temp\n for l in range(0, len(self.weights)):\n a = self.activation(np.dot(a, self.weights[l]))\n return a\n\n\nnn = NeuralNetwork([2, 2, 1], 'tanh')\nX = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\ny = np.array([0, 1, 1, 0])\nnn.fit(X, y)\nfor i in [[0, 0], [0, 1], [1, 0], [1, 1]]:\n print(i, nn.predict(i))\n",
"step-5": "import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_digits\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom sklearn.preprocessing import LabelBinarizer\n\n\ndef tanh(x):\n return np.tanh(x)\n\n\ndef tanh_deriv(x):\n return 1.0 - np.tanh(x) * np.tanh(x)\n\n\ndef logistic(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef logistic_derivative(x):\n return logistic(x) * (1 - logistic(x))\n\n\nclass NeuralNetwork:\n def __init__(self, layers, activation='tanh'):\n \"\"\"\n :param layers: A list containing the number of units in each layer.\n Should be at least two values\n :param activation: The activation function to be used. Can be\n \"logistic\" or \"tanh\"\n \"\"\"\n if activation == 'logistic':\n self.activation = logistic\n self.activation_deriv = logistic_derivative\n elif activation == 'tanh':\n self.activation = tanh\n self.activation_deriv = tanh_deriv\n\n self.weights = []\n for i in range(1, len(layers) - 1):\n print(i)\n self.weights.append((2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1) * 0.25)\n self.weights.append((2 * np.random.random((layers[i] + 1, layers[i + 1])) - 1) * 0.25)\n # print(self.weights)\n\n def fit(self, X, y, learning_rate=0.2, epochs=10000):\n # 一. 
给X数据加一列1,相当于后续的偏置所乘的数\n X = np.atleast_2d(X)\n print(X)\n print(X.shape)\n temp = np.ones([X.shape[0], X.shape[1] + 1])\n # print(temp)\n temp[:, 0:-1] = X # adding the bias unit to the input layer\n X = temp\n print(X)\n y = np.array(y)\n print(y)\n\n # 迭代epochs次\n for k in range(epochs):\n # 随机挑选X的一行,i为行号,a为这一行数据,为输入层数据\n i = np.random.randint(X.shape[0])\n a = [X[i]]\n\n # a为每层的值,a[0]为第一层输入层数据,a[1]为第二层输出层数据,a[-1]为最后一层输出层数据\n for l in range(len(self.weights)):\n # 计算每层的结果\n\n a.append(self.activation(np.dot(a[l], self.weights[l])))\n\n # Computer the error at the top layer\n # print(a)\n error = y[i] - a[-1]\n\n # For output layer, Err calculation (delta is updated error)\n deltas = [error * self.activation_deriv(a[-1])]\n\n # Staring backprobagation\n for l in range(len(a) - 2, 0, -1): # we need to begin at the second to last layer\n # Compute the updated error (i,e, deltas) for each node going from top layer to input layer\n deltas.append(deltas[-1].dot(self.weights[l].T) * self.activation_deriv(a[l]))\n deltas.reverse()\n # print(deltas)\n for i in range(len(self.weights)):\n layer = np.atleast_2d(a[i])\n delta = np.atleast_2d(deltas[i])\n self.weights[i] += learning_rate * layer.T.dot(delta)\n\n def predict(self, x):\n x = np.array(x)\n temp = np.ones(x.shape[0] + 1)\n temp[0:-1] = x\n a = temp\n for l in range(0, len(self.weights)):\n a = self.activation(np.dot(a, self.weights[l]))\n return a\n\n\nnn = NeuralNetwork([2, 2, 1], 'tanh')\nX = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\ny = np.array([0, 1, 1, 0])\nnn.fit(X, y)\nfor i in [[0, 0], [0, 1], [1, 0], [1, 1]]:\n print(i, nn.predict(i))\n\n# digits = load_digits()\n# X = digits.data\n# y = digits.target\n# X -= X.min() # normalize the values to bring them into the range 0-1\n# X /= X.max()\n#\n# nn = NeuralNetwork([64, 100, 10], 'logistic')\n# X_train, X_test, y_train, y_test = train_test_split(X, y)\n# labels_train = LabelBinarizer().fit_transform(y_train)\n# labels_test = 
LabelBinarizer().fit_transform(y_test)\n# print(\"start fitting\")\n# nn.fit(X_train, labels_train, epochs=3000)\n# predictions = []\n# for i in range(X_test.shape[0]):\n# o = nn.predict(X_test[i])\n# predictions.append(np.argmax(o))\n# print(confusion_matrix(y_test, predictions))\n# print(classification_report(y_test, predictions))\n",
"step-ids": [
6,
8,
9,
11,
12
]
}
|
[
6,
8,
9,
11,
12
] |
<|reserved_special_token_0|>
class DevelopmentConfig(Config):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ProductionConfig(Config):
DATABASE_URI = ''
class TestingConfig(Config):
TESTING = True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DevelopmentConfig(Config):
DEBUG = True
ENV = 'development'
class ProductionConfig(Config):
DATABASE_URI = ''
class TestingConfig(Config):
TESTING = True
<|reserved_special_token_1|>
class Config:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class DevelopmentConfig(Config):
DEBUG = True
ENV = 'development'
class ProductionConfig(Config):
DATABASE_URI = ''
class TestingConfig(Config):
TESTING = True
<|reserved_special_token_1|>
class Config:
DEBUG = False
TESTING = False
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:root@127.0.0.1:3306/mydb'
SQLALCHEMY_TRACK_MODIFICATIONS = True
SECRET_KEY = 'hdfjds38948938bmbfsd90008'
class DevelopmentConfig(Config):
DEBUG = True
ENV = 'development'
class ProductionConfig(Config):
DATABASE_URI = ''
class TestingConfig(Config):
TESTING = True
<|reserved_special_token_1|>
class Config:
DEBUG = False
TESTING = False
# mysql+pymysql://user:password@host:port/database
# SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://gjp:976431@49.235.194.73:3306/test'
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:root@127.0.0.1:3306/mydb'
SQLALCHEMY_TRACK_MODIFICATIONS = True
SECRET_KEY = 'hdfjds38948938bmbfsd90008'
class DevelopmentConfig(Config):
DEBUG = True
ENV = 'development'
class ProductionConfig(Config):
DATABASE_URI = ''
class TestingConfig(Config):
TESTING = True
|
flexible
|
{
"blob_id": "d89f0ef24d8e8d23a77cbbb0ae8723c7dec8c00a",
"index": 4954,
"step-1": "<mask token>\n\n\nclass DevelopmentConfig(Config):\n <mask token>\n <mask token>\n\n\nclass ProductionConfig(Config):\n DATABASE_URI = ''\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-2": "<mask token>\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n ENV = 'development'\n\n\nclass ProductionConfig(Config):\n DATABASE_URI = ''\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-3": "class Config:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n ENV = 'development'\n\n\nclass ProductionConfig(Config):\n DATABASE_URI = ''\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-4": "class Config:\n DEBUG = False\n TESTING = False\n SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:root@127.0.0.1:3306/mydb'\n SQLALCHEMY_TRACK_MODIFICATIONS = True\n SECRET_KEY = 'hdfjds38948938bmbfsd90008'\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n ENV = 'development'\n\n\nclass ProductionConfig(Config):\n DATABASE_URI = ''\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-5": "class Config:\n DEBUG = False\n TESTING = False\n # mysql+pymysql://user:password@host:port/database\n # SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://gjp:976431@49.235.194.73:3306/test'\n SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:root@127.0.0.1:3306/mydb'\n SQLALCHEMY_TRACK_MODIFICATIONS = True\n SECRET_KEY = 'hdfjds38948938bmbfsd90008'\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n ENV = 'development'\n\n\nclass ProductionConfig(Config):\n DATABASE_URI = ''\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from django.contrib.auth.models import BaseUserManager
class MyUserManager(BaseUserManager):
def create_user(self, email, password, full_name, national_code, mobile, address):
if not email :
raise ValueError('ایمیل الزامی است')
if not full_name :
raise ValueError('نام و نام خانوادگی الزامی است')
if not national_code :
raise ValueError('کدملی الزامی است')
if not mobile :
raise ValueError('موبایل الزامی است')
if not address :
raise ValueError('آدرس الزامی است')
user = self.model(
email = self.normalize_email(email) ,
full_name = full_name ,
national_code = national_code ,
mobile = mobile ,
address = address,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password, full_name, national_code, mobile, address):
user = self.create_user(email, password, full_name, national_code, mobile, address)
user.is_admin = True
user.save(using=self._db)
return user
|
normal
|
{
"blob_id": "f5f14e4d114855b7eef555db182ee991bdf26c39",
"index": 8832,
"step-1": "<mask token>\n\n\nclass MyUserManager(BaseUserManager):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MyUserManager(BaseUserManager):\n <mask token>\n\n def create_superuser(self, email, password, full_name, national_code,\n mobile, address):\n user = self.create_user(email, password, full_name, national_code,\n mobile, address)\n user.is_admin = True\n user.save(using=self._db)\n return user\n",
"step-3": "<mask token>\n\n\nclass MyUserManager(BaseUserManager):\n\n def create_user(self, email, password, full_name, national_code, mobile,\n address):\n if not email:\n raise ValueError('ایمیل الزامی است')\n if not full_name:\n raise ValueError('نام و نام خانوادگی الزامی است')\n if not national_code:\n raise ValueError('کدملی الزامی است')\n if not mobile:\n raise ValueError('موبایل الزامی است')\n if not address:\n raise ValueError('آدرس الزامی است')\n user = self.model(email=self.normalize_email(email), full_name=\n full_name, national_code=national_code, mobile=mobile, address=\n address)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_superuser(self, email, password, full_name, national_code,\n mobile, address):\n user = self.create_user(email, password, full_name, national_code,\n mobile, address)\n user.is_admin = True\n user.save(using=self._db)\n return user\n",
"step-4": "from django.contrib.auth.models import BaseUserManager\n\n\nclass MyUserManager(BaseUserManager):\n\n def create_user(self, email, password, full_name, national_code, mobile,\n address):\n if not email:\n raise ValueError('ایمیل الزامی است')\n if not full_name:\n raise ValueError('نام و نام خانوادگی الزامی است')\n if not national_code:\n raise ValueError('کدملی الزامی است')\n if not mobile:\n raise ValueError('موبایل الزامی است')\n if not address:\n raise ValueError('آدرس الزامی است')\n user = self.model(email=self.normalize_email(email), full_name=\n full_name, national_code=national_code, mobile=mobile, address=\n address)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_superuser(self, email, password, full_name, national_code,\n mobile, address):\n user = self.create_user(email, password, full_name, national_code,\n mobile, address)\n user.is_admin = True\n user.save(using=self._db)\n return user\n",
"step-5": "from django.contrib.auth.models import BaseUserManager\n\n\nclass MyUserManager(BaseUserManager):\n def create_user(self, email, password, full_name, national_code, mobile, address):\n if not email :\n raise ValueError('ایمیل الزامی است')\n if not full_name :\n raise ValueError('نام و نام خانوادگی الزامی است')\n if not national_code :\n raise ValueError('کدملی الزامی است')\n if not mobile :\n raise ValueError('موبایل الزامی است')\n if not address :\n raise ValueError('آدرس الزامی است')\n\n user = self.model(\n email = self.normalize_email(email) ,\n full_name = full_name ,\n national_code = national_code ,\n mobile = mobile ,\n address = address,\n )\n user.set_password(password)\n user.save(using=self._db)\n return user\n \n def create_superuser(self, email, password, full_name, national_code, mobile, address):\n user = self.create_user(email, password, full_name, national_code, mobile, address)\n user.is_admin = True\n user.save(using=self._db)\n return user\n\n\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common import action_chains, keys
from selenium.webdriver.common.action_chains import ActionChains
import time
import unittest
from pprint import pprint
from bs4 import BeautifulSoup
import json
import jsonpickle
import xlrd
import requests
from pyvirtualdisplay import Display
# display = Display(visible=0, size=(800, 800))
# display.start()
class Verify_Idaho_Links(unittest.TestCase):
def test_LB_Maps(self):
testcounter = 0
driver = webdriver.Chrome()
# Idaho
urlID = 'http://crc-prod-id-wf-elb-382957924.us-west-2.elb.amazonaws.com/idlb/'
driver.get(urlID)
_inputs = driver.find_elements_by_xpath('//img')
for input in _inputs:
item = str(input.get_attribute('src'))
if 'https://maps.googleapis.com/maps/api' in item:
print input.get_attribute('src')
linkID = input.get_attribute('src')
#mapIdaho = driver.find_element_by_xpath("//*[@id='j_idt141']/img")
#linkID = mapIdaho.get_attribute('src')
rID = requests.get(linkID)
print rID.status_code
if rID.status_code != 200:
print 'LB Idaho Map Is Down'
# testcounter += 1
# Louisiana
urlLA = 'https://lb.511la.org/lalb/'
driver.get(urlLA)
time.sleep(1)
mapLA = driver.find_element_by_xpath('//*[@id="j_idt155"]/img')
linkLA = mapLA.get_attribute('src')
# test = driver.find_element_by_xpath("//*[text()[contains(.,'mapPanelContent')]]")
# print test
# "//*[contains(text(), 'Delete this route')]"
rLA = requests.get(linkLA)
print rLA.status_code
if rLA.status_code != 200:
print 'LB Loisiana Map Is Down'
testcounter += 1
# Nebraska
urlNE = 'https://lb.511.nebraska.gov/nelb/'
driver.get(urlNE)
mapNE = driver.find_element_by_xpath('//*[@id="j_idt346"]/img')
linkNE = mapNE.get_attribute('src')
rNE = requests.get(linkNE)
print rNE.status_code
if rNE.status_code != 200:
print 'LB Nebraska Map Is Down'
testcounter += 1
# Iowa
urlIA = 'https://lb.511ia.org/ialb/'
driver.get(urlIA)
mapIA = driver.find_element_by_xpath('//*[@id="j_idt383"]/img')
linkIA = mapIA.get_attribute('src')
rIA = requests.get(linkIA)
print rIA.status_code
if rIA.status_code != 200:
print 'LB Iowa Map Is Down'
testcounter += 1
# Sacog
urlSACOG = 'http://sa.carsstage.org/salbweb/'
driver.get(urlSACOG)
mapSACOG = driver.find_element_by_xpath('//*[@id="j_idt122"]/img')
linkSACOG = mapSACOG.get_attribute('src')
rSACOG = requests.get(linkSACOG)
print rSACOG.status_code
if rSACOG.status_code != 200:
print 'LB Sacramento Map Is Down'
testcounter += 1
# Sandag
urlSAN = 'https://lbw.511sd.com/lbweb/'
driver.get(urlSAN)
mapSAN = driver.find_element_by_xpath('//*[@id="j_idt150"]/img')
linkSAN = mapSAN.get_attribute('src')
rSAN = requests.get(linkSAN)
print rSAN.status_code
if rSAN.status_code != 200:
print 'LB San Fransisco Map Is Down'
testcounter += 1
# Minnesota
urlMN = 'https://lb.511mn.org/mnlb/'
driver.get(urlMN)
print driver.title
#imageWait = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, "//*[@id='j_idt369']/img")))
try:
mapMN = driver.find_element_by_xpath('//*[@id="j_idt166"]/img')
except:
try:
mapMN = driver.find_element_by_xpath('//*[@id="j_idt368"]/img')
except:
try:
mapMN = driver.find_element_by_xpath('//*[@id="j_idt365"]/img')
except:
pass
linkMN = mapMN.get_attribute('src')
rMN = requests.get(linkMN)
print rMN.status_code
if rSAN.status_code != 200:
print 'LB Minnesota Map Is Down'
testcounter += 1
driver.quit()
if testcounter > 0:
assert False
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "5f490d6a3444b3b782eed5691c82ab7e4b2e55db",
"index": 8883,
"step-1": "from selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common import action_chains, keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\nimport unittest\nfrom pprint import pprint\nfrom bs4 import BeautifulSoup\nimport json\nimport jsonpickle\nimport xlrd\nimport requests\nfrom pyvirtualdisplay import Display\n\n\n# display = Display(visible=0, size=(800, 800))\n# display.start()\n\nclass Verify_Idaho_Links(unittest.TestCase):\n\n def test_LB_Maps(self):\n\n testcounter = 0\n driver = webdriver.Chrome()\n\n # Idaho\n urlID = 'http://crc-prod-id-wf-elb-382957924.us-west-2.elb.amazonaws.com/idlb/'\n driver.get(urlID)\n\n _inputs = driver.find_elements_by_xpath('//img')\n for input in _inputs:\n item = str(input.get_attribute('src'))\n if 'https://maps.googleapis.com/maps/api' in item:\n print input.get_attribute('src')\n linkID = input.get_attribute('src')\n #mapIdaho = driver.find_element_by_xpath(\"//*[@id='j_idt141']/img\")\n #linkID = mapIdaho.get_attribute('src')\n rID = requests.get(linkID)\n print rID.status_code\n\n if rID.status_code != 200:\n print 'LB Idaho Map Is Down'\n # testcounter += 1\n\n # Louisiana\n urlLA = 'https://lb.511la.org/lalb/'\n driver.get(urlLA)\n time.sleep(1)\n mapLA = driver.find_element_by_xpath('//*[@id=\"j_idt155\"]/img')\n linkLA = mapLA.get_attribute('src')\n\n # test = driver.find_element_by_xpath(\"//*[text()[contains(.,'mapPanelContent')]]\")\n # print test\n # \"//*[contains(text(), 'Delete this route')]\"\n rLA = requests.get(linkLA)\n print rLA.status_code\n\n if rLA.status_code != 200:\n print 'LB Loisiana Map Is Down'\n testcounter += 1\n\n # Nebraska\n urlNE = 
'https://lb.511.nebraska.gov/nelb/'\n driver.get(urlNE)\n mapNE = driver.find_element_by_xpath('//*[@id=\"j_idt346\"]/img')\n linkNE = mapNE.get_attribute('src')\n\n rNE = requests.get(linkNE)\n print rNE.status_code\n\n if rNE.status_code != 200:\n print 'LB Nebraska Map Is Down'\n testcounter += 1\n\n # Iowa\n urlIA = 'https://lb.511ia.org/ialb/'\n driver.get(urlIA)\n mapIA = driver.find_element_by_xpath('//*[@id=\"j_idt383\"]/img')\n linkIA = mapIA.get_attribute('src')\n\n rIA = requests.get(linkIA)\n print rIA.status_code\n\n if rIA.status_code != 200:\n print 'LB Iowa Map Is Down'\n testcounter += 1\n\n # Sacog\n urlSACOG = 'http://sa.carsstage.org/salbweb/'\n driver.get(urlSACOG)\n mapSACOG = driver.find_element_by_xpath('//*[@id=\"j_idt122\"]/img')\n linkSACOG = mapSACOG.get_attribute('src')\n\n rSACOG = requests.get(linkSACOG)\n print rSACOG.status_code\n\n if rSACOG.status_code != 200:\n print 'LB Sacramento Map Is Down'\n testcounter += 1\n\n # Sandag\n urlSAN = 'https://lbw.511sd.com/lbweb/'\n driver.get(urlSAN)\n mapSAN = driver.find_element_by_xpath('//*[@id=\"j_idt150\"]/img')\n linkSAN = mapSAN.get_attribute('src')\n\n rSAN = requests.get(linkSAN)\n print rSAN.status_code\n\n if rSAN.status_code != 200:\n print 'LB San Fransisco Map Is Down'\n testcounter += 1\n\n # Minnesota\n urlMN = 'https://lb.511mn.org/mnlb/'\n driver.get(urlMN)\n print driver.title\n #imageWait = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, \"//*[@id='j_idt369']/img\")))\n try:\n mapMN = driver.find_element_by_xpath('//*[@id=\"j_idt166\"]/img')\n except:\n try:\n mapMN = driver.find_element_by_xpath('//*[@id=\"j_idt368\"]/img')\n except:\n try:\n mapMN = driver.find_element_by_xpath('//*[@id=\"j_idt365\"]/img')\n except:\n pass\n linkMN = mapMN.get_attribute('src')\n\n rMN = requests.get(linkMN)\n print rMN.status_code\n\n if rSAN.status_code != 200:\n print 'LB Minnesota Map Is Down'\n testcounter += 1\n\n driver.quit()\n\n if testcounter > 0:\n 
assert False\n\n\nif __name__ == '__main__':\n unittest.main()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import numpy as np
import cv2
import colorsys
from matplotlib import pyplot as plt
img = cv2.imread('coins.jpg')
b,g,r = cv2.split(img)
rgb_img = cv2.merge([r,g,b])
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Blurring image
grayBlur = cv2.medianBlur(gray, 3)
# Binary threshold
ret, thresh = cv2.threshold(grayBlur, 200,255, cv2.THRESH_BINARY_INV)
# Noise removal
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel,iterations=2)
# Sure background area
sure_bg = cv2.dilate(opening, kernel, iterations=1)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2,5)
ret, sure_fg = cv2.threshold(dist_transform,0.6*dist_transform.max(),255,0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg,sure_fg)
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
markers = cv2.watershed(img,markers)
# Coloring borders black
img[markers == -1] = [0,0,0]
#Color background white
img[markers == 1] = [255, 255, 255]
# Color nodes
nodes = np.amax(markers)
for i in range(nodes):
(r, g, b) = colorsys.hsv_to_rgb(float(i) / nodes, 1.0, 1.0)
R, G, B = int(255 * r), int(255 * g), int(255 * b)
color = [R,G,B]
print(color)
img[markers == i+2] = list(color)
# Add text with coin count
text = 'Coins: ' + (str)(np.amax(markers-1))
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,text,(160,20), font, 0.5,(0,0,0),1,cv2.LINE_AA)
# Plotting
plt.subplot(321), plt.imshow(rgb_img )
plt.title('Input image'), plt.xticks([]), plt.yticks([])
plt.subplot(322),plt.imshow(thresh, 'gray')
plt.title("Binary threshold"), plt.xticks([]), plt.yticks([])
plt.subplot(323),plt.imshow(sure_bg, 'gray')
plt.title("Sure background"), plt.xticks([]), plt.yticks([])
plt.subplot(324),plt.imshow(sure_fg, 'gray')
plt.title("Sure foreground"), plt.xticks([]), plt.yticks([])
plt.subplot(325),plt.imshow(dist_transform, 'gray')
plt.title("Distance transform"), plt.xticks([]), plt.yticks([])
plt.subplot(326),plt.imshow(img, 'gray')
plt.title("Result from watershed"), plt.xticks([]), plt.yticks([])
plt.tight_layout()
plt.show()
|
normal
|
{
"blob_id": "39dda191ab2137b5f5538660f17e39b0a1358bf4",
"index": 206,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(nodes):\n r, g, b = colorsys.hsv_to_rgb(float(i) / nodes, 1.0, 1.0)\n R, G, B = int(255 * r), int(255 * g), int(255 * b)\n color = [R, G, B]\n print(color)\n img[markers == i + 2] = list(color)\n<mask token>\ncv2.putText(img, text, (160, 20), font, 0.5, (0, 0, 0), 1, cv2.LINE_AA)\nplt.subplot(321), plt.imshow(rgb_img)\nplt.title('Input image'), plt.xticks([]), plt.yticks([])\nplt.subplot(322), plt.imshow(thresh, 'gray')\nplt.title('Binary threshold'), plt.xticks([]), plt.yticks([])\nplt.subplot(323), plt.imshow(sure_bg, 'gray')\nplt.title('Sure background'), plt.xticks([]), plt.yticks([])\nplt.subplot(324), plt.imshow(sure_fg, 'gray')\nplt.title('Sure foreground'), plt.xticks([]), plt.yticks([])\nplt.subplot(325), plt.imshow(dist_transform, 'gray')\nplt.title('Distance transform'), plt.xticks([]), plt.yticks([])\nplt.subplot(326), plt.imshow(img, 'gray')\nplt.title('Result from watershed'), plt.xticks([]), plt.yticks([])\nplt.tight_layout()\nplt.show()\n",
"step-3": "<mask token>\nimg = cv2.imread('coins.jpg')\nb, g, r = cv2.split(img)\nrgb_img = cv2.merge([r, g, b])\ngray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\ngrayBlur = cv2.medianBlur(gray, 3)\nret, thresh = cv2.threshold(grayBlur, 200, 255, cv2.THRESH_BINARY_INV)\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))\nopening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)\nsure_bg = cv2.dilate(opening, kernel, iterations=1)\ndist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)\nret, sure_fg = cv2.threshold(dist_transform, 0.6 * dist_transform.max(), 255, 0\n )\nsure_fg = np.uint8(sure_fg)\nunknown = cv2.subtract(sure_bg, sure_fg)\nret, markers = cv2.connectedComponents(sure_fg)\nmarkers = markers + 1\nmarkers[unknown == 255] = 0\nmarkers = cv2.watershed(img, markers)\nimg[markers == -1] = [0, 0, 0]\nimg[markers == 1] = [255, 255, 255]\nnodes = np.amax(markers)\nfor i in range(nodes):\n r, g, b = colorsys.hsv_to_rgb(float(i) / nodes, 1.0, 1.0)\n R, G, B = int(255 * r), int(255 * g), int(255 * b)\n color = [R, G, B]\n print(color)\n img[markers == i + 2] = list(color)\ntext = 'Coins: ' + str(np.amax(markers - 1))\nfont = cv2.FONT_HERSHEY_SIMPLEX\ncv2.putText(img, text, (160, 20), font, 0.5, (0, 0, 0), 1, cv2.LINE_AA)\nplt.subplot(321), plt.imshow(rgb_img)\nplt.title('Input image'), plt.xticks([]), plt.yticks([])\nplt.subplot(322), plt.imshow(thresh, 'gray')\nplt.title('Binary threshold'), plt.xticks([]), plt.yticks([])\nplt.subplot(323), plt.imshow(sure_bg, 'gray')\nplt.title('Sure background'), plt.xticks([]), plt.yticks([])\nplt.subplot(324), plt.imshow(sure_fg, 'gray')\nplt.title('Sure foreground'), plt.xticks([]), plt.yticks([])\nplt.subplot(325), plt.imshow(dist_transform, 'gray')\nplt.title('Distance transform'), plt.xticks([]), plt.yticks([])\nplt.subplot(326), plt.imshow(img, 'gray')\nplt.title('Result from watershed'), plt.xticks([]), plt.yticks([])\nplt.tight_layout()\nplt.show()\n",
"step-4": "import numpy as np\nimport cv2\nimport colorsys\nfrom matplotlib import pyplot as plt\nimg = cv2.imread('coins.jpg')\nb, g, r = cv2.split(img)\nrgb_img = cv2.merge([r, g, b])\ngray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\ngrayBlur = cv2.medianBlur(gray, 3)\nret, thresh = cv2.threshold(grayBlur, 200, 255, cv2.THRESH_BINARY_INV)\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))\nopening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)\nsure_bg = cv2.dilate(opening, kernel, iterations=1)\ndist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)\nret, sure_fg = cv2.threshold(dist_transform, 0.6 * dist_transform.max(), 255, 0\n )\nsure_fg = np.uint8(sure_fg)\nunknown = cv2.subtract(sure_bg, sure_fg)\nret, markers = cv2.connectedComponents(sure_fg)\nmarkers = markers + 1\nmarkers[unknown == 255] = 0\nmarkers = cv2.watershed(img, markers)\nimg[markers == -1] = [0, 0, 0]\nimg[markers == 1] = [255, 255, 255]\nnodes = np.amax(markers)\nfor i in range(nodes):\n r, g, b = colorsys.hsv_to_rgb(float(i) / nodes, 1.0, 1.0)\n R, G, B = int(255 * r), int(255 * g), int(255 * b)\n color = [R, G, B]\n print(color)\n img[markers == i + 2] = list(color)\ntext = 'Coins: ' + str(np.amax(markers - 1))\nfont = cv2.FONT_HERSHEY_SIMPLEX\ncv2.putText(img, text, (160, 20), font, 0.5, (0, 0, 0), 1, cv2.LINE_AA)\nplt.subplot(321), plt.imshow(rgb_img)\nplt.title('Input image'), plt.xticks([]), plt.yticks([])\nplt.subplot(322), plt.imshow(thresh, 'gray')\nplt.title('Binary threshold'), plt.xticks([]), plt.yticks([])\nplt.subplot(323), plt.imshow(sure_bg, 'gray')\nplt.title('Sure background'), plt.xticks([]), plt.yticks([])\nplt.subplot(324), plt.imshow(sure_fg, 'gray')\nplt.title('Sure foreground'), plt.xticks([]), plt.yticks([])\nplt.subplot(325), plt.imshow(dist_transform, 'gray')\nplt.title('Distance transform'), plt.xticks([]), plt.yticks([])\nplt.subplot(326), plt.imshow(img, 'gray')\nplt.title('Result from watershed'), plt.xticks([]), 
plt.yticks([])\nplt.tight_layout()\nplt.show()\n",
"step-5": "import numpy as np\nimport cv2\nimport colorsys\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('coins.jpg')\n\nb,g,r = cv2.split(img)\nrgb_img = cv2.merge([r,g,b])\n\ngray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n# Blurring image\ngrayBlur = cv2.medianBlur(gray, 3)\n\n# Binary threshold\nret, thresh = cv2.threshold(grayBlur, 200,255, cv2.THRESH_BINARY_INV)\n\n# Noise removal\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))\nopening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel,iterations=2)\n\n# Sure background area\nsure_bg = cv2.dilate(opening, kernel, iterations=1)\n\n# Finding sure foreground area\ndist_transform = cv2.distanceTransform(opening, cv2.DIST_L2,5)\nret, sure_fg = cv2.threshold(dist_transform,0.6*dist_transform.max(),255,0)\n\n# Finding unknown region\nsure_fg = np.uint8(sure_fg)\nunknown = cv2.subtract(sure_bg,sure_fg)\n\n# Marker labelling\nret, markers = cv2.connectedComponents(sure_fg)\n\n# Add one to all labels so that sure background is not 0, but 1\nmarkers = markers+1\n\n# Now, mark the region of unknown with zero\nmarkers[unknown==255] = 0\n\nmarkers = cv2.watershed(img,markers)\n\n# Coloring borders black\nimg[markers == -1] = [0,0,0]\n\n#Color background white\nimg[markers == 1] = [255, 255, 255]\n\n# Color nodes\nnodes = np.amax(markers)\nfor i in range(nodes):\n (r, g, b) = colorsys.hsv_to_rgb(float(i) / nodes, 1.0, 1.0)\n R, G, B = int(255 * r), int(255 * g), int(255 * b)\n color = [R,G,B]\n print(color)\n img[markers == i+2] = list(color)\n\n# Add text with coin count\ntext = 'Coins: ' + (str)(np.amax(markers-1))\nfont = cv2.FONT_HERSHEY_SIMPLEX\ncv2.putText(img,text,(160,20), font, 0.5,(0,0,0),1,cv2.LINE_AA)\n\n\n# Plotting\nplt.subplot(321), plt.imshow(rgb_img )\nplt.title('Input image'), plt.xticks([]), plt.yticks([])\nplt.subplot(322),plt.imshow(thresh, 'gray')\nplt.title(\"Binary threshold\"), plt.xticks([]), plt.yticks([])\n\nplt.subplot(323),plt.imshow(sure_bg, 'gray')\nplt.title(\"Sure 
background\"), plt.xticks([]), plt.yticks([])\n\nplt.subplot(324),plt.imshow(sure_fg, 'gray')\nplt.title(\"Sure foreground\"), plt.xticks([]), plt.yticks([])\n\nplt.subplot(325),plt.imshow(dist_transform, 'gray')\nplt.title(\"Distance transform\"), plt.xticks([]), plt.yticks([])\nplt.subplot(326),plt.imshow(img, 'gray')\nplt.title(\"Result from watershed\"), plt.xticks([]), plt.yticks([])\n\nplt.tight_layout()\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from _math import Vector2, Vector3, Quaternion, Transform, Vector3Immutable, QuaternionImmutable, minimum_distance
from _math import mod_2pi
from math import pi as PI, sqrt, fmod, floor, atan2, acos, asin, ceil, pi, e
import operator
from sims4.repr_utils import standard_repr
import enum
import native.animation
import sims4.hash_util
from singletons import DEFAULT
TWO_PI = PI*2
EPSILON = 1.192092896e-07
QUATERNION_EPSILON = 0.001
MAX_FLOAT = 3.402823466e+38
MAX_UINT64 = 18446744073709551615
MAX_INT64 = 9223372036854775807
MAX_UINT32 = 4294967295
MAX_INT32 = 2147483647
MAX_UINT16 = 65535
MAX_INT16 = 32767
POS_INFINITY = float('inf')
NEG_INFINITY = float('-inf')
# Canonical world axes: +Z is "forward" and +Y is "up" (a Y-up coordinate system).
FORWARD_AXIS = Vector3.Z_AXIS()
UP_AXIS = Vector3.Y_AXIS()
def clamp(lower_bound, x, upper_bound):
    """Return x limited to the inclusive range [lower_bound, upper_bound].

    The lower bound is checked first, matching the original precedence when
    the bounds are inverted.
    """
    if x < lower_bound:
        return lower_bound
    return min(x, upper_bound)
def interpolate(a, b, fraction):
    """Linear blend between b and a: fraction weights a, (1 - fraction) weights b.

    Note the convention is reversed from a typical lerp(a, b, t):
    fraction == 0 returns b and fraction == 1 returns a.
    """
    weight_b = 1 - fraction
    return a*fraction + weight_b*b
def linear_seq_gen(start, stop, step, max_count=None):
    """Yield evenly spaced values from start to stop (both inclusive).

    The number of intervals is floor(|stop - start| / step), optionally
    capped so no more than max_count values are produced. If the step is
    larger than the whole span, just start (and stop, when different) are
    yielded.
    """
    span = stop - start
    count = floor(abs(span/step))
    if max_count is not None and count > max_count - 1:
        count = max_count - 1
    if count <= 0:
        yield start
        if stop != start:
            yield stop
        return
    for idx in range(count + 1):
        yield start + idx*span/count
def deg_to_rad(deg):
    """Convert an angle from degrees to radians."""
    scaled = deg*PI
    return scaled/180
def rad_to_deg(rad):
    """Convert an angle from radians to degrees."""
    scaled = rad*180
    return scaled/PI
def angle_abs_difference(a1, a2):
    """Return the smallest absolute difference between two angles, in [0, pi].

    NOTE(review): this function references itself through fully-qualified
    sims4.math names; presumably this module *is* sims4.math, so they resolve
    to the local mod_2pi/PI/TWO_PI -- confirm against the package layout.
    """
    # Wrap the raw difference into [0, 2*pi), then fold onto [0, pi].
    delta = sims4.math.mod_2pi(a1 - a2)
    if delta > sims4.math.PI:
        delta = sims4.math.TWO_PI - delta
    return delta
def vector_dot(a, b):
    """3D dot product of a and b (x, y and z components)."""
    return b.x*a.x + b.y*a.y + b.z*a.z
def vector_dot_2d(a, b):
    """Dot product in the ground (XZ) plane, ignoring the y components."""
    return b.x*a.x + b.z*a.z
def vector_cross(a, b):
    """3D cross product a x b, returned as a new Vector3."""
    cx = a.y*b.z - a.z*b.y
    cy = a.z*b.x - a.x*b.z
    cz = a.x*b.y - a.y*b.x
    return Vector3(cx, cy, cz)
def vector_cross_2d(a, b):
    """Scalar (signed) cross product in the XZ plane: a.z*b.x - a.x*b.z."""
    return b.x*a.z - b.z*a.x
def vector_normalize(v):
    """Return v scaled to unit length.

    Raises ZeroDivisionError for a zero-length vector (unchanged behavior).
    """
    length = v.magnitude()
    return v/length
def vector_flatten(v):
    """Project v onto the ground plane: keep x and z, zero out y."""
    flattened = Vector3(v.x, 0, v.z)
    return flattened
def almost_equal(a, b, epsilon=EPSILON):
    """True when a and b differ by less than epsilon (absolute tolerance)."""
    return abs(b - a) < epsilon
def vector3_almost_equal(v1, v2, epsilon=EPSILON):
    """True when each of x, y and z differs by less than epsilon."""
    return (abs(v1.x - v2.x) < epsilon
        and abs(v1.y - v2.y) < epsilon
        and abs(v1.z - v2.z) < epsilon)
def vector3_almost_equal_2d(v1, v2, epsilon=EPSILON):
    """True when x and z differ by less than epsilon; y is ignored."""
    return (abs(v1.x - v2.x) < epsilon
        and abs(v1.z - v2.z) < epsilon)
def quaternion_almost_equal(q1, q2, epsilon=QUATERNION_EPSILON):
    """True when q1 and q2 represent nearly the same rotation.

    q and -q encode the same rotation, so both the component-wise
    difference and the component-wise sum are tested against epsilon.
    """
    diffs = (q1.x - q2.x, q1.y - q2.y, q1.z - q2.z, q1.w - q2.w)
    if all(abs(d) < epsilon for d in diffs):
        return True
    sums = (q1.x + q2.x, q1.y + q2.y, q1.z + q2.z, q1.w + q2.w)
    if all(abs(s) < epsilon for s in sums):
        return True
    return False
def transform_almost_equal(t1, t2, epsilon=EPSILON, epsilon_orientation=QUATERNION_EPSILON):
    """True when both translation and orientation of t1 and t2 are within tolerance.

    NOTE(review): the DEFAULT-sentinel branch below only triggers when a
    caller explicitly passes epsilon_orientation=DEFAULT; the declared
    default is QUATERNION_EPSILON, so it is never DEFAULT by omission --
    confirm this asymmetry is intended.
    """
    if epsilon_orientation is DEFAULT:
        epsilon_orientation = epsilon
    return vector3_almost_equal(t1.translation, t2.translation, epsilon=epsilon) and quaternion_almost_equal(t1.orientation, t2.orientation, epsilon=epsilon_orientation)
def transform_almost_equal_2d(t1, t2, epsilon=EPSILON, epsilon_orientation=QUATERNION_EPSILON):
    """2D variant of transform_almost_equal: translation compared in the XZ plane only.

    NOTE(review): as with transform_almost_equal, the DEFAULT-sentinel check
    below cannot trigger by omission (the declared default is
    QUATERNION_EPSILON) -- confirm intent.
    """
    if epsilon_orientation is DEFAULT:
        epsilon_orientation = epsilon
    return vector3_almost_equal_2d(t1.translation, t2.translation, epsilon=epsilon) and quaternion_almost_equal(t1.orientation, t2.orientation, epsilon=epsilon_orientation)
def vector3_rotate_axis_angle(v, angle, axis):
    """Rotate vector v by `angle` radians about `axis`."""
    rotation = Quaternion.from_axis_angle(angle, axis)
    return rotation.transform_vector(v)
def vector3_angle(v):
    """Heading angle of v in radians: 0 along +Z, increasing toward +X."""
    sin_component = v.x
    cos_component = v.z
    return atan2(sin_component, cos_component)
def angle_to_yaw_quaternion(angle):
    """Build a quaternion rotating `angle` radians about the world up (y) axis."""
    yaw = Quaternion.from_axis_angle(angle, UP_AXIS)
    return yaw
def yaw_quaternion_to_angle(q):
    """Extract the signed yaw angle in radians from a yaw-only quaternion.

    Assumes q rotates purely about the up (y) axis -- the sign of q.y
    determines the sign of the angle. TODO(review): confirm callers only
    pass yaw-only quaternions.
    """
    if almost_equal(q.y, 0.0):
        return 0
    angle = acos(q.w)*2.0
    return angle if q.y > 0 else -angle
def get_closest_point_2D(segment, p):
    """Return the point on 2D segment (XZ plane) closest to p, at p's height.

    segment is an indexable pair of Vector3-like endpoints. The parametric
    projection of p onto the segment is clamped to [0, 1] so the result
    always lies on the segment. A zero-length segment raises
    ZeroDivisionError (unchanged behavior).
    """
    seg_start = segment[0]
    seg_end = segment[1]
    dx = seg_end.x - seg_start.x
    dz = seg_end.z - seg_start.z
    # Parametric position of p's projection along the segment direction.
    t = ((p.x - seg_start.x)*dx + (p.z - seg_start.z)*dz)/(dx*dx + dz*dz)
    # NOTE(review): uses the fully-qualified sims4.math.clamp -- presumably
    # this module is sims4.math, so it is the local clamp().
    t = sims4.math.clamp(0, t, 1)
    return Vector3(seg_start.x + t*dx, p.y, seg_start.z + t*dz)
def invert_quaternion(q):
    """Return the inverse of quaternion q: conjugate divided by squared norm."""
    norm_sq = q.x*q.x + q.y*q.y + q.z*q.z + q.w*q.w
    d = 1.0/norm_sq
    return Quaternion(-d*q.x, -d*q.y, -d*q.z, d*q.w)
def get_difference_transform(transform_a, transform_b):
    """Return transform_b expressed relative to transform_a's local space."""
    offset = transform_b.translation - transform_a.translation
    inv_a = invert_quaternion(transform_a.orientation)
    relative_orientation = Quaternion.concatenate(transform_b.orientation, inv_a)
    local_offset = Quaternion.transform_vector(inv_a, offset)
    return Transform(local_offset, relative_orientation)
class Location:
    """Spatial position of a game object: a transform on a routing surface,
    optionally attached to a parent object (and a joint/slot on its rig).

    When parented, the stored transform is relative to the parent (or to the
    named joint on the parent's rig); world_transform resolves the chain.
    """
    __qualname__ = 'Location'
    __slots__ = ('transform', 'routing_surface', '_parent_ref', 'joint_name_or_hash', 'slot_hash')
    def __init__(self, transform, routing_surface, parent=None, joint_name_or_hash=None, slot_hash=0):
        self.transform = transform
        self.routing_surface = routing_surface
        # Goes through the property setter below: stores a weak reference and
        # clears routing_surface when a parent is provided.
        self.parent = parent
        self.joint_name_or_hash = joint_name_or_hash
        self.slot_hash = slot_hash
    def __repr__(self):
        return standard_repr(self, self.transform, self.routing_surface, parent=self.parent, joint_name_or_hash=self.joint_name_or_hash, slot_hash=self.slot_hash)
    def __eq__(self, other):
        # Exact type match (not isinstance): subclasses never compare equal.
        if type(self) is not type(other):
            return False
        if self.transform != other.transform:
            return False
        if self.parent != other.parent:
            return False
        if self.routing_surface != other.routing_surface:
            return False
        # On each side, a joint name/hash takes precedence over the slot hash.
        slot_hash0 = self.joint_name_or_hash or self.slot_hash
        slot_hash1 = other.joint_name_or_hash or other.slot_hash
        if slot_hash0 != slot_hash1:
            return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    @property
    def parent(self):
        # Dereference the weak ref; returns None if unset or the parent died.
        if self._parent_ref is not None:
            return self._parent_ref()
    @parent.setter
    def parent(self, value):
        if value is not None:
            # Weak reference avoids a parent<->child reference cycle.
            self._parent_ref = value.ref()
            # Parented locations derive their surface from the parent chain
            # (see world_routing_surface), so the local one is dropped.
            self.routing_surface = None
        else:
            self._parent_ref = None
    @property
    def joint_name_hash(self):
        """32-bit hash of the joint name; 0 when no joint is set."""
        if self.joint_name_or_hash is None:
            return 0
        if isinstance(self.joint_name_or_hash, int):
            return self.joint_name_or_hash
        return sims4.hash_util.hash32(self.joint_name_or_hash)
    @property
    def world_routing_surface(self):
        """Routing surface resolved through the parent chain."""
        if self.parent is not None:
            return self.parent.location.world_routing_surface
        return self.routing_surface
    @property
    def zone_id(self):
        # NOTE(review): surface type 1 presumably carries the zone id in
        # primary_id; otherwise the current zone is queried. sims4.zone_utils
        # is not imported in this chunk -- confirm it is loaded through the
        # sims4 package at runtime.
        if self.world_routing_surface.type == 1:
            return self.world_routing_surface.primary_id
        return sims4.zone_utils.get_zone_id()
    @property
    def level(self):
        return self.world_routing_surface.secondary_id
    @property
    def world_transform(self):
        """World-space transform, composing parent (and joint) transforms."""
        if self.parent is None:
            return self.transform
        transform = self.transform
        parent = self.parent
        # Parts delegate their world placement to the owning object.
        if parent.is_part:
            parent_transform = parent.part_owner.transform
        else:
            parent_transform = parent.transform
        if self.joint_name_or_hash is None:
            if transform is None:
                return parent_transform
            return sims4.math.Transform.concatenate(transform, parent_transform)
        # Attached to a joint: compose local -> joint -> parent.
        joint_transform = native.animation.get_joint_transform_from_rig(self.parent.rig, self.joint_name_or_hash)
        if transform is None:
            return sims4.math.Transform.concatenate(joint_transform, parent_transform)
        local_transform = sims4.math.Transform.concatenate(transform, joint_transform)
        return sims4.math.Transform.concatenate(local_transform, parent_transform)
    def duplicate(self):
        """Return a shallow copy with the same parent/joint/slot."""
        return type(self)(self.transform, self.routing_surface, self.parent, self.joint_name_or_hash, self.slot_hash)
    def clone(self, *, transform=DEFAULT, translation=DEFAULT, orientation=DEFAULT, routing_surface=DEFAULT, parent=DEFAULT, joint_name_or_hash=DEFAULT, slot_hash=DEFAULT):
        """Return a copy with selected fields overridden; DEFAULT keeps the current value."""
        if transform is DEFAULT:
            transform = self.transform
        if transform is not None:
            # Allow overriding translation/orientation independently of a
            # whole replacement transform.
            if translation is DEFAULT:
                translation = transform.translation
            if orientation is DEFAULT:
                orientation = transform.orientation
            transform = Transform(translation, orientation)
        if routing_surface is DEFAULT:
            routing_surface = self.routing_surface
        if parent is DEFAULT:
            parent = self.parent
        if joint_name_or_hash is DEFAULT:
            joint_name_or_hash = self.joint_name_or_hash
        if slot_hash is DEFAULT:
            slot_hash = self.slot_hash
        return type(self)(transform, routing_surface, parent, joint_name_or_hash, slot_hash)
class LinearCurve:
    """Piecewise-linear curve over a list of (x, y) points.

    get(x) clamps to the first/last y outside the point range and linearly
    interpolates between the two bracketing points inside it.
    """
    __qualname__ = 'LinearCurve'
    __slots__ = ('points',)
    def __init__(self, points):
        # Points are kept sorted by x so get() can find the bracketing pair.
        self.points = points
        self.points.sort(key=lambda i: i[0])
    def get(self, val):
        p_max = len(self.points) - 1
        # Clamp outside the defined x range.
        if val <= self.points[0][0]:
            return self.points[0][1]
        if val >= self.points[p_max][0]:
            return self.points[p_max][1]
        # Scan back to the last point at or below val. The previous
        # implementation guarded the whole scan with `while i > 0:` and
        # returned from inside it, so for a two-point curve (p_max == 1,
        # i == 0) it skipped everything and fell through returning None.
        i = p_max - 1
        while i > 0 and val < self.points[i][0]:
            i -= 1
        p1 = self.points[i]
        p2 = self.points[i + 1]
        # Linear interpolation between the bracketing points.
        percent = (val - p1[0])/(p2[0] - p1[0])
        return (p2[1] - p1[1])*percent + p1[1]
class WeightedUtilityCurve(LinearCurve):
    """LinearCurve whose y values are normalized against max_y and scaled by weight."""
    __qualname__ = 'WeightedUtilityCurve'
    def __init__(self, points, max_y=0, weight=1):
        # max_y == 0 means "auto-detect": normalize against the largest y given.
        # NOTE(review): if every y <= 0 the detected max stays 0 and the
        # division below raises ZeroDivisionError, exactly as before.
        if max_y == 0:
            max_y = self._find_largest_y(points)
        transformed_points = [(point[0], point[1]/max_y*weight) for point in points]
        super().__init__(transformed_points)
    def _find_largest_y(self, points):
        """Return the largest y among points, never below the 0 starting value."""
        max_y = 0
        for point in points:
            # Was a `while` with the same body -- a decompiler artifact that
            # behaved as an `if` because the assignment falsified the condition.
            if point[1] > max_y:
                max_y = point[1]
        return max_y
class CircularUtilityCurve(LinearCurve):
    # LinearCurve over a circular domain [min_x, max_x]: synthetic endpoints
    # are inserted at x == 0 and x == max_x so lookups wrap across the seam.
    __qualname__ = 'CircularUtilityCurve'
    def __init__(self, points, min_x, max_x):
        super().__init__(points)
        self._min_x = min_x
        self._max_x = max_x
        last_point = self.points[-1]
        # Gap between the last defined point and the end of the domain.
        distance_to_end = max_x - last_point[0]
        # NOTE(review): total_length adds an x distance to the first point's
        # *y* value (points[0][1]), and pivot_y_value adds points[0][1] where
        # last_point[1] would be expected for a linear blend across the seam.
        # This looks suspicious but is preserved as-is -- confirm against the
        # original design before changing.
        total_length = distance_to_end + self.points[0][1]
        distance_to_pivot_point = distance_to_end/total_length
        pivot_y_value = (self.points[0][1] - last_point[1])*distance_to_pivot_point + self.points[0][1]
        self.points.insert(0, (0, pivot_y_value))
        self.points.insert(len(self.points), (self._max_x, pivot_y_value))
    def get(self, val):
        # Delegates to LinearCurve.get over the augmented point list.
        return super().get(val)
class Operator(enum.Int):
    # Comparison operators as a tunable/serializable enum, convertible to and
    # from the stdlib `operator` module's comparison functions.
    # NOTE(review): enum.Int is the project's custom enum type (the local
    # `enum` module), not stdlib enum.IntEnum -- confirm before refactoring.
    __qualname__ = 'Operator'
    GREATER = 1
    GREATER_OR_EQUAL = 2
    EQUAL = 3
    NOTEQUAL = 4
    LESS_OR_EQUAL = 5
    LESS = 6
    @staticmethod
    def from_function(fn):
        """Map an `operator` module comparison function to its Operator.

        Returns None (implicitly) for an unrecognized function.
        """
        if fn == operator.gt:
            return Operator.GREATER
        if fn == operator.ge:
            return Operator.GREATER_OR_EQUAL
        if fn == operator.eq:
            return Operator.EQUAL
        if fn == operator.ne:
            return Operator.NOTEQUAL
        if fn == operator.le:
            return Operator.LESS_OR_EQUAL
        if fn == operator.lt:
            return Operator.LESS
    @property
    def function(self):
        """The `operator` module function implementing this comparison."""
        if self.value == Operator.GREATER:
            return operator.gt
        if self.value == Operator.GREATER_OR_EQUAL:
            return operator.ge
        if self.value == Operator.EQUAL:
            return operator.eq
        if self.value == Operator.NOTEQUAL:
            return operator.ne
        if self.value == Operator.LESS_OR_EQUAL:
            return operator.le
        if self.value == Operator.LESS:
            return operator.lt
    @property
    def inverse(self):
        """The logically negated operator (e.g. GREATER becomes LESS_OR_EQUAL)."""
        if self == Operator.GREATER:
            return Operator.LESS_OR_EQUAL
        if self == Operator.GREATER_OR_EQUAL:
            return Operator.LESS
        if self == Operator.EQUAL:
            return Operator.NOTEQUAL
        if self == Operator.NOTEQUAL:
            return Operator.EQUAL
        if self == Operator.LESS_OR_EQUAL:
            return Operator.GREATER
        if self == Operator.LESS:
            return Operator.GREATER_OR_EQUAL
    @property
    def symbol(self):
        """Source-style symbol for display (e.g. '>=')."""
        if self == Operator.GREATER:
            return '>'
        if self == Operator.GREATER_OR_EQUAL:
            return '>='
        if self == Operator.EQUAL:
            return '=='
        if self == Operator.NOTEQUAL:
            return '!='
        if self == Operator.LESS_OR_EQUAL:
            return '<='
        if self == Operator.LESS:
            return '<'
    @property
    def category(self):
        """Collapse to GREATER, EQUAL or LESS (NOTEQUAL maps to EQUAL)."""
        if self == Operator.GREATER:
            return Operator.GREATER
        if self == Operator.GREATER_OR_EQUAL:
            return Operator.GREATER
        if self == Operator.EQUAL:
            return Operator.EQUAL
        if self == Operator.NOTEQUAL:
            return Operator.EQUAL
        if self == Operator.LESS_OR_EQUAL:
            return Operator.LESS
        if self == Operator.LESS:
            return Operator.LESS
class InequalityOperator(enum.Int):
    # Subset of Operator restricted to the four inequalities (no EQUAL/NOTEQUAL).
    # Values intentionally mirror Operator's so the two are interchangeable.
    __qualname__ = 'InequalityOperator'
    GREATER = Operator.GREATER
    GREATER_OR_EQUAL = Operator.GREATER_OR_EQUAL
    LESS_OR_EQUAL = Operator.LESS_OR_EQUAL
    LESS = Operator.LESS
# Graft Operator's helpers onto InequalityOperator after class creation.
# NOTE(review): __reload_context__ is part of the project's custom enum API --
# presumably it is required to mutate an enum class post-creation; confirm
# against the local enum module before touching this.
with InequalityOperator.__reload_context__(InequalityOperator, InequalityOperator):
    InequalityOperator.from_function = Operator.from_function
    InequalityOperator.function = Operator.function
    InequalityOperator.inverse = Operator.inverse
    InequalityOperator.symbol = Operator.symbol
    InequalityOperator.category = Operator.category
class Threshold:
    """A comparison against a fixed reference value.

    compare(x) evaluates comparison(x, value); an unset value or comparison
    makes every check fail (returns False).
    """
    __qualname__ = 'Threshold'
    __slots__ = ('value', 'comparison')
    def __init__(self, value=None, comparison=None):
        self.value = value
        self.comparison = comparison
    def compare(self, source_value):
        """Apply the stored comparison to source_value; False when unconfigured."""
        if self.value is None or self.comparison is None:
            return False
        return self.comparison(source_value, self.value)
    def compare_value(self, source_value):
        """Like compare(), but both operands are unwrapped via their .value attribute."""
        if self.value is None or self.comparison is None:
            return False
        return self.comparison(source_value.value, self.value.value)
    def inverse(self):
        """Return a new Threshold testing the logical negation of this one."""
        flipped = Operator.from_function(self.comparison).inverse.function
        return Threshold(self.value, flipped)
    def __str__(self):
        if self.comparison is None:
            return 'None'
        symbol = Operator.from_function(self.comparison).symbol
        return '{} {}'.format(symbol, self.value)
    def __repr__(self):
        return '<Threshold {}>'.format(str(self))
    def __eq__(self, other):
        if not isinstance(other, Threshold):
            return False
        if not self.value == other.value:
            return False
        if not self.comparison == other.comparison:
            return False
        return True
    def __hash__(self):
        return hash((self.value, self.comparison))
|
normal
|
{
"blob_id": "a0310b1bab339064c36ff0fe92d275db7a6c5ba9",
"index": 8734,
"step-1": "<mask token>\n\n\ndef rad_to_deg(rad):\n return rad * 180 / PI\n\n\ndef angle_abs_difference(a1, a2):\n delta = sims4.math.mod_2pi(a1 - a2)\n if delta > sims4.math.PI:\n delta = sims4.math.TWO_PI - delta\n return delta\n\n\n<mask token>\n\n\ndef vector_dot_2d(a, b):\n return a.x * b.x + a.z * b.z\n\n\ndef vector_cross(a, b):\n return Vector3(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y -\n a.y * b.x)\n\n\n<mask token>\n\n\ndef vector3_rotate_axis_angle(v, angle, axis):\n q = Quaternion.from_axis_angle(angle, axis)\n return q.transform_vector(v)\n\n\n<mask token>\n\n\ndef invert_quaternion(q):\n d = 1.0 / (q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w)\n return Quaternion(-d * q.x, -d * q.y, -d * q.z, d * q.w)\n\n\n<mask token>\n\n\nclass Location:\n __qualname__ = 'Location'\n __slots__ = ('transform', 'routing_surface', '_parent_ref',\n 'joint_name_or_hash', 'slot_hash')\n\n def __init__(self, transform, routing_surface, parent=None,\n joint_name_or_hash=None, slot_hash=0):\n self.transform = transform\n self.routing_surface = routing_surface\n self.parent = parent\n self.joint_name_or_hash = joint_name_or_hash\n self.slot_hash = slot_hash\n\n def __repr__(self):\n return standard_repr(self, self.transform, self.routing_surface,\n parent=self.parent, joint_name_or_hash=self.joint_name_or_hash,\n slot_hash=self.slot_hash)\n\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n if self.transform != other.transform:\n return False\n if self.parent != other.parent:\n return False\n if self.routing_surface != other.routing_surface:\n return False\n slot_hash0 = self.joint_name_or_hash or self.slot_hash\n slot_hash1 = other.joint_name_or_hash or other.slot_hash\n if slot_hash0 != slot_hash1:\n return False\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n @property\n def parent(self):\n if self._parent_ref is not None:\n return self._parent_ref()\n\n @parent.setter\n def parent(self, value):\n 
if value is not None:\n self._parent_ref = value.ref()\n self.routing_surface = None\n else:\n self._parent_ref = None\n\n @property\n def joint_name_hash(self):\n if self.joint_name_or_hash is None:\n return 0\n if isinstance(self.joint_name_or_hash, int):\n return self.joint_name_or_hash\n return sims4.hash_util.hash32(self.joint_name_or_hash)\n\n @property\n def world_routing_surface(self):\n if self.parent is not None:\n return self.parent.location.world_routing_surface\n return self.routing_surface\n\n @property\n def zone_id(self):\n if self.world_routing_surface.type == 1:\n return self.world_routing_surface.primary_id\n return sims4.zone_utils.get_zone_id()\n\n @property\n def level(self):\n return self.world_routing_surface.secondary_id\n\n @property\n def world_transform(self):\n if self.parent is None:\n return self.transform\n transform = self.transform\n parent = self.parent\n if parent.is_part:\n parent_transform = parent.part_owner.transform\n else:\n parent_transform = parent.transform\n if self.joint_name_or_hash is None:\n if transform is None:\n return parent_transform\n return sims4.math.Transform.concatenate(transform, parent_transform\n )\n joint_transform = native.animation.get_joint_transform_from_rig(self\n .parent.rig, self.joint_name_or_hash)\n if transform is None:\n return sims4.math.Transform.concatenate(joint_transform,\n parent_transform)\n local_transform = sims4.math.Transform.concatenate(transform,\n joint_transform)\n return sims4.math.Transform.concatenate(local_transform,\n parent_transform)\n\n def duplicate(self):\n return type(self)(self.transform, self.routing_surface, self.parent,\n self.joint_name_or_hash, self.slot_hash)\n\n def clone(self, *, transform=DEFAULT, translation=DEFAULT, orientation=\n DEFAULT, routing_surface=DEFAULT, parent=DEFAULT,\n joint_name_or_hash=DEFAULT, slot_hash=DEFAULT):\n if transform is DEFAULT:\n transform = self.transform\n if transform is not None:\n if translation is DEFAULT:\n translation 
= transform.translation\n if orientation is DEFAULT:\n orientation = transform.orientation\n transform = Transform(translation, orientation)\n if routing_surface is DEFAULT:\n routing_surface = self.routing_surface\n if parent is DEFAULT:\n parent = self.parent\n if joint_name_or_hash is DEFAULT:\n joint_name_or_hash = self.joint_name_or_hash\n if slot_hash is DEFAULT:\n slot_hash = self.slot_hash\n return type(self)(transform, routing_surface, parent,\n joint_name_or_hash, slot_hash)\n\n\nclass LinearCurve:\n __qualname__ = 'LinearCurve'\n __slots__ = 'points',\n\n def __init__(self, points):\n self.points = points\n self.points.sort(key=lambda i: i[0])\n\n def get(self, val):\n p_max = len(self.points) - 1\n if val <= self.points[0][0]:\n return self.points[0][1]\n if val >= self.points[p_max][0]:\n return self.points[p_max][1]\n i = p_max - 1\n while i > 0:\n while val < self.points[i][0]:\n i -= 1\n p1 = self.points[i]\n p2 = self.points[i + 1]\n percent = (val - p1[0]) / (p2[0] - p1[0])\n return (p2[1] - p1[1]) * percent + p1[1]\n\n\nclass WeightedUtilityCurve(LinearCurve):\n __qualname__ = 'WeightedUtilityCurve'\n\n def __init__(self, points, max_y=0, weight=1):\n if max_y == 0:\n max_y = self._find_largest_y(points)\n transformed_points = [(point[0], point[1] / max_y * weight) for\n point in points]\n super().__init__(transformed_points)\n\n def _find_largest_y(self, points):\n max_y = 0\n for point in points:\n while point[1] > max_y:\n max_y = point[1]\n return max_y\n\n\nclass CircularUtilityCurve(LinearCurve):\n __qualname__ = 'CircularUtilityCurve'\n\n def __init__(self, points, min_x, max_x):\n super().__init__(points)\n self._min_x = min_x\n self._max_x = max_x\n last_point = self.points[-1]\n distance_to_end = max_x - last_point[0]\n total_length = distance_to_end + self.points[0][1]\n distance_to_pivot_point = distance_to_end / total_length\n pivot_y_value = (self.points[0][1] - last_point[1]\n ) * distance_to_pivot_point + self.points[0][1]\n 
self.points.insert(0, (0, pivot_y_value))\n self.points.insert(len(self.points), (self._max_x, pivot_y_value))\n\n def get(self, val):\n return super().get(val)\n\n\nclass Operator(enum.Int):\n __qualname__ = 'Operator'\n GREATER = 1\n GREATER_OR_EQUAL = 2\n EQUAL = 3\n NOTEQUAL = 4\n LESS_OR_EQUAL = 5\n LESS = 6\n\n @staticmethod\n def from_function(fn):\n if fn == operator.gt:\n return Operator.GREATER\n if fn == operator.ge:\n return Operator.GREATER_OR_EQUAL\n if fn == operator.eq:\n return Operator.EQUAL\n if fn == operator.ne:\n return Operator.NOTEQUAL\n if fn == operator.le:\n return Operator.LESS_OR_EQUAL\n if fn == operator.lt:\n return Operator.LESS\n\n @property\n def function(self):\n if self.value == Operator.GREATER:\n return operator.gt\n if self.value == Operator.GREATER_OR_EQUAL:\n return operator.ge\n if self.value == Operator.EQUAL:\n return operator.eq\n if self.value == Operator.NOTEQUAL:\n return operator.ne\n if self.value == Operator.LESS_OR_EQUAL:\n return operator.le\n if self.value == Operator.LESS:\n return operator.lt\n\n @property\n def inverse(self):\n if self == Operator.GREATER:\n return Operator.LESS_OR_EQUAL\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.LESS\n if self == Operator.EQUAL:\n return Operator.NOTEQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.LESS:\n return Operator.GREATER_OR_EQUAL\n\n @property\n def symbol(self):\n if self == Operator.GREATER:\n return '>'\n if self == Operator.GREATER_OR_EQUAL:\n return '>='\n if self == Operator.EQUAL:\n return '=='\n if self == Operator.NOTEQUAL:\n return '!='\n if self == Operator.LESS_OR_EQUAL:\n return '<='\n if self == Operator.LESS:\n return '<'\n\n @property\n def category(self):\n if self == Operator.GREATER:\n return Operator.GREATER\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.EQUAL:\n return Operator.EQUAL\n if self 
== Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.LESS\n if self == Operator.LESS:\n return Operator.LESS\n\n\nclass InequalityOperator(enum.Int):\n __qualname__ = 'InequalityOperator'\n GREATER = Operator.GREATER\n GREATER_OR_EQUAL = Operator.GREATER_OR_EQUAL\n LESS_OR_EQUAL = Operator.LESS_OR_EQUAL\n LESS = Operator.LESS\n\n\n<mask token>\n\n\nclass Threshold:\n __qualname__ = 'Threshold'\n __slots__ = 'value', 'comparison'\n\n def __init__(self, value=None, comparison=None):\n self.value = value\n self.comparison = comparison\n\n def compare(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value, self.value)\n return False\n\n def compare_value(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value.value, self.value.value)\n return False\n\n def inverse(self):\n return Threshold(self.value, Operator.from_function(self.comparison\n ).inverse.function)\n\n def __str__(self):\n if self.comparison is None:\n return 'None'\n return '{} {}'.format(Operator.from_function(self.comparison).\n symbol, self.value)\n\n def __repr__(self):\n return '<Threshold {}>'.format(str(self))\n\n def __eq__(self, other):\n if not isinstance(other, Threshold):\n return False\n if not self.value == other.value:\n return False\n if not self.comparison == other.comparison:\n return False\n return True\n\n def __hash__(self):\n return hash((self.value, self.comparison))\n",
"step-2": "<mask token>\n\n\ndef rad_to_deg(rad):\n return rad * 180 / PI\n\n\ndef angle_abs_difference(a1, a2):\n delta = sims4.math.mod_2pi(a1 - a2)\n if delta > sims4.math.PI:\n delta = sims4.math.TWO_PI - delta\n return delta\n\n\n<mask token>\n\n\ndef vector_dot_2d(a, b):\n return a.x * b.x + a.z * b.z\n\n\ndef vector_cross(a, b):\n return Vector3(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y -\n a.y * b.x)\n\n\n<mask token>\n\n\ndef transform_almost_equal(t1, t2, epsilon=EPSILON, epsilon_orientation=\n QUATERNION_EPSILON):\n if epsilon_orientation is DEFAULT:\n epsilon_orientation = epsilon\n return vector3_almost_equal(t1.translation, t2.translation, epsilon=epsilon\n ) and quaternion_almost_equal(t1.orientation, t2.orientation,\n epsilon=epsilon_orientation)\n\n\n<mask token>\n\n\ndef vector3_rotate_axis_angle(v, angle, axis):\n q = Quaternion.from_axis_angle(angle, axis)\n return q.transform_vector(v)\n\n\n<mask token>\n\n\ndef invert_quaternion(q):\n d = 1.0 / (q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w)\n return Quaternion(-d * q.x, -d * q.y, -d * q.z, d * q.w)\n\n\n<mask token>\n\n\nclass Location:\n __qualname__ = 'Location'\n __slots__ = ('transform', 'routing_surface', '_parent_ref',\n 'joint_name_or_hash', 'slot_hash')\n\n def __init__(self, transform, routing_surface, parent=None,\n joint_name_or_hash=None, slot_hash=0):\n self.transform = transform\n self.routing_surface = routing_surface\n self.parent = parent\n self.joint_name_or_hash = joint_name_or_hash\n self.slot_hash = slot_hash\n\n def __repr__(self):\n return standard_repr(self, self.transform, self.routing_surface,\n parent=self.parent, joint_name_or_hash=self.joint_name_or_hash,\n slot_hash=self.slot_hash)\n\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n if self.transform != other.transform:\n return False\n if self.parent != other.parent:\n return False\n if self.routing_surface != other.routing_surface:\n return False\n slot_hash0 = 
self.joint_name_or_hash or self.slot_hash\n slot_hash1 = other.joint_name_or_hash or other.slot_hash\n if slot_hash0 != slot_hash1:\n return False\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n @property\n def parent(self):\n if self._parent_ref is not None:\n return self._parent_ref()\n\n @parent.setter\n def parent(self, value):\n if value is not None:\n self._parent_ref = value.ref()\n self.routing_surface = None\n else:\n self._parent_ref = None\n\n @property\n def joint_name_hash(self):\n if self.joint_name_or_hash is None:\n return 0\n if isinstance(self.joint_name_or_hash, int):\n return self.joint_name_or_hash\n return sims4.hash_util.hash32(self.joint_name_or_hash)\n\n @property\n def world_routing_surface(self):\n if self.parent is not None:\n return self.parent.location.world_routing_surface\n return self.routing_surface\n\n @property\n def zone_id(self):\n if self.world_routing_surface.type == 1:\n return self.world_routing_surface.primary_id\n return sims4.zone_utils.get_zone_id()\n\n @property\n def level(self):\n return self.world_routing_surface.secondary_id\n\n @property\n def world_transform(self):\n if self.parent is None:\n return self.transform\n transform = self.transform\n parent = self.parent\n if parent.is_part:\n parent_transform = parent.part_owner.transform\n else:\n parent_transform = parent.transform\n if self.joint_name_or_hash is None:\n if transform is None:\n return parent_transform\n return sims4.math.Transform.concatenate(transform, parent_transform\n )\n joint_transform = native.animation.get_joint_transform_from_rig(self\n .parent.rig, self.joint_name_or_hash)\n if transform is None:\n return sims4.math.Transform.concatenate(joint_transform,\n parent_transform)\n local_transform = sims4.math.Transform.concatenate(transform,\n joint_transform)\n return sims4.math.Transform.concatenate(local_transform,\n parent_transform)\n\n def duplicate(self):\n return type(self)(self.transform, 
self.routing_surface, self.parent,\n self.joint_name_or_hash, self.slot_hash)\n\n def clone(self, *, transform=DEFAULT, translation=DEFAULT, orientation=\n DEFAULT, routing_surface=DEFAULT, parent=DEFAULT,\n joint_name_or_hash=DEFAULT, slot_hash=DEFAULT):\n if transform is DEFAULT:\n transform = self.transform\n if transform is not None:\n if translation is DEFAULT:\n translation = transform.translation\n if orientation is DEFAULT:\n orientation = transform.orientation\n transform = Transform(translation, orientation)\n if routing_surface is DEFAULT:\n routing_surface = self.routing_surface\n if parent is DEFAULT:\n parent = self.parent\n if joint_name_or_hash is DEFAULT:\n joint_name_or_hash = self.joint_name_or_hash\n if slot_hash is DEFAULT:\n slot_hash = self.slot_hash\n return type(self)(transform, routing_surface, parent,\n joint_name_or_hash, slot_hash)\n\n\nclass LinearCurve:\n __qualname__ = 'LinearCurve'\n __slots__ = 'points',\n\n def __init__(self, points):\n self.points = points\n self.points.sort(key=lambda i: i[0])\n\n def get(self, val):\n p_max = len(self.points) - 1\n if val <= self.points[0][0]:\n return self.points[0][1]\n if val >= self.points[p_max][0]:\n return self.points[p_max][1]\n i = p_max - 1\n while i > 0:\n while val < self.points[i][0]:\n i -= 1\n p1 = self.points[i]\n p2 = self.points[i + 1]\n percent = (val - p1[0]) / (p2[0] - p1[0])\n return (p2[1] - p1[1]) * percent + p1[1]\n\n\nclass WeightedUtilityCurve(LinearCurve):\n __qualname__ = 'WeightedUtilityCurve'\n\n def __init__(self, points, max_y=0, weight=1):\n if max_y == 0:\n max_y = self._find_largest_y(points)\n transformed_points = [(point[0], point[1] / max_y * weight) for\n point in points]\n super().__init__(transformed_points)\n\n def _find_largest_y(self, points):\n max_y = 0\n for point in points:\n while point[1] > max_y:\n max_y = point[1]\n return max_y\n\n\nclass CircularUtilityCurve(LinearCurve):\n __qualname__ = 'CircularUtilityCurve'\n\n def __init__(self, 
points, min_x, max_x):\n super().__init__(points)\n self._min_x = min_x\n self._max_x = max_x\n last_point = self.points[-1]\n distance_to_end = max_x - last_point[0]\n total_length = distance_to_end + self.points[0][1]\n distance_to_pivot_point = distance_to_end / total_length\n pivot_y_value = (self.points[0][1] - last_point[1]\n ) * distance_to_pivot_point + self.points[0][1]\n self.points.insert(0, (0, pivot_y_value))\n self.points.insert(len(self.points), (self._max_x, pivot_y_value))\n\n def get(self, val):\n return super().get(val)\n\n\nclass Operator(enum.Int):\n __qualname__ = 'Operator'\n GREATER = 1\n GREATER_OR_EQUAL = 2\n EQUAL = 3\n NOTEQUAL = 4\n LESS_OR_EQUAL = 5\n LESS = 6\n\n @staticmethod\n def from_function(fn):\n if fn == operator.gt:\n return Operator.GREATER\n if fn == operator.ge:\n return Operator.GREATER_OR_EQUAL\n if fn == operator.eq:\n return Operator.EQUAL\n if fn == operator.ne:\n return Operator.NOTEQUAL\n if fn == operator.le:\n return Operator.LESS_OR_EQUAL\n if fn == operator.lt:\n return Operator.LESS\n\n @property\n def function(self):\n if self.value == Operator.GREATER:\n return operator.gt\n if self.value == Operator.GREATER_OR_EQUAL:\n return operator.ge\n if self.value == Operator.EQUAL:\n return operator.eq\n if self.value == Operator.NOTEQUAL:\n return operator.ne\n if self.value == Operator.LESS_OR_EQUAL:\n return operator.le\n if self.value == Operator.LESS:\n return operator.lt\n\n @property\n def inverse(self):\n if self == Operator.GREATER:\n return Operator.LESS_OR_EQUAL\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.LESS\n if self == Operator.EQUAL:\n return Operator.NOTEQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.LESS:\n return Operator.GREATER_OR_EQUAL\n\n @property\n def symbol(self):\n if self == Operator.GREATER:\n return '>'\n if self == Operator.GREATER_OR_EQUAL:\n return '>='\n if self == 
Operator.EQUAL:\n return '=='\n if self == Operator.NOTEQUAL:\n return '!='\n if self == Operator.LESS_OR_EQUAL:\n return '<='\n if self == Operator.LESS:\n return '<'\n\n @property\n def category(self):\n if self == Operator.GREATER:\n return Operator.GREATER\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.EQUAL:\n return Operator.EQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.LESS\n if self == Operator.LESS:\n return Operator.LESS\n\n\nclass InequalityOperator(enum.Int):\n __qualname__ = 'InequalityOperator'\n GREATER = Operator.GREATER\n GREATER_OR_EQUAL = Operator.GREATER_OR_EQUAL\n LESS_OR_EQUAL = Operator.LESS_OR_EQUAL\n LESS = Operator.LESS\n\n\n<mask token>\n\n\nclass Threshold:\n __qualname__ = 'Threshold'\n __slots__ = 'value', 'comparison'\n\n def __init__(self, value=None, comparison=None):\n self.value = value\n self.comparison = comparison\n\n def compare(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value, self.value)\n return False\n\n def compare_value(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value.value, self.value.value)\n return False\n\n def inverse(self):\n return Threshold(self.value, Operator.from_function(self.comparison\n ).inverse.function)\n\n def __str__(self):\n if self.comparison is None:\n return 'None'\n return '{} {}'.format(Operator.from_function(self.comparison).\n symbol, self.value)\n\n def __repr__(self):\n return '<Threshold {}>'.format(str(self))\n\n def __eq__(self, other):\n if not isinstance(other, Threshold):\n return False\n if not self.value == other.value:\n return False\n if not self.comparison == other.comparison:\n return False\n return True\n\n def __hash__(self):\n return hash((self.value, self.comparison))\n",
"step-3": "<mask token>\n\n\ndef linear_seq_gen(start, stop, step, max_count=None):\n delta = stop - start\n num = floor(abs(delta / step))\n if max_count is not None:\n num = min(num, max_count - 1)\n if num > 0:\n for i in range(0, num + 1):\n yield start + i * delta / num\n else:\n yield start\n if stop != start:\n yield stop\n\n\n<mask token>\n\n\ndef rad_to_deg(rad):\n return rad * 180 / PI\n\n\ndef angle_abs_difference(a1, a2):\n delta = sims4.math.mod_2pi(a1 - a2)\n if delta > sims4.math.PI:\n delta = sims4.math.TWO_PI - delta\n return delta\n\n\n<mask token>\n\n\ndef vector_dot_2d(a, b):\n return a.x * b.x + a.z * b.z\n\n\ndef vector_cross(a, b):\n return Vector3(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y -\n a.y * b.x)\n\n\n<mask token>\n\n\ndef almost_equal(a, b, epsilon=EPSILON):\n return abs(a - b) < epsilon\n\n\n<mask token>\n\n\ndef transform_almost_equal(t1, t2, epsilon=EPSILON, epsilon_orientation=\n QUATERNION_EPSILON):\n if epsilon_orientation is DEFAULT:\n epsilon_orientation = epsilon\n return vector3_almost_equal(t1.translation, t2.translation, epsilon=epsilon\n ) and quaternion_almost_equal(t1.orientation, t2.orientation,\n epsilon=epsilon_orientation)\n\n\n<mask token>\n\n\ndef vector3_rotate_axis_angle(v, angle, axis):\n q = Quaternion.from_axis_angle(angle, axis)\n return q.transform_vector(v)\n\n\n<mask token>\n\n\ndef invert_quaternion(q):\n d = 1.0 / (q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w)\n return Quaternion(-d * q.x, -d * q.y, -d * q.z, d * q.w)\n\n\n<mask token>\n\n\nclass Location:\n __qualname__ = 'Location'\n __slots__ = ('transform', 'routing_surface', '_parent_ref',\n 'joint_name_or_hash', 'slot_hash')\n\n def __init__(self, transform, routing_surface, parent=None,\n joint_name_or_hash=None, slot_hash=0):\n self.transform = transform\n self.routing_surface = routing_surface\n self.parent = parent\n self.joint_name_or_hash = joint_name_or_hash\n self.slot_hash = slot_hash\n\n def __repr__(self):\n return 
standard_repr(self, self.transform, self.routing_surface,\n parent=self.parent, joint_name_or_hash=self.joint_name_or_hash,\n slot_hash=self.slot_hash)\n\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n if self.transform != other.transform:\n return False\n if self.parent != other.parent:\n return False\n if self.routing_surface != other.routing_surface:\n return False\n slot_hash0 = self.joint_name_or_hash or self.slot_hash\n slot_hash1 = other.joint_name_or_hash or other.slot_hash\n if slot_hash0 != slot_hash1:\n return False\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n @property\n def parent(self):\n if self._parent_ref is not None:\n return self._parent_ref()\n\n @parent.setter\n def parent(self, value):\n if value is not None:\n self._parent_ref = value.ref()\n self.routing_surface = None\n else:\n self._parent_ref = None\n\n @property\n def joint_name_hash(self):\n if self.joint_name_or_hash is None:\n return 0\n if isinstance(self.joint_name_or_hash, int):\n return self.joint_name_or_hash\n return sims4.hash_util.hash32(self.joint_name_or_hash)\n\n @property\n def world_routing_surface(self):\n if self.parent is not None:\n return self.parent.location.world_routing_surface\n return self.routing_surface\n\n @property\n def zone_id(self):\n if self.world_routing_surface.type == 1:\n return self.world_routing_surface.primary_id\n return sims4.zone_utils.get_zone_id()\n\n @property\n def level(self):\n return self.world_routing_surface.secondary_id\n\n @property\n def world_transform(self):\n if self.parent is None:\n return self.transform\n transform = self.transform\n parent = self.parent\n if parent.is_part:\n parent_transform = parent.part_owner.transform\n else:\n parent_transform = parent.transform\n if self.joint_name_or_hash is None:\n if transform is None:\n return parent_transform\n return sims4.math.Transform.concatenate(transform, parent_transform\n )\n joint_transform = 
native.animation.get_joint_transform_from_rig(self\n .parent.rig, self.joint_name_or_hash)\n if transform is None:\n return sims4.math.Transform.concatenate(joint_transform,\n parent_transform)\n local_transform = sims4.math.Transform.concatenate(transform,\n joint_transform)\n return sims4.math.Transform.concatenate(local_transform,\n parent_transform)\n\n def duplicate(self):\n return type(self)(self.transform, self.routing_surface, self.parent,\n self.joint_name_or_hash, self.slot_hash)\n\n def clone(self, *, transform=DEFAULT, translation=DEFAULT, orientation=\n DEFAULT, routing_surface=DEFAULT, parent=DEFAULT,\n joint_name_or_hash=DEFAULT, slot_hash=DEFAULT):\n if transform is DEFAULT:\n transform = self.transform\n if transform is not None:\n if translation is DEFAULT:\n translation = transform.translation\n if orientation is DEFAULT:\n orientation = transform.orientation\n transform = Transform(translation, orientation)\n if routing_surface is DEFAULT:\n routing_surface = self.routing_surface\n if parent is DEFAULT:\n parent = self.parent\n if joint_name_or_hash is DEFAULT:\n joint_name_or_hash = self.joint_name_or_hash\n if slot_hash is DEFAULT:\n slot_hash = self.slot_hash\n return type(self)(transform, routing_surface, parent,\n joint_name_or_hash, slot_hash)\n\n\nclass LinearCurve:\n __qualname__ = 'LinearCurve'\n __slots__ = 'points',\n\n def __init__(self, points):\n self.points = points\n self.points.sort(key=lambda i: i[0])\n\n def get(self, val):\n p_max = len(self.points) - 1\n if val <= self.points[0][0]:\n return self.points[0][1]\n if val >= self.points[p_max][0]:\n return self.points[p_max][1]\n i = p_max - 1\n while i > 0:\n while val < self.points[i][0]:\n i -= 1\n p1 = self.points[i]\n p2 = self.points[i + 1]\n percent = (val - p1[0]) / (p2[0] - p1[0])\n return (p2[1] - p1[1]) * percent + p1[1]\n\n\nclass WeightedUtilityCurve(LinearCurve):\n __qualname__ = 'WeightedUtilityCurve'\n\n def __init__(self, points, max_y=0, weight=1):\n if max_y 
== 0:\n max_y = self._find_largest_y(points)\n transformed_points = [(point[0], point[1] / max_y * weight) for\n point in points]\n super().__init__(transformed_points)\n\n def _find_largest_y(self, points):\n max_y = 0\n for point in points:\n while point[1] > max_y:\n max_y = point[1]\n return max_y\n\n\nclass CircularUtilityCurve(LinearCurve):\n __qualname__ = 'CircularUtilityCurve'\n\n def __init__(self, points, min_x, max_x):\n super().__init__(points)\n self._min_x = min_x\n self._max_x = max_x\n last_point = self.points[-1]\n distance_to_end = max_x - last_point[0]\n total_length = distance_to_end + self.points[0][1]\n distance_to_pivot_point = distance_to_end / total_length\n pivot_y_value = (self.points[0][1] - last_point[1]\n ) * distance_to_pivot_point + self.points[0][1]\n self.points.insert(0, (0, pivot_y_value))\n self.points.insert(len(self.points), (self._max_x, pivot_y_value))\n\n def get(self, val):\n return super().get(val)\n\n\nclass Operator(enum.Int):\n __qualname__ = 'Operator'\n GREATER = 1\n GREATER_OR_EQUAL = 2\n EQUAL = 3\n NOTEQUAL = 4\n LESS_OR_EQUAL = 5\n LESS = 6\n\n @staticmethod\n def from_function(fn):\n if fn == operator.gt:\n return Operator.GREATER\n if fn == operator.ge:\n return Operator.GREATER_OR_EQUAL\n if fn == operator.eq:\n return Operator.EQUAL\n if fn == operator.ne:\n return Operator.NOTEQUAL\n if fn == operator.le:\n return Operator.LESS_OR_EQUAL\n if fn == operator.lt:\n return Operator.LESS\n\n @property\n def function(self):\n if self.value == Operator.GREATER:\n return operator.gt\n if self.value == Operator.GREATER_OR_EQUAL:\n return operator.ge\n if self.value == Operator.EQUAL:\n return operator.eq\n if self.value == Operator.NOTEQUAL:\n return operator.ne\n if self.value == Operator.LESS_OR_EQUAL:\n return operator.le\n if self.value == Operator.LESS:\n return operator.lt\n\n @property\n def inverse(self):\n if self == Operator.GREATER:\n return Operator.LESS_OR_EQUAL\n if self == Operator.GREATER_OR_EQUAL:\n 
return Operator.LESS\n if self == Operator.EQUAL:\n return Operator.NOTEQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.LESS:\n return Operator.GREATER_OR_EQUAL\n\n @property\n def symbol(self):\n if self == Operator.GREATER:\n return '>'\n if self == Operator.GREATER_OR_EQUAL:\n return '>='\n if self == Operator.EQUAL:\n return '=='\n if self == Operator.NOTEQUAL:\n return '!='\n if self == Operator.LESS_OR_EQUAL:\n return '<='\n if self == Operator.LESS:\n return '<'\n\n @property\n def category(self):\n if self == Operator.GREATER:\n return Operator.GREATER\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.EQUAL:\n return Operator.EQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.LESS\n if self == Operator.LESS:\n return Operator.LESS\n\n\nclass InequalityOperator(enum.Int):\n __qualname__ = 'InequalityOperator'\n GREATER = Operator.GREATER\n GREATER_OR_EQUAL = Operator.GREATER_OR_EQUAL\n LESS_OR_EQUAL = Operator.LESS_OR_EQUAL\n LESS = Operator.LESS\n\n\n<mask token>\n\n\nclass Threshold:\n __qualname__ = 'Threshold'\n __slots__ = 'value', 'comparison'\n\n def __init__(self, value=None, comparison=None):\n self.value = value\n self.comparison = comparison\n\n def compare(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value, self.value)\n return False\n\n def compare_value(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value.value, self.value.value)\n return False\n\n def inverse(self):\n return Threshold(self.value, Operator.from_function(self.comparison\n ).inverse.function)\n\n def __str__(self):\n if self.comparison is None:\n return 'None'\n return '{} {}'.format(Operator.from_function(self.comparison).\n symbol, 
self.value)\n\n def __repr__(self):\n return '<Threshold {}>'.format(str(self))\n\n def __eq__(self, other):\n if not isinstance(other, Threshold):\n return False\n if not self.value == other.value:\n return False\n if not self.comparison == other.comparison:\n return False\n return True\n\n def __hash__(self):\n return hash((self.value, self.comparison))\n",
"step-4": "<mask token>\n\n\ndef clamp(lower_bound, x, upper_bound):\n if x < lower_bound:\n return lower_bound\n if x > upper_bound:\n return upper_bound\n return x\n\n\ndef interpolate(a, b, fraction):\n return a * fraction + (1 - fraction) * b\n\n\ndef linear_seq_gen(start, stop, step, max_count=None):\n delta = stop - start\n num = floor(abs(delta / step))\n if max_count is not None:\n num = min(num, max_count - 1)\n if num > 0:\n for i in range(0, num + 1):\n yield start + i * delta / num\n else:\n yield start\n if stop != start:\n yield stop\n\n\ndef deg_to_rad(deg):\n return deg * PI / 180\n\n\ndef rad_to_deg(rad):\n return rad * 180 / PI\n\n\ndef angle_abs_difference(a1, a2):\n delta = sims4.math.mod_2pi(a1 - a2)\n if delta > sims4.math.PI:\n delta = sims4.math.TWO_PI - delta\n return delta\n\n\n<mask token>\n\n\ndef vector_dot_2d(a, b):\n return a.x * b.x + a.z * b.z\n\n\ndef vector_cross(a, b):\n return Vector3(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y -\n a.y * b.x)\n\n\ndef vector_cross_2d(a, b):\n return a.z * b.x - a.x * b.z\n\n\ndef vector_normalize(v):\n return v / v.magnitude()\n\n\ndef vector_flatten(v):\n return Vector3(v.x, 0, v.z)\n\n\ndef almost_equal(a, b, epsilon=EPSILON):\n return abs(a - b) < epsilon\n\n\n<mask token>\n\n\ndef transform_almost_equal(t1, t2, epsilon=EPSILON, epsilon_orientation=\n QUATERNION_EPSILON):\n if epsilon_orientation is DEFAULT:\n epsilon_orientation = epsilon\n return vector3_almost_equal(t1.translation, t2.translation, epsilon=epsilon\n ) and quaternion_almost_equal(t1.orientation, t2.orientation,\n epsilon=epsilon_orientation)\n\n\ndef transform_almost_equal_2d(t1, t2, epsilon=EPSILON, epsilon_orientation=\n QUATERNION_EPSILON):\n if epsilon_orientation is DEFAULT:\n epsilon_orientation = epsilon\n return vector3_almost_equal_2d(t1.translation, t2.translation, epsilon=\n epsilon) and quaternion_almost_equal(t1.orientation, t2.orientation,\n epsilon=epsilon_orientation)\n\n\ndef 
vector3_rotate_axis_angle(v, angle, axis):\n q = Quaternion.from_axis_angle(angle, axis)\n return q.transform_vector(v)\n\n\n<mask token>\n\n\ndef angle_to_yaw_quaternion(angle):\n return Quaternion.from_axis_angle(angle, UP_AXIS)\n\n\n<mask token>\n\n\ndef invert_quaternion(q):\n d = 1.0 / (q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w)\n return Quaternion(-d * q.x, -d * q.y, -d * q.z, d * q.w)\n\n\ndef get_difference_transform(transform_a, transform_b):\n v = transform_b.translation - transform_a.translation\n a_q_i = invert_quaternion(transform_a.orientation)\n q = Quaternion.concatenate(transform_b.orientation, a_q_i)\n v_prime = Quaternion.transform_vector(a_q_i, v)\n return Transform(v_prime, q)\n\n\nclass Location:\n __qualname__ = 'Location'\n __slots__ = ('transform', 'routing_surface', '_parent_ref',\n 'joint_name_or_hash', 'slot_hash')\n\n def __init__(self, transform, routing_surface, parent=None,\n joint_name_or_hash=None, slot_hash=0):\n self.transform = transform\n self.routing_surface = routing_surface\n self.parent = parent\n self.joint_name_or_hash = joint_name_or_hash\n self.slot_hash = slot_hash\n\n def __repr__(self):\n return standard_repr(self, self.transform, self.routing_surface,\n parent=self.parent, joint_name_or_hash=self.joint_name_or_hash,\n slot_hash=self.slot_hash)\n\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n if self.transform != other.transform:\n return False\n if self.parent != other.parent:\n return False\n if self.routing_surface != other.routing_surface:\n return False\n slot_hash0 = self.joint_name_or_hash or self.slot_hash\n slot_hash1 = other.joint_name_or_hash or other.slot_hash\n if slot_hash0 != slot_hash1:\n return False\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n @property\n def parent(self):\n if self._parent_ref is not None:\n return self._parent_ref()\n\n @parent.setter\n def parent(self, value):\n if value is not None:\n self._parent_ref = 
value.ref()\n self.routing_surface = None\n else:\n self._parent_ref = None\n\n @property\n def joint_name_hash(self):\n if self.joint_name_or_hash is None:\n return 0\n if isinstance(self.joint_name_or_hash, int):\n return self.joint_name_or_hash\n return sims4.hash_util.hash32(self.joint_name_or_hash)\n\n @property\n def world_routing_surface(self):\n if self.parent is not None:\n return self.parent.location.world_routing_surface\n return self.routing_surface\n\n @property\n def zone_id(self):\n if self.world_routing_surface.type == 1:\n return self.world_routing_surface.primary_id\n return sims4.zone_utils.get_zone_id()\n\n @property\n def level(self):\n return self.world_routing_surface.secondary_id\n\n @property\n def world_transform(self):\n if self.parent is None:\n return self.transform\n transform = self.transform\n parent = self.parent\n if parent.is_part:\n parent_transform = parent.part_owner.transform\n else:\n parent_transform = parent.transform\n if self.joint_name_or_hash is None:\n if transform is None:\n return parent_transform\n return sims4.math.Transform.concatenate(transform, parent_transform\n )\n joint_transform = native.animation.get_joint_transform_from_rig(self\n .parent.rig, self.joint_name_or_hash)\n if transform is None:\n return sims4.math.Transform.concatenate(joint_transform,\n parent_transform)\n local_transform = sims4.math.Transform.concatenate(transform,\n joint_transform)\n return sims4.math.Transform.concatenate(local_transform,\n parent_transform)\n\n def duplicate(self):\n return type(self)(self.transform, self.routing_surface, self.parent,\n self.joint_name_or_hash, self.slot_hash)\n\n def clone(self, *, transform=DEFAULT, translation=DEFAULT, orientation=\n DEFAULT, routing_surface=DEFAULT, parent=DEFAULT,\n joint_name_or_hash=DEFAULT, slot_hash=DEFAULT):\n if transform is DEFAULT:\n transform = self.transform\n if transform is not None:\n if translation is DEFAULT:\n translation = transform.translation\n if orientation is 
DEFAULT:\n orientation = transform.orientation\n transform = Transform(translation, orientation)\n if routing_surface is DEFAULT:\n routing_surface = self.routing_surface\n if parent is DEFAULT:\n parent = self.parent\n if joint_name_or_hash is DEFAULT:\n joint_name_or_hash = self.joint_name_or_hash\n if slot_hash is DEFAULT:\n slot_hash = self.slot_hash\n return type(self)(transform, routing_surface, parent,\n joint_name_or_hash, slot_hash)\n\n\nclass LinearCurve:\n __qualname__ = 'LinearCurve'\n __slots__ = 'points',\n\n def __init__(self, points):\n self.points = points\n self.points.sort(key=lambda i: i[0])\n\n def get(self, val):\n p_max = len(self.points) - 1\n if val <= self.points[0][0]:\n return self.points[0][1]\n if val >= self.points[p_max][0]:\n return self.points[p_max][1]\n i = p_max - 1\n while i > 0:\n while val < self.points[i][0]:\n i -= 1\n p1 = self.points[i]\n p2 = self.points[i + 1]\n percent = (val - p1[0]) / (p2[0] - p1[0])\n return (p2[1] - p1[1]) * percent + p1[1]\n\n\nclass WeightedUtilityCurve(LinearCurve):\n __qualname__ = 'WeightedUtilityCurve'\n\n def __init__(self, points, max_y=0, weight=1):\n if max_y == 0:\n max_y = self._find_largest_y(points)\n transformed_points = [(point[0], point[1] / max_y * weight) for\n point in points]\n super().__init__(transformed_points)\n\n def _find_largest_y(self, points):\n max_y = 0\n for point in points:\n while point[1] > max_y:\n max_y = point[1]\n return max_y\n\n\nclass CircularUtilityCurve(LinearCurve):\n __qualname__ = 'CircularUtilityCurve'\n\n def __init__(self, points, min_x, max_x):\n super().__init__(points)\n self._min_x = min_x\n self._max_x = max_x\n last_point = self.points[-1]\n distance_to_end = max_x - last_point[0]\n total_length = distance_to_end + self.points[0][1]\n distance_to_pivot_point = distance_to_end / total_length\n pivot_y_value = (self.points[0][1] - last_point[1]\n ) * distance_to_pivot_point + self.points[0][1]\n self.points.insert(0, (0, pivot_y_value))\n 
self.points.insert(len(self.points), (self._max_x, pivot_y_value))\n\n def get(self, val):\n return super().get(val)\n\n\nclass Operator(enum.Int):\n __qualname__ = 'Operator'\n GREATER = 1\n GREATER_OR_EQUAL = 2\n EQUAL = 3\n NOTEQUAL = 4\n LESS_OR_EQUAL = 5\n LESS = 6\n\n @staticmethod\n def from_function(fn):\n if fn == operator.gt:\n return Operator.GREATER\n if fn == operator.ge:\n return Operator.GREATER_OR_EQUAL\n if fn == operator.eq:\n return Operator.EQUAL\n if fn == operator.ne:\n return Operator.NOTEQUAL\n if fn == operator.le:\n return Operator.LESS_OR_EQUAL\n if fn == operator.lt:\n return Operator.LESS\n\n @property\n def function(self):\n if self.value == Operator.GREATER:\n return operator.gt\n if self.value == Operator.GREATER_OR_EQUAL:\n return operator.ge\n if self.value == Operator.EQUAL:\n return operator.eq\n if self.value == Operator.NOTEQUAL:\n return operator.ne\n if self.value == Operator.LESS_OR_EQUAL:\n return operator.le\n if self.value == Operator.LESS:\n return operator.lt\n\n @property\n def inverse(self):\n if self == Operator.GREATER:\n return Operator.LESS_OR_EQUAL\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.LESS\n if self == Operator.EQUAL:\n return Operator.NOTEQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.LESS:\n return Operator.GREATER_OR_EQUAL\n\n @property\n def symbol(self):\n if self == Operator.GREATER:\n return '>'\n if self == Operator.GREATER_OR_EQUAL:\n return '>='\n if self == Operator.EQUAL:\n return '=='\n if self == Operator.NOTEQUAL:\n return '!='\n if self == Operator.LESS_OR_EQUAL:\n return '<='\n if self == Operator.LESS:\n return '<'\n\n @property\n def category(self):\n if self == Operator.GREATER:\n return Operator.GREATER\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.EQUAL:\n return Operator.EQUAL\n if self == Operator.NOTEQUAL:\n return 
Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.LESS\n if self == Operator.LESS:\n return Operator.LESS\n\n\nclass InequalityOperator(enum.Int):\n __qualname__ = 'InequalityOperator'\n GREATER = Operator.GREATER\n GREATER_OR_EQUAL = Operator.GREATER_OR_EQUAL\n LESS_OR_EQUAL = Operator.LESS_OR_EQUAL\n LESS = Operator.LESS\n\n\n<mask token>\n\n\nclass Threshold:\n __qualname__ = 'Threshold'\n __slots__ = 'value', 'comparison'\n\n def __init__(self, value=None, comparison=None):\n self.value = value\n self.comparison = comparison\n\n def compare(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value, self.value)\n return False\n\n def compare_value(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value.value, self.value.value)\n return False\n\n def inverse(self):\n return Threshold(self.value, Operator.from_function(self.comparison\n ).inverse.function)\n\n def __str__(self):\n if self.comparison is None:\n return 'None'\n return '{} {}'.format(Operator.from_function(self.comparison).\n symbol, self.value)\n\n def __repr__(self):\n return '<Threshold {}>'.format(str(self))\n\n def __eq__(self, other):\n if not isinstance(other, Threshold):\n return False\n if not self.value == other.value:\n return False\n if not self.comparison == other.comparison:\n return False\n return True\n\n def __hash__(self):\n return hash((self.value, self.comparison))\n",
"step-5": "from _math import Vector2, Vector3, Quaternion, Transform, Vector3Immutable, QuaternionImmutable, minimum_distance\nfrom _math import mod_2pi\nfrom math import pi as PI, sqrt, fmod, floor, atan2, acos, asin, ceil, pi, e\nimport operator\nfrom sims4.repr_utils import standard_repr\nimport enum\nimport native.animation\nimport sims4.hash_util\nfrom singletons import DEFAULT\nTWO_PI = PI*2\nEPSILON = 1.192092896e-07\nQUATERNION_EPSILON = 0.001\nMAX_FLOAT = 3.402823466e+38\nMAX_UINT64 = 18446744073709551615\nMAX_INT64 = 922337203685477580\nMAX_UINT32 = 4294967295\nMAX_INT32 = 2147483647\nMAX_UINT16 = 65535\nMAX_INT16 = 32767\nPOS_INFINITY = float('inf')\nNEG_INFINITY = float('-inf')\nFORWARD_AXIS = Vector3.Z_AXIS()\nUP_AXIS = Vector3.Y_AXIS()\n\ndef clamp(lower_bound, x, upper_bound):\n if x < lower_bound:\n return lower_bound\n if x > upper_bound:\n return upper_bound\n return x\n\ndef interpolate(a, b, fraction):\n return a*fraction + (1 - fraction)*b\n\ndef linear_seq_gen(start, stop, step, max_count=None):\n delta = stop - start\n num = floor(abs(delta/step))\n if max_count is not None:\n num = min(num, max_count - 1)\n if num > 0:\n for i in range(0, num + 1):\n yield start + i*delta/num\n else:\n yield start\n if stop != start:\n yield stop\n\ndef deg_to_rad(deg):\n return deg*PI/180\n\ndef rad_to_deg(rad):\n return rad*180/PI\n\ndef angle_abs_difference(a1, a2):\n delta = sims4.math.mod_2pi(a1 - a2)\n if delta > sims4.math.PI:\n delta = sims4.math.TWO_PI - delta\n return delta\n\ndef vector_dot(a, b):\n return a.x*b.x + a.y*b.y + a.z*b.z\n\ndef vector_dot_2d(a, b):\n return a.x*b.x + a.z*b.z\n\ndef vector_cross(a, b):\n return Vector3(a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x)\n\ndef vector_cross_2d(a, b):\n return a.z*b.x - a.x*b.z\n\ndef vector_normalize(v):\n return v/v.magnitude()\n\ndef vector_flatten(v):\n return Vector3(v.x, 0, v.z)\n\ndef almost_equal(a, b, epsilon=EPSILON):\n return abs(a - b) < epsilon\n\ndef 
vector3_almost_equal(v1, v2, epsilon=EPSILON):\n return abs(v1.x - v2.x) < epsilon and (abs(v1.y - v2.y) < epsilon and abs(v1.z - v2.z) < epsilon)\n\ndef vector3_almost_equal_2d(v1, v2, epsilon=EPSILON):\n return abs(v1.x - v2.x) < epsilon and abs(v1.z - v2.z) < epsilon\n\ndef quaternion_almost_equal(q1, q2, epsilon=QUATERNION_EPSILON):\n if abs(q1.x - q2.x) < epsilon and (abs(q1.y - q2.y) < epsilon and abs(q1.z - q2.z) < epsilon) and abs(q1.w - q2.w) < epsilon:\n return True\n if abs(q1.x + q2.x) < epsilon and (abs(q1.y + q2.y) < epsilon and abs(q1.z + q2.z) < epsilon) and abs(q1.w + q2.w) < epsilon:\n return True\n return False\n\ndef transform_almost_equal(t1, t2, epsilon=EPSILON, epsilon_orientation=QUATERNION_EPSILON):\n if epsilon_orientation is DEFAULT:\n epsilon_orientation = epsilon\n return vector3_almost_equal(t1.translation, t2.translation, epsilon=epsilon) and quaternion_almost_equal(t1.orientation, t2.orientation, epsilon=epsilon_orientation)\n\ndef transform_almost_equal_2d(t1, t2, epsilon=EPSILON, epsilon_orientation=QUATERNION_EPSILON):\n if epsilon_orientation is DEFAULT:\n epsilon_orientation = epsilon\n return vector3_almost_equal_2d(t1.translation, t2.translation, epsilon=epsilon) and quaternion_almost_equal(t1.orientation, t2.orientation, epsilon=epsilon_orientation)\n\ndef vector3_rotate_axis_angle(v, angle, axis):\n q = Quaternion.from_axis_angle(angle, axis)\n return q.transform_vector(v)\n\ndef vector3_angle(v):\n return atan2(v.x, v.z)\n\ndef angle_to_yaw_quaternion(angle):\n return Quaternion.from_axis_angle(angle, UP_AXIS)\n\ndef yaw_quaternion_to_angle(q):\n if almost_equal(q.y, 0.0):\n return 0\n angle = acos(q.w)*2.0\n if q.y > 0:\n return angle\n return -angle\n\ndef get_closest_point_2D(segment, p):\n a1 = segment[0]\n a2 = segment[1]\n (x1, x2) = (a1.x, a2.x)\n x3 = p.x\n (z1, z2) = (a1.z, a2.z)\n z3 = p.z\n dx = x2 - x1\n dz = z2 - z1\n t = ((x3 - x1)*dx + (z3 - z1)*dz)/(dx*dx + dz*dz)\n t = sims4.math.clamp(0, t, 1)\n x0 = x1 + 
t*dx\n z0 = z1 + t*dz\n return Vector3(x0, p.y, z0)\n\ndef invert_quaternion(q):\n d = 1.0/(q.x*q.x + q.y*q.y + q.z*q.z + q.w*q.w)\n return Quaternion(-d*q.x, -d*q.y, -d*q.z, d*q.w)\n\ndef get_difference_transform(transform_a, transform_b):\n v = transform_b.translation - transform_a.translation\n a_q_i = invert_quaternion(transform_a.orientation)\n q = Quaternion.concatenate(transform_b.orientation, a_q_i)\n v_prime = Quaternion.transform_vector(a_q_i, v)\n return Transform(v_prime, q)\n\nclass Location:\n __qualname__ = 'Location'\n __slots__ = ('transform', 'routing_surface', '_parent_ref', 'joint_name_or_hash', 'slot_hash')\n\n def __init__(self, transform, routing_surface, parent=None, joint_name_or_hash=None, slot_hash=0):\n self.transform = transform\n self.routing_surface = routing_surface\n self.parent = parent\n self.joint_name_or_hash = joint_name_or_hash\n self.slot_hash = slot_hash\n\n def __repr__(self):\n return standard_repr(self, self.transform, self.routing_surface, parent=self.parent, joint_name_or_hash=self.joint_name_or_hash, slot_hash=self.slot_hash)\n\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n if self.transform != other.transform:\n return False\n if self.parent != other.parent:\n return False\n if self.routing_surface != other.routing_surface:\n return False\n slot_hash0 = self.joint_name_or_hash or self.slot_hash\n slot_hash1 = other.joint_name_or_hash or other.slot_hash\n if slot_hash0 != slot_hash1:\n return False\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n @property\n def parent(self):\n if self._parent_ref is not None:\n return self._parent_ref()\n\n @parent.setter\n def parent(self, value):\n if value is not None:\n self._parent_ref = value.ref()\n self.routing_surface = None\n else:\n self._parent_ref = None\n\n @property\n def joint_name_hash(self):\n if self.joint_name_or_hash is None:\n return 0\n if isinstance(self.joint_name_or_hash, int):\n return 
self.joint_name_or_hash\n return sims4.hash_util.hash32(self.joint_name_or_hash)\n\n @property\n def world_routing_surface(self):\n if self.parent is not None:\n return self.parent.location.world_routing_surface\n return self.routing_surface\n\n @property\n def zone_id(self):\n if self.world_routing_surface.type == 1:\n return self.world_routing_surface.primary_id\n return sims4.zone_utils.get_zone_id()\n\n @property\n def level(self):\n return self.world_routing_surface.secondary_id\n\n @property\n def world_transform(self):\n if self.parent is None:\n return self.transform\n transform = self.transform\n parent = self.parent\n if parent.is_part:\n parent_transform = parent.part_owner.transform\n else:\n parent_transform = parent.transform\n if self.joint_name_or_hash is None:\n if transform is None:\n return parent_transform\n return sims4.math.Transform.concatenate(transform, parent_transform)\n joint_transform = native.animation.get_joint_transform_from_rig(self.parent.rig, self.joint_name_or_hash)\n if transform is None:\n return sims4.math.Transform.concatenate(joint_transform, parent_transform)\n local_transform = sims4.math.Transform.concatenate(transform, joint_transform)\n return sims4.math.Transform.concatenate(local_transform, parent_transform)\n\n def duplicate(self):\n return type(self)(self.transform, self.routing_surface, self.parent, self.joint_name_or_hash, self.slot_hash)\n\n def clone(self, *, transform=DEFAULT, translation=DEFAULT, orientation=DEFAULT, routing_surface=DEFAULT, parent=DEFAULT, joint_name_or_hash=DEFAULT, slot_hash=DEFAULT):\n if transform is DEFAULT:\n transform = self.transform\n if transform is not None:\n if translation is DEFAULT:\n translation = transform.translation\n if orientation is DEFAULT:\n orientation = transform.orientation\n transform = Transform(translation, orientation)\n if routing_surface is DEFAULT:\n routing_surface = self.routing_surface\n if parent is DEFAULT:\n parent = self.parent\n if joint_name_or_hash 
is DEFAULT:\n joint_name_or_hash = self.joint_name_or_hash\n if slot_hash is DEFAULT:\n slot_hash = self.slot_hash\n return type(self)(transform, routing_surface, parent, joint_name_or_hash, slot_hash)\n\nclass LinearCurve:\n __qualname__ = 'LinearCurve'\n __slots__ = ('points',)\n\n def __init__(self, points):\n self.points = points\n self.points.sort(key=lambda i: i[0])\n\n def get(self, val):\n p_max = len(self.points) - 1\n if val <= self.points[0][0]:\n return self.points[0][1]\n if val >= self.points[p_max][0]:\n return self.points[p_max][1]\n i = p_max - 1\n while i > 0:\n while val < self.points[i][0]:\n i -= 1\n p1 = self.points[i]\n p2 = self.points[i + 1]\n percent = (val - p1[0])/(p2[0] - p1[0])\n return (p2[1] - p1[1])*percent + p1[1]\n\nclass WeightedUtilityCurve(LinearCurve):\n __qualname__ = 'WeightedUtilityCurve'\n\n def __init__(self, points, max_y=0, weight=1):\n if max_y == 0:\n max_y = self._find_largest_y(points)\n transformed_points = [(point[0], point[1]/max_y*weight) for point in points]\n super().__init__(transformed_points)\n\n def _find_largest_y(self, points):\n max_y = 0\n for point in points:\n while point[1] > max_y:\n max_y = point[1]\n return max_y\n\nclass CircularUtilityCurve(LinearCurve):\n __qualname__ = 'CircularUtilityCurve'\n\n def __init__(self, points, min_x, max_x):\n super().__init__(points)\n self._min_x = min_x\n self._max_x = max_x\n last_point = self.points[-1]\n distance_to_end = max_x - last_point[0]\n total_length = distance_to_end + self.points[0][1]\n distance_to_pivot_point = distance_to_end/total_length\n pivot_y_value = (self.points[0][1] - last_point[1])*distance_to_pivot_point + self.points[0][1]\n self.points.insert(0, (0, pivot_y_value))\n self.points.insert(len(self.points), (self._max_x, pivot_y_value))\n\n def get(self, val):\n return super().get(val)\n\nclass Operator(enum.Int):\n __qualname__ = 'Operator'\n GREATER = 1\n GREATER_OR_EQUAL = 2\n EQUAL = 3\n NOTEQUAL = 4\n LESS_OR_EQUAL = 5\n LESS = 
6\n\n @staticmethod\n def from_function(fn):\n if fn == operator.gt:\n return Operator.GREATER\n if fn == operator.ge:\n return Operator.GREATER_OR_EQUAL\n if fn == operator.eq:\n return Operator.EQUAL\n if fn == operator.ne:\n return Operator.NOTEQUAL\n if fn == operator.le:\n return Operator.LESS_OR_EQUAL\n if fn == operator.lt:\n return Operator.LESS\n\n @property\n def function(self):\n if self.value == Operator.GREATER:\n return operator.gt\n if self.value == Operator.GREATER_OR_EQUAL:\n return operator.ge\n if self.value == Operator.EQUAL:\n return operator.eq\n if self.value == Operator.NOTEQUAL:\n return operator.ne\n if self.value == Operator.LESS_OR_EQUAL:\n return operator.le\n if self.value == Operator.LESS:\n return operator.lt\n\n @property\n def inverse(self):\n if self == Operator.GREATER:\n return Operator.LESS_OR_EQUAL\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.LESS\n if self == Operator.EQUAL:\n return Operator.NOTEQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.LESS:\n return Operator.GREATER_OR_EQUAL\n\n @property\n def symbol(self):\n if self == Operator.GREATER:\n return '>'\n if self == Operator.GREATER_OR_EQUAL:\n return '>='\n if self == Operator.EQUAL:\n return '=='\n if self == Operator.NOTEQUAL:\n return '!='\n if self == Operator.LESS_OR_EQUAL:\n return '<='\n if self == Operator.LESS:\n return '<'\n\n @property\n def category(self):\n if self == Operator.GREATER:\n return Operator.GREATER\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.EQUAL:\n return Operator.EQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.LESS\n if self == Operator.LESS:\n return Operator.LESS\n\nclass InequalityOperator(enum.Int):\n __qualname__ = 'InequalityOperator'\n GREATER = Operator.GREATER\n GREATER_OR_EQUAL = Operator.GREATER_OR_EQUAL\n 
LESS_OR_EQUAL = Operator.LESS_OR_EQUAL\n LESS = Operator.LESS\n\nwith InequalityOperator.__reload_context__(InequalityOperator, InequalityOperator):\n InequalityOperator.from_function = Operator.from_function\n InequalityOperator.function = Operator.function\n InequalityOperator.inverse = Operator.inverse\n InequalityOperator.symbol = Operator.symbol\n InequalityOperator.category = Operator.category\n\nclass Threshold:\n __qualname__ = 'Threshold'\n __slots__ = ('value', 'comparison')\n\n def __init__(self, value=None, comparison=None):\n self.value = value\n self.comparison = comparison\n\n def compare(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value, self.value)\n return False\n\n def compare_value(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value.value, self.value.value)\n return False\n\n def inverse(self):\n return Threshold(self.value, Operator.from_function(self.comparison).inverse.function)\n\n def __str__(self):\n if self.comparison is None:\n return 'None'\n return '{} {}'.format(Operator.from_function(self.comparison).symbol, self.value)\n\n def __repr__(self):\n return '<Threshold {}>'.format(str(self))\n\n def __eq__(self, other):\n if not isinstance(other, Threshold):\n return False\n if not self.value == other.value:\n return False\n if not self.comparison == other.comparison:\n return False\n return True\n\n def __hash__(self):\n return hash((self.value, self.comparison))\n\n",
"step-ids": [
52,
53,
55,
64,
75
]
}
|
[
52,
53,
55,
64,
75
] |
from Eutils.pathmagic import context
with context():
import argparse
import numpy as np
from model.hourglass_yolo_net_multi_gpu import HOURGLASSYOLONet
from evaluator.Eutils.pascal_val import PASCAL_VAL
# from evaluator.Eutils.coco_val import COCO_VAL
from evaluator.Eutils.detector import Detector
import utils.config as cfg
from utils.logger import Logger
from utils.config_utils import get_config,ds_config
from tqdm import tqdm
import tensorflow as tf
import copy
import os
# import cv2
# from evaluator.Eutils.draw_result import draw_result
class EVALUATOR(object):
    """PASCAL-VOC-style average-precision evaluator for a detector.

    On construction it runs ``detector`` over every batch supplied by
    ``data``, gathers all detections sorted by descending confidence,
    and computes the precision/recall curve.  Call :meth:`eval` to get
    the final AP value.

    Boxes are handled as (center_x, center_y, width, height) — see the
    corner arithmetic in :meth:`pr_curve`.
    """

    def __init__(self, detector, data):
        self.detector = detector
        self.data = data
        # Total number of ground-truth boxes; denominator of recall.
        self.gt = self.data.gt
        self.image_ids, self.bboxes, \
            self.prob, self.annotations = self.prepare()
        self.precision, self.recall = self.pr_curve()

    def prepare(self):
        """Run the detector over the whole dataset.

        Returns:
            image_ids: one image id per detection, sorted by descending
                confidence.
            BB: float array of detected boxes, same order as image_ids.
            sorted_prob: detection confidences, descending.
            annotations: image id -> deep copy of its ground-truth
                record ('bbox_det'); copied because pr_curve() mutates
                the per-box 'det' matched flags.
        """
        image_ids, bboxes, prob = [], [], []
        annotations = {}
        for i in tqdm(range(self.data.num_batch), desc='batch forward'):
            img_batch, bbox_batch = self.data.get_batch()
            results = self.detector.detect_batch(img_batch)
            for ii in range(len(results)):
                boxes_filtered, probs_filtered = results[ii]
                # Repeat the image id once per detected box so each
                # detection can be matched back to its ground truth
                # after the global confidence sort below.
                image_ids.extend([bbox_batch[ii]['id']] * len(boxes_filtered))
                bboxes.extend(boxes_filtered)
                prob.extend(probs_filtered)
                if bbox_batch[ii]['id'] not in annotations:
                    annotations[bbox_batch[ii]['id']] = copy.deepcopy(
                        bbox_batch[ii]['bbox_det'])
        # Globally rank all detections by descending confidence.
        sorted_ind = np.argsort(prob)[::-1]
        sorted_prob = np.sort(prob)[::-1]
        if bboxes:
            BB = np.array(bboxes)[sorted_ind, :]
        else:
            # No detections at all: np.array([]) is 1-D, so the 2-D
            # index above would raise IndexError.  Return a well-formed
            # empty (0, 4) array instead; pr_curve() then runs its loop
            # zero times and yields empty precision/recall arrays.
            BB = np.zeros((0, 4))
        image_ids = [image_ids[x] for x in sorted_ind]
        return image_ids, BB, sorted_prob, annotations

    def pr_curve(self):
        """Greedy-match detections to ground truth, VOC style.

        Walks detections in descending-confidence order; a detection is
        a true positive when its best IoU against a not-yet-matched
        ground-truth box exceeds cfg.IOU_THRESHOLD_GT, otherwise a
        false positive.

        Returns:
            (precision, recall): cumulative arrays, one entry per
            detection.
        """
        nd = len(self.image_ids)
        tp = np.zeros(nd)
        fp = np.zeros(nd)
        for d in tqdm(range(nd), desc='painting PR curve'):
            R = self.annotations[self.image_ids[d]]
            bb = self.bboxes[d, :].astype(float)
            ovmax = -np.inf
            BBGT = R['bboxes'].astype(float)
            if BBGT.size > 0:
                # Intersection rectangle; boxes are (cx, cy, w, h), so
                # the corners are cx -/+ w/2 and cy -/+ h/2.
                ixmin = np.maximum(BBGT[:, 0] - BBGT[:, 2] / 2, bb[0] - bb[2] / 2)
                iymin = np.maximum(BBGT[:, 1] - BBGT[:, 3] / 2, bb[1] - bb[3] / 2)
                ixmax = np.minimum(BBGT[:, 0] + BBGT[:, 2] / 2, bb[0] + bb[2] / 2)
                iymax = np.minimum(BBGT[:, 1] + BBGT[:, 3] / 2, bb[1] + bb[3] / 2)
                # The +1 follows the VOC devkit's inclusive-pixel
                # convention for widths/heights.
                iw = np.maximum(ixmax - ixmin + 1., 0.)
                ih = np.maximum(iymax - iymin + 1., 0.)
                inters = iw * ih
                # union = area(detection) + area(ground truth) - intersection
                uni = bb[2] * bb[3] + BBGT[:, 2] * BBGT[:, 3] - inters
                overlaps = inters / uni
                ovmax = np.max(overlaps)
                jmax = np.argmax(overlaps)
            if ovmax > cfg.IOU_THRESHOLD_GT:
                if not R['det'][jmax]:
                    tp[d] = 1.
                    # Mark this ground-truth box as matched so later
                    # (lower-confidence) detections of it count as FPs.
                    R['det'][jmax] = 1
                else:
                    fp[d] = 1.
            else:
                fp[d] = 1.
        # Compute cumulative precision/recall.
        fp = np.cumsum(fp)
        tp = np.cumsum(tp)
        rec = tp / float(self.gt)
        # Avoid divide by zero in case the first detection matches a
        # difficult ground truth.
        prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
        return prec, rec

    def eval(self, use_07_metric=False):
        """Compute average precision from self.precision/self.recall.

        Args:
            use_07_metric: if True, use the VOC 2007 11-point
                interpolation; otherwise integrate the full precision
                envelope (VOC 2010+ style).

        Returns:
            The average precision as a float.
        """
        if use_07_metric:
            # 11-point metric: mean of max precision at recall >= t.
            ap = 0.
            for t in np.arange(0., 1.1, 0.1):
                if np.sum(self.recall >= t) == 0:
                    p = 0
                else:
                    p = np.max(self.precision[self.recall >= t])
                ap = ap + p / 11.
        else:
            # Correct AP calculation: first append sentinel values so
            # the curve starts at recall 0 and ends at recall 1.
            mrec = np.concatenate(([0.], self.recall, [1.]))
            mpre = np.concatenate(([0.], self.precision, [0.]))
            # Compute the precision envelope (monotonically decreasing).
            for i in range(mpre.size - 1, 0, -1):
                mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
            # To integrate the area under the PR curve, look for points
            # where the X axis (recall) changes value ...
            i = np.where(mrec[1:] != mrec[:-1])[0]
            # ... and sum (delta recall) * precision at those points.
            ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
        return ap
def main():
    """CLI entry point: compute AP for one checkpoint or sweep many.

    Without ``--auto_all``: evaluates the single checkpoint given by
    ``--weight_dir``/``--weights`` on PASCAL validation data and logs
    the AP to ``--eval_file``.

    With ``--auto_all``: walks every sub-directory of ``../<log_file>``,
    evaluates every 'hg_yolo*' checkpoint it finds against every data
    source selected by ``--data_source``, and logs one AP line each.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-ims', '--image_size', default=512, type=int)
    parser.add_argument('-g','--gpu', type=str)
    parser.add_argument('-c', '--cpu', action='store_true', help='use cpu')
    parser.add_argument('-ds', '--data_source', default='all', type=str, choices=['coco', 'pascal', 'all'])
    parser.add_argument('-ef', '--eval_file', type=str, required=True)
    parser.add_argument('-lf', '--log_file', type=str)
    parser.add_argument('-al', '--auto_all', action='store_true')
    # The two options below apply only to single-model evaluation
    # (i.e. when --auto_all is not given).
    parser.add_argument('--weights', default="hg_yolo-240000", type=str)
    parser.add_argument('--weight_dir', default='../log_bbox_hm/0.8_0.08_0.03_conv_fc_l2_0.005_bhm5', type=str)
    args = parser.parse_args()
    # GPU/CPU selection: setting CUDA_VISIBLE_DEVICES='' forces CPU;
    # note --cpu overrides --gpu when both are given.
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    if args.cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    if not args.auto_all:
        # Single-checkpoint evaluation on PASCAL validation data.
        strings = get_config(args.weight_dir)
        net = HOURGLASSYOLONet('eval')
        detector = Detector(net, os.path.join(args.weight_dir, args.weights))
        # data = COCO_VAL()  # alternative validation set
        data = PASCAL_VAL()
        evaluator = EVALUATOR(detector, data)
        ap = evaluator.eval()
        log = Logger(args.eval_file, level='debug')
        log.logger.info('\n calculate single ap from {} {}\n'.format(args.weight_dir, args.weights))
        log.logger.info('Data sc:{} AP:{} Weights:{} {}'.format(
            data.__class__.__name__, ap, args.weights, strings))
    else:
        # Sweep mode: evaluate every checkpoint under every experiment
        # directory found in ../<log_file>.
        # NOTE(review): --log_file must be provided in this branch,
        # otherwise '../' + None raises TypeError — confirm intended.
        data_source = ds_config(args)
        log = Logger(args.eval_file, level='debug')
        log.logger.info('\n calculate ap from {}\n'.format(args.eval_file))
        model_start = 'hg_yolo'
        rootdir = '../' + args.log_file
        root_list = os.listdir(rootdir)  # list all files and directories under rootdir
        root_list.sort()
        for path in root_list:
            model_dir = os.path.join(rootdir, path)
            models = os.listdir(model_dir)
            # Keep only checkpoint files ('hg_yolo-<step>.*'), strip the
            # extension, and de-duplicate (each checkpoint has several
            # files sharing one stem).
            models = filter(lambda x: x.startswith(model_start), models)
            models = list(set(map(lambda x: x.split('.')[0], models)))
            # Sort numerically by training step: names are
            # 'hg_yolo-<step>', so characters from index 8 are digits.
            models.sort(key=lambda x: int(x[8:]))
            for data in data_source:
                for model in models:
                    strings = get_config(model_dir)
                    # Clear the previous model's graph before building
                    # a new network for the next checkpoint.
                    tf.reset_default_graph()
                    net = HOURGLASSYOLONet('eval')
                    detector = Detector(net, os.path.join(model_dir, model))
                    evaluator = EVALUATOR(detector, data)
                    ap = evaluator.eval()
                    log.logger.info('Data sc:{} AP:{:<5.5f} Weights:{} {}'.format(
                        data.__class__.__name__, ap, model, strings))
                    # Release session and graph objects before the next
                    # checkpoint to keep memory bounded.
                    detector.sess.close()
                    del net
                    del detector
                    del evaluator
# Script entry point: parse CLI arguments and run the evaluation.
if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "3bb6305ceb1491db57c7f8b03e438398644c8f90",
"index": 8124,
"step-1": "<mask token>\n\n\nclass EVALUATOR(object):\n\n def __init__(self, detector, data):\n self.detector = detector\n self.data = data\n self.gt = self.data.gt\n self.image_ids, self.bboxes, self.prob, self.annotations = (self.\n prepare())\n self.precision, self.recall = self.pr_curve()\n\n def prepare(self):\n image_ids, bboxes, prob = [], [], []\n annotations = {}\n for i in tqdm(range(self.data.num_batch), desc='batch forward'):\n img_batch, bbox_batch = self.data.get_batch()\n results = self.detector.detect_batch(img_batch)\n for ii in range(len(results)):\n boxes_filtered, probs_filtered = results[ii]\n image_ids.extend([bbox_batch[ii]['id']] * len(boxes_filtered))\n bboxes.extend(boxes_filtered)\n prob.extend(probs_filtered)\n if bbox_batch[ii]['id'] not in annotations:\n annotations[bbox_batch[ii]['id']] = copy.deepcopy(\n bbox_batch[ii]['bbox_det'])\n sorted_ind = np.argsort(prob)[::-1]\n sorted_prob = np.sort(prob)[::-1]\n BB = np.array(bboxes)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n return image_ids, BB, sorted_prob, annotations\n\n def pr_curve(self):\n nd = len(self.image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in tqdm(range(nd), desc='painting PR curve'):\n R = self.annotations[self.image_ids[d]]\n bb = self.bboxes[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bboxes'].astype(float)\n if BBGT.size > 0:\n ixmin = np.maximum(BBGT[:, 0] - BBGT[:, 2] / 2, bb[0] - bb[\n 2] / 2)\n iymin = np.maximum(BBGT[:, 1] - BBGT[:, 3] / 2, bb[1] - bb[\n 3] / 2)\n ixmax = np.minimum(BBGT[:, 0] + BBGT[:, 2] / 2, bb[0] + bb[\n 2] / 2)\n iymax = np.minimum(BBGT[:, 1] + BBGT[:, 3] / 2, bb[1] + bb[\n 3] / 2)\n iw = np.maximum(ixmax - ixmin + 1.0, 0.0)\n ih = np.maximum(iymax - iymin + 1.0, 0.0)\n inters = iw * ih\n uni = bb[2] * bb[3] + BBGT[:, 2] * BBGT[:, 3] - inters\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n if ovmax > cfg.IOU_THRESHOLD_GT:\n if not R['det'][jmax]:\n tp[d] = 
1.0\n R['det'][jmax] = 1\n else:\n fp[d] = 1.0\n else:\n fp[d] = 1.0\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(self.gt)\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n return prec, rec\n\n def eval(self, use_07_metric=False):\n \"\"\" ap = eval(rec, prec, [use_07_metric])\n Compute AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n ap = 0.0\n for t in np.arange(0.0, 1.1, 0.1):\n if np.sum(self.recall >= t) == 0:\n p = 0\n else:\n p = np.max(self.precision[self.recall >= t])\n ap = ap + p / 11.0\n else:\n mrec = np.concatenate(([0.0], self.recall, [1.0]))\n mpre = np.concatenate(([0.0], self.precision, [0.0]))\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n i = np.where(mrec[1:] != mrec[:-1])[0]\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass EVALUATOR(object):\n\n def __init__(self, detector, data):\n self.detector = detector\n self.data = data\n self.gt = self.data.gt\n self.image_ids, self.bboxes, self.prob, self.annotations = (self.\n prepare())\n self.precision, self.recall = self.pr_curve()\n\n def prepare(self):\n image_ids, bboxes, prob = [], [], []\n annotations = {}\n for i in tqdm(range(self.data.num_batch), desc='batch forward'):\n img_batch, bbox_batch = self.data.get_batch()\n results = self.detector.detect_batch(img_batch)\n for ii in range(len(results)):\n boxes_filtered, probs_filtered = results[ii]\n image_ids.extend([bbox_batch[ii]['id']] * len(boxes_filtered))\n bboxes.extend(boxes_filtered)\n prob.extend(probs_filtered)\n if bbox_batch[ii]['id'] not in annotations:\n annotations[bbox_batch[ii]['id']] = copy.deepcopy(\n bbox_batch[ii]['bbox_det'])\n sorted_ind = np.argsort(prob)[::-1]\n sorted_prob = np.sort(prob)[::-1]\n BB = np.array(bboxes)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n return image_ids, BB, sorted_prob, annotations\n\n def pr_curve(self):\n nd = len(self.image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in tqdm(range(nd), desc='painting PR curve'):\n R = self.annotations[self.image_ids[d]]\n bb = self.bboxes[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bboxes'].astype(float)\n if BBGT.size > 0:\n ixmin = np.maximum(BBGT[:, 0] - BBGT[:, 2] / 2, bb[0] - bb[\n 2] / 2)\n iymin = np.maximum(BBGT[:, 1] - BBGT[:, 3] / 2, bb[1] - bb[\n 3] / 2)\n ixmax = np.minimum(BBGT[:, 0] + BBGT[:, 2] / 2, bb[0] + bb[\n 2] / 2)\n iymax = np.minimum(BBGT[:, 1] + BBGT[:, 3] / 2, bb[1] + bb[\n 3] / 2)\n iw = np.maximum(ixmax - ixmin + 1.0, 0.0)\n ih = np.maximum(iymax - iymin + 1.0, 0.0)\n inters = iw * ih\n uni = bb[2] * bb[3] + BBGT[:, 2] * BBGT[:, 3] - inters\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n if ovmax > cfg.IOU_THRESHOLD_GT:\n if not R['det'][jmax]:\n tp[d] = 
1.0\n R['det'][jmax] = 1\n else:\n fp[d] = 1.0\n else:\n fp[d] = 1.0\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(self.gt)\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n return prec, rec\n\n def eval(self, use_07_metric=False):\n \"\"\" ap = eval(rec, prec, [use_07_metric])\n Compute AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n ap = 0.0\n for t in np.arange(0.0, 1.1, 0.1):\n if np.sum(self.recall >= t) == 0:\n p = 0\n else:\n p = np.max(self.precision[self.recall >= t])\n ap = ap + p / 11.0\n else:\n mrec = np.concatenate(([0.0], self.recall, [1.0]))\n mpre = np.concatenate(([0.0], self.precision, [0.0]))\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n i = np.where(mrec[1:] != mrec[:-1])[0]\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-ims', '--image_size', default=512, type=int)\n parser.add_argument('-g', '--gpu', type=str)\n parser.add_argument('-c', '--cpu', action='store_true', help='use cpu')\n parser.add_argument('-ds', '--data_source', default='all', type=str,\n choices=['coco', 'pascal', 'all'])\n parser.add_argument('-ef', '--eval_file', type=str, required=True)\n parser.add_argument('-lf', '--log_file', type=str)\n parser.add_argument('-al', '--auto_all', action='store_true')\n parser.add_argument('--weights', default='hg_yolo-240000', type=str)\n parser.add_argument('--weight_dir', default=\n '../log_bbox_hm/0.8_0.08_0.03_conv_fc_l2_0.005_bhm5', type=str)\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n if args.cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n if not args.auto_all:\n strings = get_config(args.weight_dir)\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(args.weight_dir, args.weights))\n data = PASCAL_VAL()\n 
evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log = Logger(args.eval_file, level='debug')\n log.logger.info('\\n calculate single ap from {} {}\\n'.format(args.\n weight_dir, args.weights))\n log.logger.info('Data sc:{} AP:{} Weights:{} {}'.format(data.\n __class__.__name__, ap, args.weights, strings))\n else:\n data_source = ds_config(args)\n log = Logger(args.eval_file, level='debug')\n log.logger.info('\\n calculate ap from {}\\n'.format(args.eval_file))\n model_start = 'hg_yolo'\n rootdir = '../' + args.log_file\n root_list = os.listdir(rootdir)\n root_list.sort()\n for path in root_list:\n model_dir = os.path.join(rootdir, path)\n models = os.listdir(model_dir)\n models = filter(lambda x: x.startswith(model_start), models)\n models = list(set(map(lambda x: x.split('.')[0], models)))\n models.sort(key=lambda x: int(x[8:]))\n for data in data_source:\n for model in models:\n strings = get_config(model_dir)\n tf.reset_default_graph()\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(model_dir, model))\n evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log.logger.info('Data sc:{} AP:{:<5.5f} Weights:{} {}'\n .format(data.__class__.__name__, ap, model, strings))\n detector.sess.close()\n del net\n del detector\n del evaluator\n\n\n<mask token>\n",
"step-3": "<mask token>\nwith context():\n import argparse\n import numpy as np\n from model.hourglass_yolo_net_multi_gpu import HOURGLASSYOLONet\n from evaluator.Eutils.pascal_val import PASCAL_VAL\n from evaluator.Eutils.detector import Detector\n import utils.config as cfg\n from utils.logger import Logger\n from utils.config_utils import get_config, ds_config\n from tqdm import tqdm\n import tensorflow as tf\n import copy\n import os\n\n\nclass EVALUATOR(object):\n\n def __init__(self, detector, data):\n self.detector = detector\n self.data = data\n self.gt = self.data.gt\n self.image_ids, self.bboxes, self.prob, self.annotations = (self.\n prepare())\n self.precision, self.recall = self.pr_curve()\n\n def prepare(self):\n image_ids, bboxes, prob = [], [], []\n annotations = {}\n for i in tqdm(range(self.data.num_batch), desc='batch forward'):\n img_batch, bbox_batch = self.data.get_batch()\n results = self.detector.detect_batch(img_batch)\n for ii in range(len(results)):\n boxes_filtered, probs_filtered = results[ii]\n image_ids.extend([bbox_batch[ii]['id']] * len(boxes_filtered))\n bboxes.extend(boxes_filtered)\n prob.extend(probs_filtered)\n if bbox_batch[ii]['id'] not in annotations:\n annotations[bbox_batch[ii]['id']] = copy.deepcopy(\n bbox_batch[ii]['bbox_det'])\n sorted_ind = np.argsort(prob)[::-1]\n sorted_prob = np.sort(prob)[::-1]\n BB = np.array(bboxes)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n return image_ids, BB, sorted_prob, annotations\n\n def pr_curve(self):\n nd = len(self.image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in tqdm(range(nd), desc='painting PR curve'):\n R = self.annotations[self.image_ids[d]]\n bb = self.bboxes[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bboxes'].astype(float)\n if BBGT.size > 0:\n ixmin = np.maximum(BBGT[:, 0] - BBGT[:, 2] / 2, bb[0] - bb[\n 2] / 2)\n iymin = np.maximum(BBGT[:, 1] - BBGT[:, 3] / 2, bb[1] - bb[\n 3] / 2)\n ixmax = np.minimum(BBGT[:, 0] + BBGT[:, 
2] / 2, bb[0] + bb[\n 2] / 2)\n iymax = np.minimum(BBGT[:, 1] + BBGT[:, 3] / 2, bb[1] + bb[\n 3] / 2)\n iw = np.maximum(ixmax - ixmin + 1.0, 0.0)\n ih = np.maximum(iymax - iymin + 1.0, 0.0)\n inters = iw * ih\n uni = bb[2] * bb[3] + BBGT[:, 2] * BBGT[:, 3] - inters\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n if ovmax > cfg.IOU_THRESHOLD_GT:\n if not R['det'][jmax]:\n tp[d] = 1.0\n R['det'][jmax] = 1\n else:\n fp[d] = 1.0\n else:\n fp[d] = 1.0\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(self.gt)\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n return prec, rec\n\n def eval(self, use_07_metric=False):\n \"\"\" ap = eval(rec, prec, [use_07_metric])\n Compute AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n ap = 0.0\n for t in np.arange(0.0, 1.1, 0.1):\n if np.sum(self.recall >= t) == 0:\n p = 0\n else:\n p = np.max(self.precision[self.recall >= t])\n ap = ap + p / 11.0\n else:\n mrec = np.concatenate(([0.0], self.recall, [1.0]))\n mpre = np.concatenate(([0.0], self.precision, [0.0]))\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n i = np.where(mrec[1:] != mrec[:-1])[0]\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-ims', '--image_size', default=512, type=int)\n parser.add_argument('-g', '--gpu', type=str)\n parser.add_argument('-c', '--cpu', action='store_true', help='use cpu')\n parser.add_argument('-ds', '--data_source', default='all', type=str,\n choices=['coco', 'pascal', 'all'])\n parser.add_argument('-ef', '--eval_file', type=str, required=True)\n parser.add_argument('-lf', '--log_file', type=str)\n parser.add_argument('-al', '--auto_all', action='store_true')\n parser.add_argument('--weights', default='hg_yolo-240000', type=str)\n parser.add_argument('--weight_dir', 
default=\n '../log_bbox_hm/0.8_0.08_0.03_conv_fc_l2_0.005_bhm5', type=str)\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n if args.cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n if not args.auto_all:\n strings = get_config(args.weight_dir)\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(args.weight_dir, args.weights))\n data = PASCAL_VAL()\n evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log = Logger(args.eval_file, level='debug')\n log.logger.info('\\n calculate single ap from {} {}\\n'.format(args.\n weight_dir, args.weights))\n log.logger.info('Data sc:{} AP:{} Weights:{} {}'.format(data.\n __class__.__name__, ap, args.weights, strings))\n else:\n data_source = ds_config(args)\n log = Logger(args.eval_file, level='debug')\n log.logger.info('\\n calculate ap from {}\\n'.format(args.eval_file))\n model_start = 'hg_yolo'\n rootdir = '../' + args.log_file\n root_list = os.listdir(rootdir)\n root_list.sort()\n for path in root_list:\n model_dir = os.path.join(rootdir, path)\n models = os.listdir(model_dir)\n models = filter(lambda x: x.startswith(model_start), models)\n models = list(set(map(lambda x: x.split('.')[0], models)))\n models.sort(key=lambda x: int(x[8:]))\n for data in data_source:\n for model in models:\n strings = get_config(model_dir)\n tf.reset_default_graph()\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(model_dir, model))\n evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log.logger.info('Data sc:{} AP:{:<5.5f} Weights:{} {}'\n .format(data.__class__.__name__, ap, model, strings))\n detector.sess.close()\n del net\n del detector\n del evaluator\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from Eutils.pathmagic import context\nwith context():\n import argparse\n import numpy as np\n from model.hourglass_yolo_net_multi_gpu import HOURGLASSYOLONet\n from evaluator.Eutils.pascal_val import PASCAL_VAL\n from evaluator.Eutils.detector import Detector\n import utils.config as cfg\n from utils.logger import Logger\n from utils.config_utils import get_config, ds_config\n from tqdm import tqdm\n import tensorflow as tf\n import copy\n import os\n\n\nclass EVALUATOR(object):\n\n def __init__(self, detector, data):\n self.detector = detector\n self.data = data\n self.gt = self.data.gt\n self.image_ids, self.bboxes, self.prob, self.annotations = (self.\n prepare())\n self.precision, self.recall = self.pr_curve()\n\n def prepare(self):\n image_ids, bboxes, prob = [], [], []\n annotations = {}\n for i in tqdm(range(self.data.num_batch), desc='batch forward'):\n img_batch, bbox_batch = self.data.get_batch()\n results = self.detector.detect_batch(img_batch)\n for ii in range(len(results)):\n boxes_filtered, probs_filtered = results[ii]\n image_ids.extend([bbox_batch[ii]['id']] * len(boxes_filtered))\n bboxes.extend(boxes_filtered)\n prob.extend(probs_filtered)\n if bbox_batch[ii]['id'] not in annotations:\n annotations[bbox_batch[ii]['id']] = copy.deepcopy(\n bbox_batch[ii]['bbox_det'])\n sorted_ind = np.argsort(prob)[::-1]\n sorted_prob = np.sort(prob)[::-1]\n BB = np.array(bboxes)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n return image_ids, BB, sorted_prob, annotations\n\n def pr_curve(self):\n nd = len(self.image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in tqdm(range(nd), desc='painting PR curve'):\n R = self.annotations[self.image_ids[d]]\n bb = self.bboxes[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bboxes'].astype(float)\n if BBGT.size > 0:\n ixmin = np.maximum(BBGT[:, 0] - BBGT[:, 2] / 2, bb[0] - bb[\n 2] / 2)\n iymin = np.maximum(BBGT[:, 1] - BBGT[:, 3] / 2, bb[1] - bb[\n 3] / 2)\n ixmax = 
np.minimum(BBGT[:, 0] + BBGT[:, 2] / 2, bb[0] + bb[\n 2] / 2)\n iymax = np.minimum(BBGT[:, 1] + BBGT[:, 3] / 2, bb[1] + bb[\n 3] / 2)\n iw = np.maximum(ixmax - ixmin + 1.0, 0.0)\n ih = np.maximum(iymax - iymin + 1.0, 0.0)\n inters = iw * ih\n uni = bb[2] * bb[3] + BBGT[:, 2] * BBGT[:, 3] - inters\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n if ovmax > cfg.IOU_THRESHOLD_GT:\n if not R['det'][jmax]:\n tp[d] = 1.0\n R['det'][jmax] = 1\n else:\n fp[d] = 1.0\n else:\n fp[d] = 1.0\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(self.gt)\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n return prec, rec\n\n def eval(self, use_07_metric=False):\n \"\"\" ap = eval(rec, prec, [use_07_metric])\n Compute AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n ap = 0.0\n for t in np.arange(0.0, 1.1, 0.1):\n if np.sum(self.recall >= t) == 0:\n p = 0\n else:\n p = np.max(self.precision[self.recall >= t])\n ap = ap + p / 11.0\n else:\n mrec = np.concatenate(([0.0], self.recall, [1.0]))\n mpre = np.concatenate(([0.0], self.precision, [0.0]))\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n i = np.where(mrec[1:] != mrec[:-1])[0]\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-ims', '--image_size', default=512, type=int)\n parser.add_argument('-g', '--gpu', type=str)\n parser.add_argument('-c', '--cpu', action='store_true', help='use cpu')\n parser.add_argument('-ds', '--data_source', default='all', type=str,\n choices=['coco', 'pascal', 'all'])\n parser.add_argument('-ef', '--eval_file', type=str, required=True)\n parser.add_argument('-lf', '--log_file', type=str)\n parser.add_argument('-al', '--auto_all', action='store_true')\n parser.add_argument('--weights', default='hg_yolo-240000', type=str)\n 
parser.add_argument('--weight_dir', default=\n '../log_bbox_hm/0.8_0.08_0.03_conv_fc_l2_0.005_bhm5', type=str)\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n if args.cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n if not args.auto_all:\n strings = get_config(args.weight_dir)\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(args.weight_dir, args.weights))\n data = PASCAL_VAL()\n evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log = Logger(args.eval_file, level='debug')\n log.logger.info('\\n calculate single ap from {} {}\\n'.format(args.\n weight_dir, args.weights))\n log.logger.info('Data sc:{} AP:{} Weights:{} {}'.format(data.\n __class__.__name__, ap, args.weights, strings))\n else:\n data_source = ds_config(args)\n log = Logger(args.eval_file, level='debug')\n log.logger.info('\\n calculate ap from {}\\n'.format(args.eval_file))\n model_start = 'hg_yolo'\n rootdir = '../' + args.log_file\n root_list = os.listdir(rootdir)\n root_list.sort()\n for path in root_list:\n model_dir = os.path.join(rootdir, path)\n models = os.listdir(model_dir)\n models = filter(lambda x: x.startswith(model_start), models)\n models = list(set(map(lambda x: x.split('.')[0], models)))\n models.sort(key=lambda x: int(x[8:]))\n for data in data_source:\n for model in models:\n strings = get_config(model_dir)\n tf.reset_default_graph()\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(model_dir, model))\n evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log.logger.info('Data sc:{} AP:{:<5.5f} Weights:{} {}'\n .format(data.__class__.__name__, ap, model, strings))\n detector.sess.close()\n del net\n del detector\n del evaluator\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from Eutils.pathmagic import context\nwith context():\n import argparse\n import numpy as np\n from model.hourglass_yolo_net_multi_gpu import HOURGLASSYOLONet\n from evaluator.Eutils.pascal_val import PASCAL_VAL\n # from evaluator.Eutils.coco_val import COCO_VAL\n from evaluator.Eutils.detector import Detector\n import utils.config as cfg\n from utils.logger import Logger\n from utils.config_utils import get_config,ds_config\n from tqdm import tqdm\n import tensorflow as tf\n import copy\n import os\n\n\n# import cv2\n# from evaluator.Eutils.draw_result import draw_result\n\n\nclass EVALUATOR(object):\n\n def __init__(self, detector, data):\n self.detector = detector\n self.data = data\n self.gt = self.data.gt\n self.image_ids, self.bboxes, \\\n self.prob, self.annotations = self.prepare()\n self.precision, self.recall = self.pr_curve()\n\n def prepare(self):\n image_ids, bboxes, prob = [], [], []\n annotations = {}\n # while img_batch:\n for i in tqdm(range(self.data.num_batch), desc='batch forward'):\n # print(\"{:5}th batch\".format(i))\n img_batch, bbox_batch = self.data.get_batch()\n results = self.detector.detect_batch(img_batch)\n for ii in range(len(results)):\n boxes_filtered, probs_filtered = results[ii]\n # bbox_gt = bbox_batch[ii]['bbox_det']['bboxes']\n # filter_mat_probs = np.array(probs_filtered >= cfg.THRESHOLD, dtype='bool')\n # filter_mat_probs = np.nonzero(filter_mat_probs)\n # boxes_ft_prob = boxes_filtered[filter_mat_probs]\n # probs_ft_prob = probs_filtered[filter_mat_probs]\n # image = img_batch[ii]\n # draw_result(image, bbox_gt, (0, 0, 255))\n # draw_result(image, boxes_ft_prob, (255, 0, 0))\n # cv2.imshow('Image', image)\n # cv2.waitKey(0)\n image_ids.extend([bbox_batch[ii]['id']] * len(boxes_filtered))\n bboxes.extend(boxes_filtered)\n prob.extend(probs_filtered)\n if bbox_batch[ii]['id'] not in annotations:\n annotations[bbox_batch[ii]['id']] = copy.deepcopy(bbox_batch[ii]['bbox_det'])\n sorted_ind = np.argsort(prob)[::-1]\n 
sorted_prob = np.sort(prob)[::-1]\n BB = np.array(bboxes)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n return image_ids, BB, sorted_prob, annotations\n\n def pr_curve(self):\n nd = len(self.image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in tqdm(range(nd), desc='painting PR curve'):\n # for d in range(nd):\n R = self.annotations[self.image_ids[d]]\n bb = self.bboxes[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bboxes'].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n # intersection\n ixmin = np.maximum(BBGT[:, 0] - BBGT[:, 2] / 2, bb[0] - bb[2] / 2)\n iymin = np.maximum(BBGT[:, 1] - BBGT[:, 3] / 2, bb[1] - bb[3] / 2)\n ixmax = np.minimum(BBGT[:, 0] + BBGT[:, 2] / 2, bb[0] + bb[2] / 2)\n iymax = np.minimum(BBGT[:, 1] + BBGT[:, 3] / 2, bb[1] + bb[3] / 2)\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n # union\n uni = bb[2] * bb[3] + BBGT[:, 2] * BBGT[:, 3] - inters\n\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n if ovmax > cfg.IOU_THRESHOLD_GT:\n if not R['det'][jmax]:\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(self.gt)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n return prec, rec\n\n def eval(self, use_07_metric=False):\n \"\"\" ap = eval(rec, prec, [use_07_metric])\n Compute AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(self.recall >= t) == 0:\n p = 0\n else:\n p = np.max(self.precision[self.recall >= t])\n ap = ap + p / 11.\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = 
np.concatenate(([0.], self.recall, [1.]))\n mpre = np.concatenate(([0.], self.precision, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n\n return ap\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-ims', '--image_size', default=512, type=int)\n parser.add_argument('-g','--gpu', type=str)\n parser.add_argument('-c', '--cpu', action='store_true', help='use cpu')\n parser.add_argument('-ds', '--data_source', default='all', type=str, choices=['coco', 'pascal', 'all'])\n parser.add_argument('-ef', '--eval_file', type=str, required=True)\n parser.add_argument('-lf', '--log_file', type=str)\n parser.add_argument('-al', '--auto_all', action='store_true')\n # when calculate single model\n parser.add_argument('--weights', default=\"hg_yolo-240000\", type=str)\n parser.add_argument('--weight_dir', default='../log_bbox_hm/0.8_0.08_0.03_conv_fc_l2_0.005_bhm5', type=str)\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n if args.cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n if not args.auto_all:\n strings = get_config(args.weight_dir)\n\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(args.weight_dir, args.weights))\n # data = COCO_VAL()\n data = PASCAL_VAL()\n evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log = Logger(args.eval_file, level='debug')\n log.logger.info('\\n calculate single ap from {} {}\\n'.format(args.weight_dir, args.weights))\n log.logger.info('Data sc:{} AP:{} Weights:{} {}'.format(\n data.__class__.__name__, ap, args.weights, strings))\n else:\n data_source = ds_config(args)\n log = Logger(args.eval_file, level='debug')\n 
log.logger.info('\\n calculate ap from {}\\n'.format(args.eval_file))\n model_start = 'hg_yolo'\n rootdir = '../' + args.log_file\n root_list = os.listdir(rootdir) # 列出文件夹下所有的目录与文件\n root_list.sort()\n for path in root_list:\n model_dir = os.path.join(rootdir, path)\n models = os.listdir(model_dir)\n models = filter(lambda x: x.startswith(model_start), models)\n models = list(set(map(lambda x: x.split('.')[0], models)))\n models.sort(key=lambda x: int(x[8:]))\n for data in data_source:\n for model in models:\n strings = get_config(model_dir)\n tf.reset_default_graph()\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(model_dir, model))\n evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log.logger.info('Data sc:{} AP:{:<5.5f} Weights:{} {}'.format(\n data.__class__.__name__, ap, model, strings))\n detector.sess.close()\n del net\n del detector\n del evaluator\n\n\nif __name__ == '__main__':\n main()\n # print(os.path.realpath('.'))\n # print(os.path.dirname(os.path.realpath('.')))\n # print(os.sep)\n #\n # print(os.path.dirname(os.path.realpath('.')).split(os.sep))\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
#THIS BUILD WORKS, BUT IS VERY SLOW. CURRENTLY YIELDS A DECENT SCORE, NOT GREAT
# NOTE(review): `alphabet` is never referenced anywhere in the visible code —
# candidate for removal once confirmed unused by external callers.
alphabet = "abcdefghijklmnopqrstuvwxyz"
def author():
    """Return the author's name (intentionally left blank)."""
    return ""
def student_id():
    """Return the student's ID (intentionally left blank)."""
    return ""
def fill_words(pattern,words,scoring_f,minlen,maxlen):
    """Fill ``pattern`` with high-scoring words and keep the unfilled tail.

    ``find_words`` only covers a prefix of the pattern with its result;
    whatever suffix of the original pattern it did not reach is appended
    back unchanged so the output always has the pattern's full length.
    """
    filled = find_words(pattern, words, scoring_f, minlen, maxlen)
    tail = pattern[len(filled):]
    return filled + tail
def find_words(pattern,words,scoring_f,minlen,maxlen):
    """Greedily choose the best-scoring word that fits somewhere in ``pattern``,
    then recurse (via ``fill_words``) on the text to its left and right.

    ``pattern`` is a string where '-' marks an open cell and any other
    character is a fixed letter a candidate word must match.  Returns the
    filled text; if no placement is found the result may be shorter than
    ``pattern`` (the caller ``fill_words`` pads the tail back on).

    NOTE(review): the bare ``except`` below doubles as the bounds check for
    candidates reaching the end of the window (IndexError); any exception
    raised by ``scoring_f`` would be silently swallowed by it too.
    """
    patternCopy = pattern          # sliding window: shrinks from the left as scanning advances
    bestWord=("",0)                # best (word, score) found at the current window start
    bestState=[("",0),[],[]]       # overall best: [(word, score), left_remainder, right_remainder]
    toConsider = ""
    possibleWords=[]               # never populated, so the membership test below always passes
    length = minlen                # candidate word length currently being tried
    wordDict = {}                  # word length -> list of dictionary words of that length
    beg_point = 0                  # offset of the current window within the original pattern
    states = []                    # every viable (word, left, right) split recorded while scanning
    if len(pattern) < minlen:
        return pattern
    # Bucket the dictionary by length so each pass only scans words that
    # could occupy exactly `length` cells.
    for w in words:
        if len(w) in wordDict:
            wordDict[len(w)] += [w]
        else:
            wordDict[len(w)] = [w]
    while len(patternCopy) > 1:
        if length in wordDict:
            for w in wordDict[length]:
                snip = patternCopy[:length]
                # w is a candidate only if it agrees with every fixed (non '-') cell.
                for p in range(len(snip)):
                    if patternCopy[p] != "-" and patternCopy[p] != w[p]:
                        toConsider = ""
                        break
                    toConsider = w
                try:
                    # Accept w only when the cell just past it is open, so the
                    # word does not butt directly against a fixed character.
                    if patternCopy[len(toConsider)] == "-" and toConsider != "" and toConsider not in possibleWords:
                        if scoring_f(toConsider) > bestWord[1]:
                            bestWord = (toConsider, scoring_f(toConsider))
                except:
                    # IndexError: the candidate ends exactly at the window edge;
                    # stop trying further candidates of this length here.
                    break
        if length == maxlen:
            # Exhausted all lengths at this window start: record the best hit,
            # then slide the window one cell to the right.
            patternCopy = patternCopy[1:]
            leftHalf = pattern[:beg_point]
            rightHalf = pattern[len(leftHalf) + len(bestWord[0]):]
            beg_point += 1
            # Only placements preceded by an open cell are kept; a placement at
            # offset 0 is never recorded — TODO confirm that is intentional.
            if len(leftHalf) > 0 and leftHalf[-1] == "-":
                states.append([bestWord,leftHalf,rightHalf])
            bestWord = ("",0)
            # NOTE(review): `length+=1` below runs after this reset, so the next
            # window actually starts at minlen+1, not minlen — verify intended.
            length = minlen
        length+=1
    # Pick the single highest-scoring placement, then fill both sides recursively.
    for s in states:
        if s[0][1] > bestState[0][1]:
            bestState = s
    leftState = fill_words(bestState[1],words,scoring_f,minlen,maxlen)
    rightState = fill_words(bestState[2],words,scoring_f,minlen,maxlen)
    # bestState may still hold its initial empty lists; normalise both sides to "".
    if len(leftState) == 0:
        leftState = ""
    if len(rightState) == 0:
        rightState = ""
    return leftState + bestState[0][0] + rightState
# Word-length bounds applied to both the dictionary filter and the scorers.
minlen, maxlen = 4, 30
# Scrabble-style point value per letter.
letter_values = {
    'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,
    'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1,
    'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1,
    'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
# Frequency of each letter across the filtered word list (filled below).
letter_counts = {c: 0 for c in letter_values}
# One word per line; assumed lowercase a-z only — TODO confirm file contents.
with open('words_sorted.txt', 'r', encoding='utf-8') as f:
    words = [x.strip() for x in f]
words = [x for x in words if minlen <= len(x) <= maxlen]
wordset = set(words)  # NOTE(review): unused in the visible code
for word in words:
    for c in word:
        letter_counts[c] += 1
def scrabble_value(word):
    """Sum of Scrabble-style tile values for ``word``.

    Words outside the [minlen, maxlen] length range score 0; characters
    missing from ``letter_values`` contribute 0.
    """
    if not (minlen <= len(word) <= maxlen):
        return 0
    total = 0
    for ch in word:
        total += letter_values.get(ch, 0)
    return total
def length_squared(word):
    """Square of the word's length, or 0 when the length is out of range."""
    n = len(word)
    return n * n if minlen <= n <= maxlen else 0
def scoring_f(w):
return length_squared(w) + scrabble_value(w)
pattern = "-l-h--i-o--w--s--u--g-d-u-n-c-c--b--c-t-"
# print(pattern)
# print(fill_words(pattern,words,scoring_f,minlen,maxlen))
|
normal
|
{
"blob_id": "9bd659bb3bf812e48710f625bb65a848d3a8d074",
"index": 594,
"step-1": "<mask token>\n\n\ndef author():\n return ''\n\n\ndef student_id():\n return ''\n\n\n<mask token>\n\n\ndef find_words(pattern, words, scoring_f, minlen, maxlen):\n patternCopy = pattern\n bestWord = '', 0\n bestState = [('', 0), [], []]\n toConsider = ''\n possibleWords = []\n length = minlen\n wordDict = {}\n beg_point = 0\n states = []\n if len(pattern) < minlen:\n return pattern\n for w in words:\n if len(w) in wordDict:\n wordDict[len(w)] += [w]\n else:\n wordDict[len(w)] = [w]\n while len(patternCopy) > 1:\n if length in wordDict:\n for w in wordDict[length]:\n snip = patternCopy[:length]\n for p in range(len(snip)):\n if patternCopy[p] != '-' and patternCopy[p] != w[p]:\n toConsider = ''\n break\n toConsider = w\n try:\n if (patternCopy[len(toConsider)] == '-' and toConsider !=\n '' and toConsider not in possibleWords):\n if scoring_f(toConsider) > bestWord[1]:\n bestWord = toConsider, scoring_f(toConsider)\n except:\n break\n if length == maxlen:\n patternCopy = patternCopy[1:]\n leftHalf = pattern[:beg_point]\n rightHalf = pattern[len(leftHalf) + len(bestWord[0]):]\n beg_point += 1\n if len(leftHalf) > 0 and leftHalf[-1] == '-':\n states.append([bestWord, leftHalf, rightHalf])\n bestWord = '', 0\n length = minlen\n length += 1\n for s in states:\n if s[0][1] > bestState[0][1]:\n bestState = s\n leftState = fill_words(bestState[1], words, scoring_f, minlen, maxlen)\n rightState = fill_words(bestState[2], words, scoring_f, minlen, maxlen)\n if len(leftState) == 0:\n leftState = ''\n if len(rightState) == 0:\n rightState = ''\n return leftState + bestState[0][0] + rightState\n\n\n<mask token>\n\n\ndef length_squared(word):\n if minlen <= len(word) <= maxlen:\n return len(word) ** 2\n else:\n return 0\n\n\ndef scoring_f(w):\n return length_squared(w) + scrabble_value(w)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef author():\n return ''\n\n\ndef student_id():\n return ''\n\n\ndef fill_words(pattern, words, scoring_f, minlen, maxlen):\n foundWords = find_words(pattern, words, scoring_f, minlen, maxlen)\n foundWords = foundWords + pattern[len(foundWords):]\n return foundWords\n\n\ndef find_words(pattern, words, scoring_f, minlen, maxlen):\n patternCopy = pattern\n bestWord = '', 0\n bestState = [('', 0), [], []]\n toConsider = ''\n possibleWords = []\n length = minlen\n wordDict = {}\n beg_point = 0\n states = []\n if len(pattern) < minlen:\n return pattern\n for w in words:\n if len(w) in wordDict:\n wordDict[len(w)] += [w]\n else:\n wordDict[len(w)] = [w]\n while len(patternCopy) > 1:\n if length in wordDict:\n for w in wordDict[length]:\n snip = patternCopy[:length]\n for p in range(len(snip)):\n if patternCopy[p] != '-' and patternCopy[p] != w[p]:\n toConsider = ''\n break\n toConsider = w\n try:\n if (patternCopy[len(toConsider)] == '-' and toConsider !=\n '' and toConsider not in possibleWords):\n if scoring_f(toConsider) > bestWord[1]:\n bestWord = toConsider, scoring_f(toConsider)\n except:\n break\n if length == maxlen:\n patternCopy = patternCopy[1:]\n leftHalf = pattern[:beg_point]\n rightHalf = pattern[len(leftHalf) + len(bestWord[0]):]\n beg_point += 1\n if len(leftHalf) > 0 and leftHalf[-1] == '-':\n states.append([bestWord, leftHalf, rightHalf])\n bestWord = '', 0\n length = minlen\n length += 1\n for s in states:\n if s[0][1] > bestState[0][1]:\n bestState = s\n leftState = fill_words(bestState[1], words, scoring_f, minlen, maxlen)\n rightState = fill_words(bestState[2], words, scoring_f, minlen, maxlen)\n if len(leftState) == 0:\n leftState = ''\n if len(rightState) == 0:\n rightState = ''\n return leftState + bestState[0][0] + rightState\n\n\n<mask token>\n\n\ndef scrabble_value(word):\n if minlen <= len(word) <= maxlen:\n return sum(letter_values.get(c, 0) for c in word)\n else:\n return 0\n\n\ndef length_squared(word):\n if 
minlen <= len(word) <= maxlen:\n return len(word) ** 2\n else:\n return 0\n\n\ndef scoring_f(w):\n return length_squared(w) + scrabble_value(w)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef author():\n return ''\n\n\ndef student_id():\n return ''\n\n\ndef fill_words(pattern, words, scoring_f, minlen, maxlen):\n foundWords = find_words(pattern, words, scoring_f, minlen, maxlen)\n foundWords = foundWords + pattern[len(foundWords):]\n return foundWords\n\n\ndef find_words(pattern, words, scoring_f, minlen, maxlen):\n patternCopy = pattern\n bestWord = '', 0\n bestState = [('', 0), [], []]\n toConsider = ''\n possibleWords = []\n length = minlen\n wordDict = {}\n beg_point = 0\n states = []\n if len(pattern) < minlen:\n return pattern\n for w in words:\n if len(w) in wordDict:\n wordDict[len(w)] += [w]\n else:\n wordDict[len(w)] = [w]\n while len(patternCopy) > 1:\n if length in wordDict:\n for w in wordDict[length]:\n snip = patternCopy[:length]\n for p in range(len(snip)):\n if patternCopy[p] != '-' and patternCopy[p] != w[p]:\n toConsider = ''\n break\n toConsider = w\n try:\n if (patternCopy[len(toConsider)] == '-' and toConsider !=\n '' and toConsider not in possibleWords):\n if scoring_f(toConsider) > bestWord[1]:\n bestWord = toConsider, scoring_f(toConsider)\n except:\n break\n if length == maxlen:\n patternCopy = patternCopy[1:]\n leftHalf = pattern[:beg_point]\n rightHalf = pattern[len(leftHalf) + len(bestWord[0]):]\n beg_point += 1\n if len(leftHalf) > 0 and leftHalf[-1] == '-':\n states.append([bestWord, leftHalf, rightHalf])\n bestWord = '', 0\n length = minlen\n length += 1\n for s in states:\n if s[0][1] > bestState[0][1]:\n bestState = s\n leftState = fill_words(bestState[1], words, scoring_f, minlen, maxlen)\n rightState = fill_words(bestState[2], words, scoring_f, minlen, maxlen)\n if len(leftState) == 0:\n leftState = ''\n if len(rightState) == 0:\n rightState = ''\n return leftState + bestState[0][0] + rightState\n\n\n<mask token>\nwith open('words_sorted.txt', 'r', encoding='utf-8') as f:\n words = [x.strip() for x in f]\n words = [x for x in words if minlen <= len(x) <= maxlen]\n wordset = 
set(words)\n for word in words:\n for c in word:\n letter_counts[c] += 1\n\n\ndef scrabble_value(word):\n if minlen <= len(word) <= maxlen:\n return sum(letter_values.get(c, 0) for c in word)\n else:\n return 0\n\n\ndef length_squared(word):\n if minlen <= len(word) <= maxlen:\n return len(word) ** 2\n else:\n return 0\n\n\ndef scoring_f(w):\n return length_squared(w) + scrabble_value(w)\n\n\n<mask token>\n",
"step-4": "alphabet = 'abcdefghijklmnopqrstuvwxyz'\n\n\ndef author():\n return ''\n\n\ndef student_id():\n return ''\n\n\ndef fill_words(pattern, words, scoring_f, minlen, maxlen):\n foundWords = find_words(pattern, words, scoring_f, minlen, maxlen)\n foundWords = foundWords + pattern[len(foundWords):]\n return foundWords\n\n\ndef find_words(pattern, words, scoring_f, minlen, maxlen):\n patternCopy = pattern\n bestWord = '', 0\n bestState = [('', 0), [], []]\n toConsider = ''\n possibleWords = []\n length = minlen\n wordDict = {}\n beg_point = 0\n states = []\n if len(pattern) < minlen:\n return pattern\n for w in words:\n if len(w) in wordDict:\n wordDict[len(w)] += [w]\n else:\n wordDict[len(w)] = [w]\n while len(patternCopy) > 1:\n if length in wordDict:\n for w in wordDict[length]:\n snip = patternCopy[:length]\n for p in range(len(snip)):\n if patternCopy[p] != '-' and patternCopy[p] != w[p]:\n toConsider = ''\n break\n toConsider = w\n try:\n if (patternCopy[len(toConsider)] == '-' and toConsider !=\n '' and toConsider not in possibleWords):\n if scoring_f(toConsider) > bestWord[1]:\n bestWord = toConsider, scoring_f(toConsider)\n except:\n break\n if length == maxlen:\n patternCopy = patternCopy[1:]\n leftHalf = pattern[:beg_point]\n rightHalf = pattern[len(leftHalf) + len(bestWord[0]):]\n beg_point += 1\n if len(leftHalf) > 0 and leftHalf[-1] == '-':\n states.append([bestWord, leftHalf, rightHalf])\n bestWord = '', 0\n length = minlen\n length += 1\n for s in states:\n if s[0][1] > bestState[0][1]:\n bestState = s\n leftState = fill_words(bestState[1], words, scoring_f, minlen, maxlen)\n rightState = fill_words(bestState[2], words, scoring_f, minlen, maxlen)\n if len(leftState) == 0:\n leftState = ''\n if len(rightState) == 0:\n rightState = ''\n return leftState + bestState[0][0] + rightState\n\n\nminlen, maxlen = 4, 30\nletter_values = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,\n 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 
1, 'p': 3,\n 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4,\n 'z': 10}\nletter_counts = {c: (0) for c in letter_values}\nwith open('words_sorted.txt', 'r', encoding='utf-8') as f:\n words = [x.strip() for x in f]\n words = [x for x in words if minlen <= len(x) <= maxlen]\n wordset = set(words)\n for word in words:\n for c in word:\n letter_counts[c] += 1\n\n\ndef scrabble_value(word):\n if minlen <= len(word) <= maxlen:\n return sum(letter_values.get(c, 0) for c in word)\n else:\n return 0\n\n\ndef length_squared(word):\n if minlen <= len(word) <= maxlen:\n return len(word) ** 2\n else:\n return 0\n\n\ndef scoring_f(w):\n return length_squared(w) + scrabble_value(w)\n\n\npattern = '-l-h--i-o--w--s--u--g-d-u-n-c-c--b--c-t-'\n",
"step-5": "#THIS BUILD WORKS, BUT IS VERY SLOW. CURRENTLY YIELDS A DECENT SCORE, NOT GREAT \n\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\n\ndef author():\n return \"\"\ndef student_id():\n return \"\"\ndef fill_words(pattern,words,scoring_f,minlen,maxlen):\n \n foundWords = find_words(pattern,words,scoring_f,minlen,maxlen)\n \n foundWords = foundWords + pattern[len(foundWords):]\n \n return foundWords\n\ndef find_words(pattern,words,scoring_f,minlen,maxlen):\n patternCopy = pattern\n bestWord=(\"\",0)\n bestState=[(\"\",0),[],[]]\n toConsider = \"\"\n possibleWords=[]\n length = minlen\n wordDict = {}\n beg_point = 0\n states = []\n \n if len(pattern) < minlen:\n return pattern\n \n for w in words:\n if len(w) in wordDict:\n wordDict[len(w)] += [w]\n else:\n wordDict[len(w)] = [w]\n while len(patternCopy) > 1:\n if length in wordDict:\n for w in wordDict[length]:\n snip = patternCopy[:length]\n for p in range(len(snip)):\n if patternCopy[p] != \"-\" and patternCopy[p] != w[p]:\n toConsider = \"\"\n break\n toConsider = w\n try:\n if patternCopy[len(toConsider)] == \"-\" and toConsider != \"\" and toConsider not in possibleWords:\n if scoring_f(toConsider) > bestWord[1]:\n bestWord = (toConsider, scoring_f(toConsider))\n except:\n break\n if length == maxlen:\n patternCopy = patternCopy[1:]\n\n leftHalf = pattern[:beg_point]\n rightHalf = pattern[len(leftHalf) + len(bestWord[0]):]\n beg_point += 1\n \n if len(leftHalf) > 0 and leftHalf[-1] == \"-\":\n states.append([bestWord,leftHalf,rightHalf])\n\n bestWord = (\"\",0)\n length = minlen\n length+=1\n\n for s in states:\n if s[0][1] > bestState[0][1]:\n bestState = s\n \n leftState = fill_words(bestState[1],words,scoring_f,minlen,maxlen)\n rightState = fill_words(bestState[2],words,scoring_f,minlen,maxlen)\n if len(leftState) == 0:\n leftState = \"\"\n if len(rightState) == 0:\n rightState = \"\"\n return leftState + bestState[0][0] + rightState\n\n\n\nminlen, maxlen = 4, 30\nletter_values = {\n 'a': 1, 'b': 3, 'c': 
3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,\n 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1,\n 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1,\n 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10\n }\n\nletter_counts = {c: 0 for c in letter_values}\n\nwith open('words_sorted.txt', 'r', encoding='utf-8') as f:\n words = [x.strip() for x in f]\n words = [x for x in words if minlen <= len(x) <= maxlen]\n wordset = set(words)\n for word in words:\n for c in word:\n letter_counts[c] += 1\n \ndef scrabble_value(word):\n if minlen <= len(word) <= maxlen:\n return sum(letter_values.get(c, 0) for c in word)\n else:\n return 0\n\ndef length_squared(word):\n if minlen <= len(word) <= maxlen:\n return len(word) ** 2\n else:\n return 0\n \ndef scoring_f(w):\n return length_squared(w) + scrabble_value(w)\n\npattern = \"-l-h--i-o--w--s--u--g-d-u-n-c-c--b--c-t-\"\n\n# print(pattern)\n# print(fill_words(pattern,words,scoring_f,minlen,maxlen))",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(contador_letras(lista_animais))
<|reserved_special_token_1|>
contador_letras = lambda lista: [len(x) for x in lista]
lista_animais = ['cachorro', 'pato', 'marreco']
print(contador_letras(lista_animais))
<|reserved_special_token_1|>
# As variáveis abaixo estão recebendo uma função anônima
contador_letras = lambda lista: [len(x) for x in lista]
lista_animais = ['cachorro', 'pato', 'marreco']
print(contador_letras(lista_animais))
|
flexible
|
{
"blob_id": "d13957c3d3f4d34279dc660d80ca91ca84ba4a77",
"index": 4504,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(contador_letras(lista_animais))\n",
"step-3": "contador_letras = lambda lista: [len(x) for x in lista]\nlista_animais = ['cachorro', 'pato', 'marreco']\nprint(contador_letras(lista_animais))\n",
"step-4": "# As variáveis abaixo estão recebendo uma função anônima\ncontador_letras = lambda lista: [len(x) for x in lista]\n\nlista_animais = ['cachorro', 'pato', 'marreco']\nprint(contador_letras(lista_animais))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#SEE /etc/rc.local FOR BOOTUP COMMANDS
from Measure_and_File import *
from WebServer import *
from multiprocessing import *
web = WebServer()
board_boy = Measurer_and_Filer()
#try:
proc1 = Process( target=board_boy.measure_and_file, args=() )
proc1.start()
proc2 = Process( target=web.serve, args=() )
proc2.start()
#except:
#print ("Error: unable to start processes")
|
normal
|
{
"blob_id": "26744d51dbce835d31d572a053294c9d280e1a8b",
"index": 3956,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nproc1.start()\n<mask token>\nproc2.start()\n",
"step-3": "<mask token>\nweb = WebServer()\nboard_boy = Measurer_and_Filer()\nproc1 = Process(target=board_boy.measure_and_file, args=())\nproc1.start()\nproc2 = Process(target=web.serve, args=())\nproc2.start()\n",
"step-4": "from Measure_and_File import *\nfrom WebServer import *\nfrom multiprocessing import *\nweb = WebServer()\nboard_boy = Measurer_and_Filer()\nproc1 = Process(target=board_boy.measure_and_file, args=())\nproc1.start()\nproc2 = Process(target=web.serve, args=())\nproc2.start()\n",
"step-5": "#SEE /etc/rc.local FOR BOOTUP COMMANDS\n\nfrom Measure_and_File import *\nfrom WebServer import *\nfrom multiprocessing import *\n\nweb = WebServer()\nboard_boy = Measurer_and_Filer()\n\n#try:\nproc1 = Process( target=board_boy.measure_and_file, args=() )\nproc1.start()\nproc2 = Process( target=web.serve, args=() )\nproc2.start()\n#except:\n #print (\"Error: unable to start processes\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Point:
def __init__(self,x,y):
self.x=x
self.y=y
def __str__(self):
return "({0},{1})".format(self.x,self.y)
def __add__(self, other):
self.x=self.x+other.x
self.y=self.y+other.y
return Point(self.x,self.y)
p1=Point(1,2)
p2=Point(3,4)
print(p1)
print(p2)
p3=p1+p2
print(p3)
|
normal
|
{
"blob_id": "1bebd3c18742f5362d2e5f22c539f6b13ad58d2a",
"index": 2873,
"step-1": "class Point:\n <mask token>\n\n def __str__(self):\n return '({0},{1})'.format(self.x, self.y)\n\n def __add__(self, other):\n self.x = self.x + other.x\n self.y = self.y + other.y\n return Point(self.x, self.y)\n\n\n<mask token>\n",
"step-2": "class Point:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __str__(self):\n return '({0},{1})'.format(self.x, self.y)\n\n def __add__(self, other):\n self.x = self.x + other.x\n self.y = self.y + other.y\n return Point(self.x, self.y)\n\n\n<mask token>\n",
"step-3": "class Point:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __str__(self):\n return '({0},{1})'.format(self.x, self.y)\n\n def __add__(self, other):\n self.x = self.x + other.x\n self.y = self.y + other.y\n return Point(self.x, self.y)\n\n\n<mask token>\nprint(p1)\nprint(p2)\n<mask token>\nprint(p3)\n",
"step-4": "class Point:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __str__(self):\n return '({0},{1})'.format(self.x, self.y)\n\n def __add__(self, other):\n self.x = self.x + other.x\n self.y = self.y + other.y\n return Point(self.x, self.y)\n\n\np1 = Point(1, 2)\np2 = Point(3, 4)\nprint(p1)\nprint(p2)\np3 = p1 + p2\nprint(p3)\n",
"step-5": "class Point:\r\n def __init__(self,x,y):\r\n self.x=x\r\n self.y=y\r\n\r\n def __str__(self):\r\n return \"({0},{1})\".format(self.x,self.y)\r\n\r\n def __add__(self, other):\r\n self.x=self.x+other.x\r\n self.y=self.y+other.y\r\n return Point(self.x,self.y)\r\n\r\np1=Point(1,2)\r\np2=Point(3,4)\r\nprint(p1)\r\nprint(p2)\r\np3=p1+p2\r\nprint(p3)\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DrawApp(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class SavedDrawings(models.Model):
username = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models
.CASCADE, null=True)
saveId = models.IntegerField()
saveName = models.CharField(max_length=500)
corners = models.TextField()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DrawApp(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _str_(self):
return self.title
class SavedDrawings(models.Model):
username = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models
.CASCADE, null=True)
saveId = models.IntegerField()
saveName = models.CharField(max_length=500)
corners = models.TextField()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DrawApp(models.Model):
title = models.CharField(max_length=120)
description = models.TextField()
completed = models.BooleanField(default=False)
def _str_(self):
return self.title
class SavedDrawings(models.Model):
username = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models
.CASCADE, null=True)
saveId = models.IntegerField()
saveName = models.CharField(max_length=500)
corners = models.TextField()
<|reserved_special_token_1|>
from django.db import models
# from rest_framework import permissions
from drawAppBackend import settings
# from django.contrib.auth.models import AbstractUser
# Create your models here.
class DrawApp(models.Model):
title = models.CharField(max_length=120)
description = models.TextField()
completed = models.BooleanField(default=False)
def _str_(self):
return self.title
class SavedDrawings(models.Model):
username = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True)
saveId = models.IntegerField()
saveName = models.CharField(max_length=500)
corners = models.TextField()
# class CustomUser(AbstractUser):
# # Any extra fields would go here
# def __str__(self):
# return self.email
|
flexible
|
{
"blob_id": "fa566eb77b17830acad8c7bfc2b958760d982925",
"index": 7623,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass DrawApp(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass SavedDrawings(models.Model):\n username = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models\n .CASCADE, null=True)\n saveId = models.IntegerField()\n saveName = models.CharField(max_length=500)\n corners = models.TextField()\n",
"step-3": "<mask token>\n\n\nclass DrawApp(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n def _str_(self):\n return self.title\n\n\nclass SavedDrawings(models.Model):\n username = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models\n .CASCADE, null=True)\n saveId = models.IntegerField()\n saveName = models.CharField(max_length=500)\n corners = models.TextField()\n",
"step-4": "<mask token>\n\n\nclass DrawApp(models.Model):\n title = models.CharField(max_length=120)\n description = models.TextField()\n completed = models.BooleanField(default=False)\n\n def _str_(self):\n return self.title\n\n\nclass SavedDrawings(models.Model):\n username = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models\n .CASCADE, null=True)\n saveId = models.IntegerField()\n saveName = models.CharField(max_length=500)\n corners = models.TextField()\n",
"step-5": "from django.db import models\n# from rest_framework import permissions\nfrom drawAppBackend import settings\n\n# from django.contrib.auth.models import AbstractUser\n\n# Create your models here.\n\n\nclass DrawApp(models.Model):\n title = models.CharField(max_length=120)\n description = models.TextField()\n completed = models.BooleanField(default=False)\n\n def _str_(self):\n return self.title\n\n\nclass SavedDrawings(models.Model):\n username = models.ForeignKey(\n settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True)\n saveId = models.IntegerField()\n saveName = models.CharField(max_length=500)\n corners = models.TextField()\n\n# class CustomUser(AbstractUser):\n# # Any extra fields would go here\n# def __str__(self):\n# return self.email\n",
"step-ids": [
0,
3,
4,
5,
7
]
}
|
[
0,
3,
4,
5,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(message, type(message))
<|reserved_special_token_0|>
for msg in message:
print(msg, message.count(msg))
msg_dict[msg] = message.count(msg)
print(msg_dict)
<|reserved_special_token_1|>
message = (
'It was a bright cold day in April, and the clocks were striking thirteen.'
)
print(message, type(message))
msg_dict = dict()
for msg in message:
print(msg, message.count(msg))
msg_dict[msg] = message.count(msg)
print(msg_dict)
<|reserved_special_token_1|>
# p.85 (문자 갯수 카운팅)
message = \
'It was a bright cold day in April, and the clocks were striking thirteen.'
print(message, type(message))
msg_dict = dict() #빈 dict() 생성
for msg in message:
print(msg, message.count(msg))
msg_dict[msg] = message.count(msg)
print(msg_dict)
|
flexible
|
{
"blob_id": "20671470c087719fa9ea8ffa25be55e9ade67681",
"index": 5373,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(message, type(message))\n<mask token>\nfor msg in message:\n print(msg, message.count(msg))\n msg_dict[msg] = message.count(msg)\nprint(msg_dict)\n",
"step-3": "message = (\n 'It was a bright cold day in April, and the clocks were striking thirteen.'\n )\nprint(message, type(message))\nmsg_dict = dict()\nfor msg in message:\n print(msg, message.count(msg))\n msg_dict[msg] = message.count(msg)\nprint(msg_dict)\n",
"step-4": "# p.85 (문자 갯수 카운팅)\nmessage = \\\n 'It was a bright cold day in April, and the clocks were striking thirteen.'\nprint(message, type(message))\n\nmsg_dict = dict() #빈 dict() 생성\nfor msg in message:\n print(msg, message.count(msg))\n msg_dict[msg] = message.count(msg)\n\nprint(msg_dict)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from __future__ import division, print_function, unicode_literals
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from pyglet.gl import *
from pyglet.window import key
from cocos.actions import *
from cocos.director import director
from cocos.layer import Layer
from cocos.scene import Scene
from cocos.sprite import Sprite
from haiku import generate_haiku
from time import time
def get_steps(index):
return Scene(FontLayer(title="", subtitle='\n'.join(generate_haiku())), SpriteMoveTo(index))
class SpriteLayer(Layer):
is_event_handler = True #: enable pyglet's events
def __init__(self, index=1):
super(SpriteLayer, self).__init__()
self.index = index
self.image = pyglet.resource.image('flat-black-l.png')
self.image.anchor_x = self.image.width
self.image.anchor_y = self.image.height
def on_key_release(self, keys, mod):
# LEFT: go to previous scene
# RIGTH: go to next scene
# ENTER: restart scene
max_steps = 8
if keys == key.LEFT:
self.index -= 1
if self.index < 0:
self.index = max_steps - 1
elif keys == key.RIGHT:
self.index += 1
if self.index >= 8:
self.index = 0
if keys in (key.LEFT, key.RIGHT, key.ENTER):
director.replace(get_steps(self.index))
return True
# def on_exit( self ):
# for o in self.objects:
# o.stop()
class SpriteMoveTo(SpriteLayer):
def on_enter(self):
super(SpriteMoveTo, self).on_enter()
sprite3 = Sprite(self.image)
self.add(sprite3)
x, y = divmod(self.index, 3)
sprite3.position = x * 100 +100 , y * 100 + 100
# sprite3.do(MoveTo((620, 300), 1))
class FontLayer(Layer):
    """Overlay layer rendering a title, a multiline subtitle and a help
    line, all drawn through a single pyglet batch."""

    def __init__(self, title="Sprite Exmaple #", subtitle="Goto()"):
        super(FontLayer, self).__init__()

        self.title = title
        self.subtitle = subtitle
        self.batch = pyglet.graphics.Batch()

        win_w, win_h = director.get_window_size()

        # Large title pinned to the top-left corner.
        self.text_title = pyglet.text.Label(
            self.title,
            font_size=32,
            x=5,
            y=win_h,
            anchor_x='left',
            anchor_y='top',
            batch=self.batch)

        # Multiline subtitle block just below the title.
        self.text_subtitle = pyglet.text.Label(
            self.subtitle,
            multiline=True,
            width=600,
            font_size=16,
            x=5,
            y=win_h - 80,
            anchor_x='left',
            anchor_y='top',
            batch=self.batch)

        # Usage hint centered near the bottom of the window.
        self.text_help = pyglet.text.Label(
            "Press LEFT / RIGHT for prev/next test, ENTER to restart test",
            font_size=16,
            x=win_w // 2,
            y=20,
            anchor_x='center',
            anchor_y='center',
            batch=self.batch)

    def draw(self):
        super(FontLayer, self).draw()
        self.batch.draw()
if __name__ == "__main__":
    # Boot the cocos director and start on test scene #1.
    director.init(resizable=True, caption='SuperStepper')
    director.run(get_steps(1))
|
normal
|
{
"blob_id": "2678aac08104a580e866984bc4cf4adf8cb8ac5c",
"index": 5930,
"step-1": "<mask token>\n\n\nclass SpriteMoveTo(SpriteLayer):\n <mask token>\n\n\nclass FontLayer(Layer):\n\n def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):\n super(FontLayer, self).__init__()\n self.title = title\n self.subtitle = subtitle\n self.batch = pyglet.graphics.Batch()\n self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,\n y=director.get_window_size()[1], anchor_x='left', anchor_y=\n 'top', batch=self.batch)\n self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=\n True, width=600, font_size=16, x=5, y=director.get_window_size(\n )[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)\n self.text_help = pyglet.text.Label(\n 'Press LEFT / RIGHT for prev/next test, ENTER to restart test',\n font_size=16, x=director.get_window_size()[0] // 2, y=20,\n anchor_x='center', anchor_y='center', batch=self.batch)\n\n def draw(self):\n super(FontLayer, self).draw()\n self.batch.draw()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SpriteLayer(Layer):\n is_event_handler = True\n\n def __init__(self, index=1):\n super(SpriteLayer, self).__init__()\n self.index = index\n self.image = pyglet.resource.image('flat-black-l.png')\n self.image.anchor_x = self.image.width\n self.image.anchor_y = self.image.height\n\n def on_key_release(self, keys, mod):\n max_steps = 8\n if keys == key.LEFT:\n self.index -= 1\n if self.index < 0:\n self.index = max_steps - 1\n elif keys == key.RIGHT:\n self.index += 1\n if self.index >= 8:\n self.index = 0\n if keys in (key.LEFT, key.RIGHT, key.ENTER):\n director.replace(get_steps(self.index))\n return True\n\n\nclass SpriteMoveTo(SpriteLayer):\n\n def on_enter(self):\n super(SpriteMoveTo, self).on_enter()\n sprite3 = Sprite(self.image)\n self.add(sprite3)\n x, y = divmod(self.index, 3)\n sprite3.position = x * 100 + 100, y * 100 + 100\n\n\nclass FontLayer(Layer):\n\n def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):\n super(FontLayer, self).__init__()\n self.title = title\n self.subtitle = subtitle\n self.batch = pyglet.graphics.Batch()\n self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,\n y=director.get_window_size()[1], anchor_x='left', anchor_y=\n 'top', batch=self.batch)\n self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=\n True, width=600, font_size=16, x=5, y=director.get_window_size(\n )[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)\n self.text_help = pyglet.text.Label(\n 'Press LEFT / RIGHT for prev/next test, ENTER to restart test',\n font_size=16, x=director.get_window_size()[0] // 2, y=20,\n anchor_x='center', anchor_y='center', batch=self.batch)\n\n def draw(self):\n super(FontLayer, self).draw()\n self.batch.draw()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_steps(index):\n return Scene(FontLayer(title='', subtitle='\\n'.join(generate_haiku())),\n SpriteMoveTo(index))\n\n\nclass SpriteLayer(Layer):\n is_event_handler = True\n\n def __init__(self, index=1):\n super(SpriteLayer, self).__init__()\n self.index = index\n self.image = pyglet.resource.image('flat-black-l.png')\n self.image.anchor_x = self.image.width\n self.image.anchor_y = self.image.height\n\n def on_key_release(self, keys, mod):\n max_steps = 8\n if keys == key.LEFT:\n self.index -= 1\n if self.index < 0:\n self.index = max_steps - 1\n elif keys == key.RIGHT:\n self.index += 1\n if self.index >= 8:\n self.index = 0\n if keys in (key.LEFT, key.RIGHT, key.ENTER):\n director.replace(get_steps(self.index))\n return True\n\n\nclass SpriteMoveTo(SpriteLayer):\n\n def on_enter(self):\n super(SpriteMoveTo, self).on_enter()\n sprite3 = Sprite(self.image)\n self.add(sprite3)\n x, y = divmod(self.index, 3)\n sprite3.position = x * 100 + 100, y * 100 + 100\n\n\nclass FontLayer(Layer):\n\n def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):\n super(FontLayer, self).__init__()\n self.title = title\n self.subtitle = subtitle\n self.batch = pyglet.graphics.Batch()\n self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,\n y=director.get_window_size()[1], anchor_x='left', anchor_y=\n 'top', batch=self.batch)\n self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=\n True, width=600, font_size=16, x=5, y=director.get_window_size(\n )[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)\n self.text_help = pyglet.text.Label(\n 'Press LEFT / RIGHT for prev/next test, ENTER to restart test',\n font_size=16, x=director.get_window_size()[0] // 2, y=20,\n anchor_x='center', anchor_y='center', batch=self.batch)\n\n def draw(self):\n super(FontLayer, self).draw()\n self.batch.draw()\n\n\n<mask token>\n",
"step-4": "<mask token>\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))\n<mask token>\n\n\ndef get_steps(index):\n return Scene(FontLayer(title='', subtitle='\\n'.join(generate_haiku())),\n SpriteMoveTo(index))\n\n\nclass SpriteLayer(Layer):\n is_event_handler = True\n\n def __init__(self, index=1):\n super(SpriteLayer, self).__init__()\n self.index = index\n self.image = pyglet.resource.image('flat-black-l.png')\n self.image.anchor_x = self.image.width\n self.image.anchor_y = self.image.height\n\n def on_key_release(self, keys, mod):\n max_steps = 8\n if keys == key.LEFT:\n self.index -= 1\n if self.index < 0:\n self.index = max_steps - 1\n elif keys == key.RIGHT:\n self.index += 1\n if self.index >= 8:\n self.index = 0\n if keys in (key.LEFT, key.RIGHT, key.ENTER):\n director.replace(get_steps(self.index))\n return True\n\n\nclass SpriteMoveTo(SpriteLayer):\n\n def on_enter(self):\n super(SpriteMoveTo, self).on_enter()\n sprite3 = Sprite(self.image)\n self.add(sprite3)\n x, y = divmod(self.index, 3)\n sprite3.position = x * 100 + 100, y * 100 + 100\n\n\nclass FontLayer(Layer):\n\n def __init__(self, title='Sprite Exmaple #', subtitle='Goto()'):\n super(FontLayer, self).__init__()\n self.title = title\n self.subtitle = subtitle\n self.batch = pyglet.graphics.Batch()\n self.text_title = pyglet.text.Label(self.title, font_size=32, x=5,\n y=director.get_window_size()[1], anchor_x='left', anchor_y=\n 'top', batch=self.batch)\n self.text_subtitle = pyglet.text.Label(self.subtitle, multiline=\n True, width=600, font_size=16, x=5, y=director.get_window_size(\n )[1] - 80, anchor_x='left', anchor_y='top', batch=self.batch)\n self.text_help = pyglet.text.Label(\n 'Press LEFT / RIGHT for prev/next test, ENTER to restart test',\n font_size=16, x=director.get_window_size()[0] // 2, y=20,\n anchor_x='center', anchor_y='center', batch=self.batch)\n\n def draw(self):\n super(FontLayer, self).draw()\n self.batch.draw()\n\n\nif __name__ == '__main__':\n 
director.init(resizable=True, caption='SuperStepper')\n director.run(get_steps(1))\n",
"step-5": "from __future__ import division, print_function, unicode_literals\n\nimport sys\nimport os\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))\n\nfrom pyglet.gl import *\nfrom pyglet.window import key\n\nfrom cocos.actions import *\nfrom cocos.director import director\nfrom cocos.layer import Layer\nfrom cocos.scene import Scene\nfrom cocos.sprite import Sprite\nfrom haiku import generate_haiku\n\nfrom time import time\n\ndef get_steps(index):\n \n return Scene(FontLayer(title=\"\", subtitle='\\n'.join(generate_haiku())), SpriteMoveTo(index))\n\nclass SpriteLayer(Layer):\n\n is_event_handler = True #: enable pyglet's events\n\n def __init__(self, index=1):\n super(SpriteLayer, self).__init__()\n self.index = index\n\n self.image = pyglet.resource.image('flat-black-l.png')\n self.image.anchor_x = self.image.width\n self.image.anchor_y = self.image.height\n\n def on_key_release(self, keys, mod):\n # LEFT: go to previous scene\n # RIGTH: go to next scene\n # ENTER: restart scene\n max_steps = 8\n\n if keys == key.LEFT:\n self.index -= 1\n if self.index < 0:\n self.index = max_steps - 1\n elif keys == key.RIGHT:\n self.index += 1\n if self.index >= 8:\n self.index = 0\n\n if keys in (key.LEFT, key.RIGHT, key.ENTER):\n director.replace(get_steps(self.index))\n return True\n\n # def on_exit( self ):\n # for o in self.objects:\n # o.stop()\n\nclass SpriteMoveTo(SpriteLayer):\n\n def on_enter(self):\n super(SpriteMoveTo, self).on_enter()\n\n sprite3 = Sprite(self.image)\n self.add(sprite3)\n x, y = divmod(self.index, 3)\n\n sprite3.position = x * 100 +100 , y * 100 + 100\n # sprite3.do(MoveTo((620, 300), 1))\n\n\nclass FontLayer(Layer):\n\n def __init__(self, title=\"Sprite Exmaple #\", subtitle=\"Goto()\"):\n super(FontLayer, self).__init__()\n\n self.title = title\n self.subtitle = subtitle\n\n self.batch = pyglet.graphics.Batch()\n\n self.text_title = pyglet.text.Label(self.title,\n font_size=32,\n x=5,\n y=director.get_window_size()[1],\n 
anchor_x='left',\n anchor_y='top',\n batch=self.batch)\n\n self.text_subtitle = pyglet.text.Label(self.subtitle,\n multiline=True,\n width=600,\n font_size=16,\n x=5,\n y=director.get_window_size()[1] - 80,\n anchor_x='left',\n anchor_y='top',\n batch=self.batch)\n\n self.text_help = pyglet.text.Label(\"Press LEFT / RIGHT for prev/next test, \"\n \"ENTER to restart test\",\n font_size=16,\n x=director.get_window_size()[0] // 2,\n y=20,\n anchor_x='center',\n anchor_y='center',\n batch=self.batch)\n\n def draw(self):\n super(FontLayer, self).draw()\n self.batch.draw()\n\n\n\nif __name__ == \"__main__\":\n director.init(resizable=True, caption='SuperStepper')\n director.run(get_steps(1))",
"step-ids": [
4,
9,
10,
11,
13
]
}
|
[
4,
9,
10,
11,
13
] |
<|reserved_special_token_0|>
def get_content(t):
    """Render tweet *t*'s text as markdown.

    @mentions become markdown links to the user's Twitter profile, and
    every URL (both the ones listed in t['entities']['urls'] and any bare
    URLs found by regex) is replaced with its fully-resolved final URL.
    """
    content = t['full_text']
    if 'entities' in t:
        # Bare URLs in the text that may not appear in the entities list.
        raw_urls = re.findall(
            'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
            , content)
        # Turn @mentions into markdown profile links.
        for m in t['entities']['user_mentions']:
            screen_name = m['screen_name']
            mdlink = '[@%s](https://twitter.com/%s/)' % (screen_name,
                screen_name)
            content = content.replace('@' + screen_name, mdlink)
        processed_urls = []
        for u in t['entities']['urls']:
            # Replace the t.co short URL with its resolved destination.
            url = u['url']
            processed_urls.append(url)
            expanded_url = u['expanded_url']
            processed_urls.append(expanded_url)
            expanded_url, no_errors = resolver.get_final_url(expanded_url)
            processed_urls.append(expanded_url)
            content = content.replace(url, expanded_url)
        # Resolve any leftover raw URLs not covered by the entities list.
        for raw_url in raw_urls:
            if raw_url not in processed_urls:
                expanded_url, no_errors = resolver.get_final_url(raw_url)
                content = content.replace(raw_url, expanded_url)
    return content
<|reserved_special_token_0|>
def process_tweet(d1):
    """Process a single archived tweet *d1*.

    Returns True when the tweet is already covered by an existing post or
    was matched (via its links) to a syndicated post; otherwise falls
    through to create_post() to generate a new markdown post.
    """
    orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,
        d1['id_str'])
    # Already mapped to a full post or photo post?  Nothing to do.
    if orig_tweet_url in urlmap:
        og = urlmap.get(orig_tweet_url)
        if og['source_path'].startswith('post\\') or og['source_path'
            ].startswith('photos\\'):
            return True
    tweet_source = d1['source']
    for s in syndicated_sources:
        if tweet_source.find(s) >= 0:
            # Tweet came from a syndication tool: try to match each of
            # its links back to an existing post on the site.
            for u in d1.get('entities', {}).get('urls', []):
                raw_url = u['url']
                url = u['expanded_url']
                if process_syn_url(d1, raw_url, url):
                    return True
            # Fall back to bare URLs found in the text itself.
            raw_urls = re.findall(
                'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
                , d1['full_text'])
            for raw_url in raw_urls:
                if process_syn_url(d1, raw_url, raw_url):
                    return True
            break
    return create_post(d1)
def import_all():
    """Import every tweet from SOURCE_FILE and print summary statistics."""
    countbysource = {}
    replies = 0
    retweets = 0
    withmedia = 0
    raw = 0
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        d = json.load(f)
        idx = 0
        for d1 in d:
            # When debugging, only process the single tweet id requested.
            if debug_id is not None and d1['id_str'] != debug_id:
                continue
            if process_tweet(d1):
                continue
            # Everything below only gathers stats for tweets that were
            # not handled by process_tweet().
            tweet_source = d1['source']
            if tweet_source not in countbysource:
                countbysource[tweet_source] = 1
            else:
                countbysource[tweet_source] = countbysource[tweet_source] + 1
            is_reply = False
            if ('in_reply_to_status_id_str' in d1 and
                'in_reply_to_screen_name' in d1):
                replies = replies + 1
                is_reply = True
            is_retweet = False
            content = d1['full_text']
            if content.startswith('RT @'):
                retweets = retweets + 1
                is_retweet = True
            media = []
            if 'extended_entities' in d1:
                for m in d1['extended_entities']['media']:
                    media.append(m['media_url_https'])
            if len(media) > 0:
                withmedia = withmedia + 1
            if not is_reply and not is_retweet and len(media) == 0:
                raw = raw + 1
            idx = idx + 1
    # Persist the URL-resolution cache accumulated during the run.
    resolver.save_cache()
    for source in countbysource:
        print('countbysource: %s = %s' % (source, countbysource[source]))
    print('replies: %s' % replies)
    print('retweets: %s' % retweets)
    print('withmedia: %s' % withmedia)
    print('raw: %s' % raw)
    print('total: %s' % idx)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def loadurlmap(cleanupdupes=False):
    """Load urlmap.json from the Hugo output dir into a lookup dict.

    The returned dict maps every post URL, every syndicated URL, and
    every non-empty post title to that post's info record.  When
    *cleanupdupes* is True, duplicate syndicated URLs are resolved to a
    single canonical post and the redundant source files are deleted.
    """
    blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])
    urlmapfile = blogdir / 'urlmap.json'
    urlmap = {}
    urlmapdupes = {}
    with urlmapfile.open(encoding='UTF-8') as f:
        tempurlmap = json.loads(f.read())
        for u in tempurlmap:
            u1 = tempurlmap[u]
            if 'syndicated' in u1:
                for s in u1['syndicated']:
                    if 'url' in s:
                        su = s['url']
                        # Track syndicated URLs claimed by more than one post.
                        if su in urlmap:
                            if su not in urlmapdupes:
                                urlmapdupes[su] = [u1, urlmap[su]]
                            else:
                                urlmapdupes[su].append(u1)
                        else:
                            urlmap[su] = u1
            urlmap[u] = u1
            # Also allow lookups by post title.
            title = u1.get('title', '').strip()
            if len(title) > 0:
                urlmap[title] = u1
    if cleanupdupes:
        for su in urlmapdupes:
            dupes = urlmapdupes[su]
            canonical = None
            for_deletion = []
            for d in dupes:
                # Full posts, link posts and heavily-syndicated entries
                # win over auto-generated duplicates.
                if d['source_path'].startswith('post') or d['source_path'
                    ].startswith('links') or len(d['syndicated']) > 2:
                    if canonical is not None:
                        print(
                            '\n\r##### WTH. More than one canonical urls were detected for %s'
                             % su)
                        print(json.dumps(dupes, indent=4))
                    canonical = d
                else:
                    for_deletion.append(d)
            if canonical is None:
                print(
                    '##### Dupes were detected for %s but no canonical url found!'
                     % su)
                print(dupes)
            else:
                urlmap[su] = canonical
                for d in for_deletion:
                    source_path = Path(d['source_path'])
                    full_path = contentdir / source_path
                    if full_path.exists():
                        os.remove(str(full_path))
    return urlmap
<|reserved_special_token_0|>
def is_number(s):
    """Return True when *s* can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def add_syndication(mdfile, url, stype):
    """Record a syndication entry (stype, url) in markdown file *mdfile*.

    Does nothing if an identical entry already exists.  The file is
    re-serialized with frontmatter.dumps, so formatting may be normalized.
    """
    with mdfile.open(encoding='UTF-8') as f:
        try:
            post = frontmatter.load(f)
        except:
            # NOTE(review): bare except silently skips unparseable files.
            print('Error parsing file')
            return
    if post.get('syndicated') == None:
        post['syndicated'] = []
    else:
        # Avoid duplicating an existing syndication entry.
        for s in post['syndicated']:
            if s['type'] == stype and s['url'] == url:
                return
    post['syndicated'].append({'type': stype, 'url': url})
    newfile = frontmatter.dumps(post)
    with mdfile.open('w', encoding='UTF-8') as w:
        w.write(newfile)
def get_content(t):
    """Render tweet *t*'s text as markdown.

    @mentions become markdown links to the user's Twitter profile, and
    every URL (both the ones listed in t['entities']['urls'] and any bare
    URLs found by regex) is replaced with its fully-resolved final URL.
    """
    content = t['full_text']
    if 'entities' in t:
        # Bare URLs in the text that may not appear in the entities list.
        raw_urls = re.findall(
            'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
            , content)
        # Turn @mentions into markdown profile links.
        for m in t['entities']['user_mentions']:
            screen_name = m['screen_name']
            mdlink = '[@%s](https://twitter.com/%s/)' % (screen_name,
                screen_name)
            content = content.replace('@' + screen_name, mdlink)
        processed_urls = []
        for u in t['entities']['urls']:
            # Replace the t.co short URL with its resolved destination.
            url = u['url']
            processed_urls.append(url)
            expanded_url = u['expanded_url']
            processed_urls.append(expanded_url)
            expanded_url, no_errors = resolver.get_final_url(expanded_url)
            processed_urls.append(expanded_url)
            content = content.replace(url, expanded_url)
        # Resolve any leftover raw URLs not covered by the entities list.
        for raw_url in raw_urls:
            if raw_url not in processed_urls:
                expanded_url, no_errors = resolver.get_final_url(raw_url)
                content = content.replace(raw_url, expanded_url)
    return content
def create_post(t):
    """Convert archived tweet *t* into a markdown post under contentdir.

    The post kind is chosen from the tweet's metadata:
      - 'replies'  when the tweet is a reply
      - 'reposts'  when it is a retweet resolved via retweetscache
      - 'photos'   when it has attached media (and is not a reply/repost)
      - 'notes'    otherwise
    Media files whose names start with the tweet id are copied next to
    the post.  Always returns True so callers treat the tweet as handled.
    """
    id = t['id_str']
    d = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')
    content = get_content(t)
    post = frontmatter.Post(content)
    post['date'] = d
    post['syndicated'] = [{
        'type': 'twitter',
        'url': 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME, t['id']),
    }]
    kind = 'notes'
    if 'in_reply_to_status_id_str' in t and 'in_reply_to_screen_name' in t:
        kind = 'replies'
        post['reply_to'] = {
            'type': 'twitter',
            'url': 'https://twitter.com/%s/statuses/%s/' % (
                t['in_reply_to_screen_name'], t['in_reply_to_status_id_str']),
            'name': t['in_reply_to_screen_name'],
            'label': "%s's tweet" % t['in_reply_to_screen_name'],
        }
    elif t['full_text'].startswith('RT @'):
        # Only retweets whose original author was resolved into the cache
        # become 'reposts'; otherwise they stay plain notes.
        rc = retweetscache.get(id)
        if rc is not None and 'retweeted_user' in rc:
            kind = 'reposts'
            post['repost_source'] = {
                'type': 'twitter',
                'name': rc['retweeted_user'],
                'url': 'https://twitter.com/%s/statuses/%s/' % (
                    rc['retweeted_user'], rc['retweeted_id']),
            }
    media = []
    for m in t.get('extended_entities', {}).get('media', []):
        media.append(m['media_url_https'])
    if len(media) > 0:
        if kind != 'reposts' and kind != 'replies':
            kind = 'photos'
    tags = []
    # BUG FIX: key was misspelled 'entites', so hashtags supplied by the
    # Twitter archive were never collected (only the regex fallback below).
    for tag in t.get('entities', {}).get('hashtags', []):
        tags.append(tag['text'].lower())
    # Fallback: also pull #hashtags straight out of the rendered text.
    parsed_tags = re.findall('\\s#(\\w+)', ' ' + content)
    for tag in parsed_tags:
        if tag not in tags:
            tags.append(tag.lower())
    for tag in auto_tags:
        if tag not in tags:
            tags.append(tag)
    if len(tags) > 0:
        post['tags'] = tags
    post['source'] = 'twitter'
    outdir = contentdir / kind / d.strftime('%Y') / d.strftime('%m')
    if len(media) > 0:
        # Posts with media get their own folder (page bundle).
        outdir = outdir / id
    if not outdir.exists():
        outdir.mkdir(parents=True)
    if len(media) > 0:
        outfile = outdir / 'index.md'
        for imgfile in mediadir.glob(id + '*.*'):
            to_file = outdir / imgfile.name
            shutil.copy(str(imgfile), str(to_file))
    else:
        outfile = outdir / (id + '.md')
    newfile = frontmatter.dumps(post)
    with outfile.open('w', encoding='UTF-8') as w:
        w.write(newfile)
    return True
<|reserved_special_token_0|>
def process_tweet(d1):
    """Process a single archived tweet *d1*.

    Returns True when the tweet is already covered by an existing post or
    was matched (via its links) to a syndicated post; otherwise falls
    through to create_post() to generate a new markdown post.
    """
    orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,
        d1['id_str'])
    # Already mapped to a full post or photo post?  Nothing to do.
    if orig_tweet_url in urlmap:
        og = urlmap.get(orig_tweet_url)
        if og['source_path'].startswith('post\\') or og['source_path'
            ].startswith('photos\\'):
            return True
    tweet_source = d1['source']
    for s in syndicated_sources:
        if tweet_source.find(s) >= 0:
            # Tweet came from a syndication tool: try to match each of
            # its links back to an existing post on the site.
            for u in d1.get('entities', {}).get('urls', []):
                raw_url = u['url']
                url = u['expanded_url']
                if process_syn_url(d1, raw_url, url):
                    return True
            # Fall back to bare URLs found in the text itself.
            raw_urls = re.findall(
                'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
                , d1['full_text'])
            for raw_url in raw_urls:
                if process_syn_url(d1, raw_url, raw_url):
                    return True
            break
    return create_post(d1)
def import_all():
    """Import every tweet from SOURCE_FILE and print summary statistics."""
    countbysource = {}
    replies = 0
    retweets = 0
    withmedia = 0
    raw = 0
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        d = json.load(f)
        idx = 0
        for d1 in d:
            # When debugging, only process the single tweet id requested.
            if debug_id is not None and d1['id_str'] != debug_id:
                continue
            if process_tweet(d1):
                continue
            # Everything below only gathers stats for tweets that were
            # not handled by process_tweet().
            tweet_source = d1['source']
            if tweet_source not in countbysource:
                countbysource[tweet_source] = 1
            else:
                countbysource[tweet_source] = countbysource[tweet_source] + 1
            is_reply = False
            if ('in_reply_to_status_id_str' in d1 and
                'in_reply_to_screen_name' in d1):
                replies = replies + 1
                is_reply = True
            is_retweet = False
            content = d1['full_text']
            if content.startswith('RT @'):
                retweets = retweets + 1
                is_retweet = True
            media = []
            if 'extended_entities' in d1:
                for m in d1['extended_entities']['media']:
                    media.append(m['media_url_https'])
            if len(media) > 0:
                withmedia = withmedia + 1
            if not is_reply and not is_retweet and len(media) == 0:
                raw = raw + 1
            idx = idx + 1
    # Persist the URL-resolution cache accumulated during the run.
    resolver.save_cache()
    for source in countbysource:
        print('countbysource: %s = %s' % (source, countbysource[source]))
    print('replies: %s' % replies)
    print('retweets: %s' % retweets)
    print('withmedia: %s' % withmedia)
    print('raw: %s' % raw)
    print('total: %s' % idx)
<|reserved_special_token_0|>
def stats():
    """Print a per-year tweet count for the archive as indented JSON.

    Reads SOURCE_FILE once; removed the unused counters (countbysource,
    replies, retweets, withmedia, raw, idx) that were assigned but never
    read.  Output is unchanged.
    """
    count_by_year = {}
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        tweets = json.load(f)
        for t in tweets:
            # Archive timestamps look like 'Wed Oct 10 20:19:24 +0000 2018'.
            dt = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')
            count_by_year[dt.year] = count_by_year.get(dt.year, 0) + 1
    print(json.dumps(count_by_year, indent=2))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def loadurlmap(cleanupdupes=False):
    """Load urlmap.json from the Hugo output dir into a lookup dict.

    The returned dict maps every post URL, every syndicated URL, and
    every non-empty post title to that post's info record.  When
    *cleanupdupes* is True, duplicate syndicated URLs are resolved to a
    single canonical post and the redundant source files are deleted.
    """
    blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])
    urlmapfile = blogdir / 'urlmap.json'
    urlmap = {}
    urlmapdupes = {}
    with urlmapfile.open(encoding='UTF-8') as f:
        tempurlmap = json.loads(f.read())
        for u in tempurlmap:
            u1 = tempurlmap[u]
            if 'syndicated' in u1:
                for s in u1['syndicated']:
                    if 'url' in s:
                        su = s['url']
                        # Track syndicated URLs claimed by more than one post.
                        if su in urlmap:
                            if su not in urlmapdupes:
                                urlmapdupes[su] = [u1, urlmap[su]]
                            else:
                                urlmapdupes[su].append(u1)
                        else:
                            urlmap[su] = u1
            urlmap[u] = u1
            # Also allow lookups by post title.
            title = u1.get('title', '').strip()
            if len(title) > 0:
                urlmap[title] = u1
    if cleanupdupes:
        for su in urlmapdupes:
            dupes = urlmapdupes[su]
            canonical = None
            for_deletion = []
            for d in dupes:
                # Full posts, link posts and heavily-syndicated entries
                # win over auto-generated duplicates.
                if d['source_path'].startswith('post') or d['source_path'
                    ].startswith('links') or len(d['syndicated']) > 2:
                    if canonical is not None:
                        print(
                            '\n\r##### WTH. More than one canonical urls were detected for %s'
                             % su)
                        print(json.dumps(dupes, indent=4))
                    canonical = d
                else:
                    for_deletion.append(d)
            if canonical is None:
                print(
                    '##### Dupes were detected for %s but no canonical url found!'
                     % su)
                print(dupes)
            else:
                urlmap[su] = canonical
                for d in for_deletion:
                    source_path = Path(d['source_path'])
                    full_path = contentdir / source_path
                    if full_path.exists():
                        os.remove(str(full_path))
    return urlmap
<|reserved_special_token_0|>
def is_number(s):
    """Return True when *s* can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def add_syndication(mdfile, url, stype):
    """Record a syndication entry (stype, url) in markdown file *mdfile*.

    Does nothing if an identical entry already exists.  The file is
    re-serialized with frontmatter.dumps, so formatting may be normalized.
    """
    with mdfile.open(encoding='UTF-8') as f:
        try:
            post = frontmatter.load(f)
        except:
            # NOTE(review): bare except silently skips unparseable files.
            print('Error parsing file')
            return
    if post.get('syndicated') == None:
        post['syndicated'] = []
    else:
        # Avoid duplicating an existing syndication entry.
        for s in post['syndicated']:
            if s['type'] == stype and s['url'] == url:
                return
    post['syndicated'].append({'type': stype, 'url': url})
    newfile = frontmatter.dumps(post)
    with mdfile.open('w', encoding='UTF-8') as w:
        w.write(newfile)
def get_content(t):
    """Render tweet *t*'s text as markdown.

    @mentions become markdown links to the user's Twitter profile, and
    every URL (both the ones listed in t['entities']['urls'] and any bare
    URLs found by regex) is replaced with its fully-resolved final URL.
    """
    content = t['full_text']
    if 'entities' in t:
        # Bare URLs in the text that may not appear in the entities list.
        raw_urls = re.findall(
            'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
            , content)
        # Turn @mentions into markdown profile links.
        for m in t['entities']['user_mentions']:
            screen_name = m['screen_name']
            mdlink = '[@%s](https://twitter.com/%s/)' % (screen_name,
                screen_name)
            content = content.replace('@' + screen_name, mdlink)
        processed_urls = []
        for u in t['entities']['urls']:
            # Replace the t.co short URL with its resolved destination.
            url = u['url']
            processed_urls.append(url)
            expanded_url = u['expanded_url']
            processed_urls.append(expanded_url)
            expanded_url, no_errors = resolver.get_final_url(expanded_url)
            processed_urls.append(expanded_url)
            content = content.replace(url, expanded_url)
        # Resolve any leftover raw URLs not covered by the entities list.
        for raw_url in raw_urls:
            if raw_url not in processed_urls:
                expanded_url, no_errors = resolver.get_final_url(raw_url)
                content = content.replace(raw_url, expanded_url)
    return content
def create_post(t):
    """Convert archived tweet *t* into a markdown post under contentdir.

    The post kind is chosen from the tweet's metadata:
      - 'replies'  when the tweet is a reply
      - 'reposts'  when it is a retweet resolved via retweetscache
      - 'photos'   when it has attached media (and is not a reply/repost)
      - 'notes'    otherwise
    Media files whose names start with the tweet id are copied next to
    the post.  Always returns True so callers treat the tweet as handled.
    """
    id = t['id_str']
    d = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')
    content = get_content(t)
    post = frontmatter.Post(content)
    post['date'] = d
    post['syndicated'] = [{
        'type': 'twitter',
        'url': 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME, t['id']),
    }]
    kind = 'notes'
    if 'in_reply_to_status_id_str' in t and 'in_reply_to_screen_name' in t:
        kind = 'replies'
        post['reply_to'] = {
            'type': 'twitter',
            'url': 'https://twitter.com/%s/statuses/%s/' % (
                t['in_reply_to_screen_name'], t['in_reply_to_status_id_str']),
            'name': t['in_reply_to_screen_name'],
            'label': "%s's tweet" % t['in_reply_to_screen_name'],
        }
    elif t['full_text'].startswith('RT @'):
        # Only retweets whose original author was resolved into the cache
        # become 'reposts'; otherwise they stay plain notes.
        rc = retweetscache.get(id)
        if rc is not None and 'retweeted_user' in rc:
            kind = 'reposts'
            post['repost_source'] = {
                'type': 'twitter',
                'name': rc['retweeted_user'],
                'url': 'https://twitter.com/%s/statuses/%s/' % (
                    rc['retweeted_user'], rc['retweeted_id']),
            }
    media = []
    for m in t.get('extended_entities', {}).get('media', []):
        media.append(m['media_url_https'])
    if len(media) > 0:
        if kind != 'reposts' and kind != 'replies':
            kind = 'photos'
    tags = []
    # BUG FIX: key was misspelled 'entites', so hashtags supplied by the
    # Twitter archive were never collected (only the regex fallback below).
    for tag in t.get('entities', {}).get('hashtags', []):
        tags.append(tag['text'].lower())
    # Fallback: also pull #hashtags straight out of the rendered text.
    parsed_tags = re.findall('\\s#(\\w+)', ' ' + content)
    for tag in parsed_tags:
        if tag not in tags:
            tags.append(tag.lower())
    for tag in auto_tags:
        if tag not in tags:
            tags.append(tag)
    if len(tags) > 0:
        post['tags'] = tags
    post['source'] = 'twitter'
    outdir = contentdir / kind / d.strftime('%Y') / d.strftime('%m')
    if len(media) > 0:
        # Posts with media get their own folder (page bundle).
        outdir = outdir / id
    if not outdir.exists():
        outdir.mkdir(parents=True)
    if len(media) > 0:
        outfile = outdir / 'index.md'
        for imgfile in mediadir.glob(id + '*.*'):
            to_file = outdir / imgfile.name
            shutil.copy(str(imgfile), str(to_file))
    else:
        outfile = outdir / (id + '.md')
    newfile = frontmatter.dumps(post)
    with outfile.open('w', encoding='UTF-8') as w:
        w.write(newfile)
    return True
def process_syn_url(d1, raw_url, url):
    """Try to match one of tweet d1's links to an existing site post.

    Resolves and normalizes *url*; if the result (or, for roytang.net /
    mtgstorm.com links, its path or a title derived from the tweet text)
    is found in urlmap, the tweet URL is recorded as a syndication of
    that post.  Returns True when the tweet was accounted for.
    """
    orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,
        d1['id_str'])
    url, no_errors = resolver.get_final_url(url)
    if not no_errors:
        print(d1['full_text'])
    # Normalize known URL variants before the lookup.
    url = url.replace('www.instagram.com', 'instagram.com')
    url = url.replace('/roytang0400', '')
    url = urldefrag(url)[0]
    if url.find('instagram.com') >= 0 and url.find('?') >= 0:
        # Strip tracking query strings from instagram links.
        url = url.split('?')[0]
    if url in urlmap:
        u = urlmap[url]
        source_path = Path(u['source_path'])
        full_path = contentdir / source_path
        add_syndication(full_path, orig_tweet_url, 'twitter')
        return True
    if url.find('://roytang.net') >= 0 or url.find('://mtgstorm.com') >= 0:
        # Own-domain link: try matching by URL path first ...
        link_url = urlparse(url)
        u = urlmap.get(link_url.path, None)
        if u is None:
            # ... then by the post title embedded in the tweet text.
            title_search_term = d1['full_text']
            title_search_term = title_search_term.replace('New blog post: ', ''
                )
            title_search_term = title_search_term.replace('New post: ', '')
            title_search_term = title_search_term.replace(raw_url, '')
            title_search_term = title_search_term.strip()
            u = urlmap.get(title_search_term, None)
        if u is not None:
            source_path = Path(u['source_path'])
            full_path = contentdir / source_path
            add_syndication(full_path, orig_tweet_url, 'twitter')
            return True
        else:
            # Own-domain link we couldn't match: flag it but treat the
            # tweet as handled so it doesn't become a duplicate note.
            print('######## Unmatched roytang url: %s' % url)
            print(d1['full_text'])
            return True
    return False
def process_tweet(d1):
orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,
d1['id_str'])
if orig_tweet_url in urlmap:
og = urlmap.get(orig_tweet_url)
if og['source_path'].startswith('post\\') or og['source_path'
].startswith('photos\\'):
return True
tweet_source = d1['source']
for s in syndicated_sources:
if tweet_source.find(s) >= 0:
for u in d1.get('entities', {}).get('urls', []):
raw_url = u['url']
url = u['expanded_url']
if process_syn_url(d1, raw_url, url):
return True
raw_urls = re.findall(
'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
, d1['full_text'])
for raw_url in raw_urls:
if process_syn_url(d1, raw_url, raw_url):
return True
break
return create_post(d1)
def import_all():
countbysource = {}
replies = 0
retweets = 0
withmedia = 0
raw = 0
with Path(SOURCE_FILE).open(encoding='utf-8') as f:
d = json.load(f)
idx = 0
for d1 in d:
if debug_id is not None and d1['id_str'] != debug_id:
continue
if process_tweet(d1):
continue
tweet_source = d1['source']
if tweet_source not in countbysource:
countbysource[tweet_source] = 1
else:
countbysource[tweet_source] = countbysource[tweet_source] + 1
is_reply = False
if ('in_reply_to_status_id_str' in d1 and
'in_reply_to_screen_name' in d1):
replies = replies + 1
is_reply = True
is_retweet = False
content = d1['full_text']
if content.startswith('RT @'):
retweets = retweets + 1
is_retweet = True
media = []
if 'extended_entities' in d1:
for m in d1['extended_entities']['media']:
media.append(m['media_url_https'])
if len(media) > 0:
withmedia = withmedia + 1
if not is_reply and not is_retweet and len(media) == 0:
raw = raw + 1
idx = idx + 1
resolver.save_cache()
for source in countbysource:
print('countbysource: %s = %s' % (source, countbysource[source]))
print('replies: %s' % replies)
print('retweets: %s' % retweets)
print('withmedia: %s' % withmedia)
print('raw: %s' % raw)
print('total: %s' % idx)
def thread_replies():
    """Merge self-reply tweets into the post they reply to.

    For each reply to one of our own tweets that is not yet mapped,
    converts the replied-to post into a page bundle (index.md) if needed,
    appends the reply's text and media to it, deletes any standalone
    reply post created earlier, and maps the reply's tweet URL to the
    merged post.  Prints the number of replies merged.
    """
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        d = json.load(f)
        idx = 0
        # Process the archive in reverse file order so threads build up
        # chronologically — presumably the file is newest-first; verify.
        d = reversed(d)
        for d1 in d:
            is_reply = False
            if ('in_reply_to_status_id_str' in d1 and
                'in_reply_to_screen_name' in d1):
                is_reply = True
            if not is_reply:
                continue
            id_str = d1['id_str']
            orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (
                TWITTER_USERNAME, id_str)
            # Skip replies already mapped to a post.
            if orig_tweet_url in urlmap:
                continue
            date = datetime.strptime(d1['created_at'],
                '%a %b %d %H:%M:%S %z %Y')
            if d1['in_reply_to_screen_name'] == TWITTER_USERNAME:
                replied_to_url = 'https://twitter.com/%s/statuses/%s/' % (d1
                    ['in_reply_to_screen_name'], d1[
                    'in_reply_to_status_id_str'])
                info = urlmap[replied_to_url]
                source_path = Path(info['source_path'])
                full_path = contentdir / source_path
                if full_path.name == 'index.md':
                    parentdir = full_path.parent
                else:
                    # Convert the flat .md file into a page bundle so the
                    # reply's media can live next to it.
                    parentdir = full_path.parent / full_path.stem
                    if not parentdir.exists():
                        parentdir.mkdir(parents=True)
                    oldfile = full_path
                    full_path = parentdir / 'index.md'
                    shutil.move(str(oldfile), str(full_path))
                    urlmap[replied_to_url]['source_path'] = str(full_path.
                        relative_to(contentdir))
                with full_path.open(encoding='UTF-8') as f:
                    try:
                        post = frontmatter.load(f)
                    except:
                        # NOTE(review): bare except aborts the whole run
                        # on the first unparseable file.
                        print('Error parsing file')
                        return
                post['syndicated'].append({'type': 'twitter', 'url':
                    orig_tweet_url})
                content = get_content(d1)
                post.content = post.content + '\n\r' + content
                newfile = frontmatter.dumps(post)
                with full_path.open('w', encoding='UTF-8') as w:
                    w.write(newfile)
                media = []
                for m in d1.get('extended_entities', {}).get('media', []):
                    media.append(m['media_url_https'])
                # Copy the reply's media files into the bundle folder.
                for imgfile in mediadir.glob(d1['id_str'] + '*.*'):
                    to_file = parentdir / imgfile.name
                    shutil.copy(str(imgfile), str(to_file))
                # Remove any standalone reply post/folder created earlier.
                oldfile = contentdir / 'replies' / date.strftime('%Y'
                    ) / date.strftime('%m') / (id_str + '.md')
                if oldfile.exists():
                    os.remove(str(oldfile))
                oldfolder = contentdir / 'replies' / date.strftime('%Y'
                    ) / date.strftime('%m') / id_str
                if oldfolder.exists():
                    shutil.rmtree(str(oldfolder))
                urlmap[orig_tweet_url] = info
            else:
                continue
            idx = idx + 1
    print(idx)
<|reserved_special_token_0|>
def cleanup_videos():
    """Delete redundant higher-bitrate video variants for video tweets.

    For each tweet with video media, keeps only the lowest-bitrate
    variant's file; video posts that ended up under photos/ are converted
    back to notes and their bundle folder is removed entirely.
    """
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        d = json.load(f)
        idx = 0
        for d1 in d:
            orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (
                TWITTER_USERNAME, d1['id_str'])
            info = urlmap.get(orig_tweet_url)
            if info is None:
                continue
            for m in d1.get('extended_entities', {}).get('media', []):
                if 'video_info' in m:
                    videos = []
                    lowest_bitrate = 1000000000000
                    lowest_video = ''
                    # Find the lowest-bitrate variant; that is the file kept.
                    for vi in m['video_info']['variants']:
                        if 'bitrate' in vi:
                            videos.append(vi['url'])
                            bitrate = int(vi['bitrate'])
                            if bitrate < lowest_bitrate:
                                lowest_video = vi['url']
                                lowest_bitrate = bitrate
                    mdfile = urlmap_to_mdfile(info)
                    if str(mdfile).find('\\photos\\') >= 0:
                        # Video posts don't belong under photos/: demote
                        # to notes and drop the whole bundle folder.
                        print(mdfile)
                        p = PostBuilder.from_mdfile(mdfile)
                        p.kind = 'notes'
                        p.save()
                        container = mdfile.parent
                        for f in container.iterdir():
                            os.remove(str(f))
                        container.rmdir()
                        continue
                    # NOTE(review): 'container' is only assigned inside the
                    # photos branch above (which continues), so here it is
                    # stale from a previous iteration or undefined on the
                    # first non-photos video tweet — verify intent
                    # (probably should be mdfile.parent).
                    for v in videos:
                        if v == lowest_video:
                            continue
                        name = Path(v).name
                        if name.find('?') >= 0:
                            name = name.split('?')[0]
                        vfilename = d1['id_str'] + '-' + name
                        vfile = container / vfilename
                        print(vfile)
                        os.remove(str(vfile))
def stats():
    """Print a per-year tweet count for the archive as indented JSON.

    Reads SOURCE_FILE once; removed the unused counters (countbysource,
    replies, retweets, withmedia, raw, idx) that were assigned but never
    read.  Output is unchanged.
    """
    count_by_year = {}
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        tweets = json.load(f)
        for t in tweets:
            # Archive timestamps look like 'Wed Oct 10 20:19:24 +0000 2018'.
            dt = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')
            count_by_year[dt.year] = count_by_year.get(dt.year, 0) + 1
    print(json.dumps(count_by_year, indent=2))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# Path to the exported tweet archive being imported.
SOURCE_FILE = 'D:\\temp\\twitter\\tweet.js'
# Twitter account the archive belongs to.
TWITTER_USERNAME = 'roytang'
# Tags automatically added to every imported post.
auto_tags = ['mtg']
# Tweet 'source' values that indicate the tweet may have been syndicated
# from an existing site post (checked in process_tweet).
syndicated_sources = ['IFTTT', 'Tumblr', 'instagram.com', 'Mailchimp',
    'Twitter Web', 'TweetDeck', 'mtgstorm']
# Set to a tweet id string to restrict processing to that single tweet.
debug_id = None
import frontmatter
import json
import requests
import urllib.request
from urllib.parse import urlparse, parse_qs, urldefrag
from urllib.error import HTTPError
import sys
from pathlib import Path
import os, shutil
import inspect
from datetime import datetime
import re
from utils import loadurlmap, load_map_from_json, URLResolver, PostBuilder
# Working directories and shared helpers used by the import functions.
cwd = Path.cwd()
contentdir = cwd / 'content'
blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])
mediadir = Path('D:\\temp\\roy_mtg-twitter\\tweet_media')
# Maps tweet ids to info about the retweeted tweet (user, original id).
retweetscache = load_map_from_json('d:\\temp\\twitter\\retweets.json')
# Resolves shortened/redirected URLs to their final destination.
resolver = URLResolver()
def loadurlmap(cleanupdupes=False):
    """Build a lookup from every known URL (and post title) to its post metadata.

    Reads ``urlmap.json`` from the Hugo blog output dir and indexes each entry
    three ways: by its own key, by each of its syndicated URLs, and by its
    title.  A syndicated URL that appears under more than one post is collected
    into a dupes map; with ``cleanupdupes=True`` the canonical post is kept and
    the duplicate source files are deleted from ``contentdir``.

    NOTE(review): this redefinition shadows the ``loadurlmap`` imported from
    ``utils`` at the top of the file — presumably intentional; confirm.
    """
    blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])
    urlmapfile = blogdir / 'urlmap.json'
    urlmap = {}
    urlmapdupes = {}
    with urlmapfile.open(encoding='UTF-8') as f:
        tempurlmap = json.loads(f.read())
        for u in tempurlmap:
            u1 = tempurlmap[u]
            if 'syndicated' in u1:
                for s in u1['syndicated']:
                    if 'url' in s:
                        su = s['url']
                        # Syndicated urls are expected to be unique; a repeat
                        # means two posts claim the same syndicated url, so
                        # record it as a dupe for possible cleanup below.
                        if su in urlmap:
                            if su not in urlmapdupes:
                                urlmapdupes[su] = [u1, urlmap[su]]
                            else:
                                urlmapdupes[su].append(u1)
                        else:
                            urlmap[su] = u1
            urlmap[u] = u1
            # Also index by title so tweets can be matched by text search.
            title = u1.get('title', '').strip()
            if len(title) > 0:
                urlmap[title] = u1
    if cleanupdupes:
        # Resolve each duplicated syndicated url to one canonical post and
        # delete the content files of the non-canonical copies.
        for su in urlmapdupes:
            dupes = urlmapdupes[su]
            canonical = None
            for_deletion = []
            for d in dupes:
                # Heuristic: a real post/links entry, or one with several
                # syndication targets, wins over auto-generated copies.
                if d['source_path'].startswith('post') or d['source_path'
                    ].startswith('links') or len(d['syndicated']) > 2:
                    if canonical is not None:
                        print(
                            '\n\r##### WTH. More than one canonical urls were detected for %s'
                             % su)
                        print(json.dumps(dupes, indent=4))
                    canonical = d
                else:
                    for_deletion.append(d)
            if canonical is None:
                print(
                    '##### Dupes were detected for %s but no canonical url found!'
                     % su)
                print(dupes)
            else:
                urlmap[su] = canonical
                for d in for_deletion:
                    source_path = Path(d['source_path'])
                    full_path = contentdir / source_path
                    if full_path.exists():
                        os.remove(str(full_path))
    return urlmap
urlmap = loadurlmap(False)
def is_number(s):
    """Return True if *s* can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def add_syndication(mdfile, url, stype):
    """Record a syndication entry (type + url) in a post's frontmatter.

    Loads the markdown file at *mdfile*, appends ``{'type': stype, 'url': url}``
    to its ``syndicated`` list (creating the list if absent), and writes the
    file back.  An entry with the same type and url is left untouched.
    Silently returns if the frontmatter cannot be parsed.
    """
    with mdfile.open(encoding='UTF-8') as f:
        try:
            post = frontmatter.load(f)
        # was a bare except:, which would also swallow KeyboardInterrupt etc.
        except Exception:
            print('Error parsing file')
            return
    # was `== None`; identity comparison is the correct idiom for None
    if post.get('syndicated') is None:
        post['syndicated'] = []
    else:
        for s in post['syndicated']:
            if s['type'] == stype and s['url'] == url:
                # already recorded; don't add a duplicate
                return
    post['syndicated'].append({'type': stype, 'url': url})
    newfile = frontmatter.dumps(post)
    with mdfile.open('w', encoding='UTF-8') as w:
        w.write(newfile)
def get_content(t):
    """Convert a tweet dict into markdown content.

    Replaces @mentions with markdown profile links and rewrites every
    shortened url — both those listed in the tweet's ``entities`` and any
    raw urls found in the text — to its resolved final destination via the
    module-level ``resolver``.
    """
    content = t['full_text']
    if 'entities' in t:
        # Raw urls present in the text itself; may include urls that are
        # missing from the entities payload.
        raw_urls = re.findall(
            'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
            , content)
        # Replace @mentions with markdown links to the user's profile.
        for m in t['entities']['user_mentions']:
            screen_name = m['screen_name']
            mdlink = '[@%s](https://twitter.com/%s/)' % (screen_name,
                screen_name)
            content = content.replace('@' + screen_name, mdlink)
        processed_urls = []
        # Expand the urls Twitter itself annotated in the entities.
        for u in t['entities']['urls']:
            url = u['url']
            processed_urls.append(url)
            expanded_url = u['expanded_url']
            processed_urls.append(expanded_url)
            expanded_url, no_errors = resolver.get_final_url(expanded_url)
            processed_urls.append(expanded_url)
            content = content.replace(url, expanded_url)
        # Expand any remaining raw urls not covered by the entities.
        for raw_url in raw_urls:
            if raw_url not in processed_urls:
                expanded_url, no_errors = resolver.get_final_url(raw_url)
                content = content.replace(raw_url, expanded_url)
    return content
def create_post(t):
    """Create a markdown content file for the tweet dict *t*.

    Classifies the tweet as notes / replies / reposts / photos, builds its
    frontmatter (date, a syndication link back to Twitter, reply/repost
    source, tags), copies any attached media next to the post, and writes
    the result under ``contentdir``.  Returns True on success.
    """
    id = t['id_str']
    # Twitter archive timestamp format, e.g. "Mon Jan 01 12:00:00 +0000 2018"
    d = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')
    content = get_content(t)
    post = frontmatter.Post(content)
    post['date'] = d
    post['syndicated'] = [{'type': 'twitter', 'url':
        'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME, t['id'])}]
    kind = 'notes'
    if 'in_reply_to_status_id_str' in t and 'in_reply_to_screen_name' in t:
        kind = 'replies'
        post['reply_to'] = {'type': 'twitter', 'url':
            'https://twitter.com/%s/statuses/%s/' % (t[
            'in_reply_to_screen_name'], t['in_reply_to_status_id_str']),
            'name': t['in_reply_to_screen_name'], 'label': "%s's tweet" % t
            ['in_reply_to_screen_name']}
    elif t['full_text'].startswith('RT @'):
        rc = retweetscache.get(id)
        if rc is None:
            # RTed status is inaccessible; render as an ordinary note.
            pass
        elif 'retweeted_user' in rc:
            kind = 'reposts'
            post['repost_source'] = {'type': 'twitter', 'name': rc[
                'retweeted_user'], 'url':
                'https://twitter.com/%s/statuses/%s/' % (rc[
                'retweeted_user'], rc['retweeted_id'])}
        else:
            # Cached entry without user info; render as an ordinary note.
            pass
    media = []
    for m in t.get('extended_entities', {}).get('media', []):
        media.append(m['media_url_https'])
    if len(media) > 0:
        if kind != 'reposts' and kind != 'replies':
            kind = 'photos'
    tags = []
    # BUGFIX: key was misspelled 'entites', so hashtags from the Twitter
    # entities payload were never picked up (only the regex fallback below).
    for tag in t.get('entities', {}).get('hashtags', []):
        tags.append(tag['text'].lower())
    # Fallback: parse "#word" hashtags straight out of the rendered content.
    parsed_tags = re.findall('\\s#(\\w+)', ' ' + content)
    for tag in parsed_tags:
        if tag not in tags:
            tags.append(tag.lower())
    for tag in auto_tags:
        if tag not in tags:
            tags.append(tag)
    if len(tags) > 0:
        post['tags'] = tags
    post['source'] = 'twitter'
    outdir = contentdir / kind / d.strftime('%Y') / d.strftime('%m')
    # Posts with media become page bundles (a directory with index.md).
    if len(media) > 0:
        outdir = outdir / id
    if not outdir.exists():
        outdir.mkdir(parents=True)
    if len(media) > 0:
        outfile = outdir / 'index.md'
        # Copy the tweet's media files into the bundle.
        for imgfile in mediadir.glob(id + '*.*'):
            to_file = outdir / imgfile.name
            shutil.copy(str(imgfile), str(to_file))
    else:
        outfile = outdir / (id + '.md')
    newfile = frontmatter.dumps(post)
    with outfile.open('w', encoding='UTF-8') as w:
        w.write(newfile)
    return True
def process_syn_url(d1, raw_url, url):
    """Try to match one url from tweet *d1* to an existing post.

    Resolves and normalizes *url*, then: (1) if it maps directly to a known
    post in ``urlmap``, record the tweet as a syndication of that post;
    (2) if it points at roytang.net / mtgstorm.com, try matching by url path
    and then by the tweet text as a title.  Returns True when the tweet was
    handled (including own-domain urls that couldn't be matched).
    """
    orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,
        d1['id_str'])
    url, no_errors = resolver.get_final_url(url)
    if not no_errors:
        print(d1['full_text'])
    # Normalize known url variants before lookup.
    url = url.replace('www.instagram.com', 'instagram.com')
    url = url.replace('/roytang0400', '')
    url = urldefrag(url)[0]
    if url.find('instagram.com') >= 0 and url.find('?') >= 0:
        # Strip utm and other misc query params from instagram urls.
        url = url.split('?')[0]
    if url in urlmap:
        u = urlmap[url]
        source_path = Path(u['source_path'])
        full_path = contentdir / source_path
        add_syndication(full_path, orig_tweet_url, 'twitter')
        return True
    if url.find('://roytang.net') >= 0 or url.find('://mtgstorm.com') >= 0:
        link_url = urlparse(url)
        u = urlmap.get(link_url.path, None)
        if u is None:
            # Fall back to matching by post title: strip the boilerplate
            # prefix and the url itself from the tweet text.
            title_search_term = d1['full_text']
            title_search_term = title_search_term.replace('New blog post: ', ''
                )
            title_search_term = title_search_term.replace('New post: ', '')
            title_search_term = title_search_term.replace(raw_url, '')
            title_search_term = title_search_term.strip()
            u = urlmap.get(title_search_term, None)
        if u is not None:
            source_path = Path(u['source_path'])
            full_path = contentdir / source_path
            add_syndication(full_path, orig_tweet_url, 'twitter')
            return True
        else:
            # Own-domain url with no matching post; log it but treat the
            # tweet as handled anyway.
            print('######## Unmatched roytang url: %s' % url)
            print(d1['full_text'])
            return True
    return False
def process_tweet(d1):
    """Process one tweet dict from the archive.

    Skips tweets already mapped to a real post/photo, attaches tweets
    syndicated from elsewhere (IFTTT, Tumblr, instagram, ...) to their
    source post, otherwise creates a new post.  Returns True if handled.
    """
    orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,
        d1['id_str'])
    if orig_tweet_url in urlmap:
        og = urlmap.get(orig_tweet_url)
        # Already mapped to a real post/photo; nothing more to do.
        if og['source_path'].startswith('post\\') or og['source_path'
            ].startswith('photos\\'):
            return True
    tweet_source = d1['source']
    # Detect content syndicated from elsewhere via the posting client name.
    for s in syndicated_sources:
        if tweet_source.find(s) >= 0:
            for u in d1.get('entities', {}).get('urls', []):
                raw_url = u['url']
                url = u['expanded_url']
                if process_syn_url(d1, raw_url, url):
                    return True
            # Also try raw urls found directly in the tweet text.
            raw_urls = re.findall(
                'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
                , d1['full_text'])
            for raw_url in raw_urls:
                if process_syn_url(d1, raw_url, raw_url):
                    return True
            # Only the first matching source needs checking.
            break
    return create_post(d1)
def import_all():
    """Import every tweet from SOURCE_FILE, creating or linking posts.

    Tweets that could not be handled are tallied by source client, reply,
    retweet and media status, and a summary is printed at the end.
    """
    countbysource = {}
    replies = 0
    retweets = 0
    withmedia = 0
    raw = 0
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        d = json.load(f)
        idx = 0
        for d1 in d:
            # Debug hook: restrict the run to a single tweet id.
            if debug_id is not None and d1['id_str'] != debug_id:
                continue
            if process_tweet(d1):
                continue
            # From here on: stats for tweets that were NOT handled.
            tweet_source = d1['source']
            if tweet_source not in countbysource:
                countbysource[tweet_source] = 1
            else:
                countbysource[tweet_source] = countbysource[tweet_source] + 1
            is_reply = False
            if ('in_reply_to_status_id_str' in d1 and
                'in_reply_to_screen_name' in d1):
                replies = replies + 1
                is_reply = True
            is_retweet = False
            content = d1['full_text']
            if content.startswith('RT @'):
                retweets = retweets + 1
                is_retweet = True
            media = []
            if 'extended_entities' in d1:
                for m in d1['extended_entities']['media']:
                    media.append(m['media_url_https'])
            if len(media) > 0:
                withmedia = withmedia + 1
            if not is_reply and not is_retweet and len(media) == 0:
                raw = raw + 1
            idx = idx + 1
    # Persist the resolved-url cache for future runs.
    resolver.save_cache()
    for source in countbysource:
        print('countbysource: %s = %s' % (source, countbysource[source]))
    print('replies: %s' % replies)
    print('retweets: %s' % retweets)
    print('withmedia: %s' % withmedia)
    print('raw: %s' % raw)
    print('total: %s' % idx)
def thread_replies():
    """Merge self-reply tweets into the post of the tweet they reply to.

    Processes tweets oldest-first so threads append in order.  For each reply
    to one of my own tweets: converts the target post into a page bundle if
    needed, appends the reply text (plus a syndication entry and any media),
    deletes any standalone file previously created for the reply, and updates
    ``urlmap`` so later replies in the thread find the merged post.
    """
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        d = json.load(f)
        idx = 0
        # Reverse so tweet sequences are processed in chronological order.
        d = reversed(d)
        for d1 in d:
            is_reply = False
            if ('in_reply_to_status_id_str' in d1 and
                'in_reply_to_screen_name' in d1):
                is_reply = True
            if not is_reply:
                continue
            id_str = d1['id_str']
            orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (
                TWITTER_USERNAME, id_str)
            # Don't bother if this reply is already syndicated somewhere.
            if orig_tweet_url in urlmap:
                continue
            date = datetime.strptime(d1['created_at'],
                '%a %b %d %H:%M:%S %z %Y')
            # Only thread replies to my own tweets.
            if d1['in_reply_to_screen_name'] == TWITTER_USERNAME:
                replied_to_url = 'https://twitter.com/%s/statuses/%s/' % (d1
                    ['in_reply_to_screen_name'], d1[
                    'in_reply_to_status_id_str'])
                info = urlmap[replied_to_url]
                source_path = Path(info['source_path'])
                full_path = contentdir / source_path
                # Convert the target post into a page bundle if needed.
                if full_path.name == 'index.md':
                    parentdir = full_path.parent
                else:
                    parentdir = full_path.parent / full_path.stem
                    if not parentdir.exists():
                        parentdir.mkdir(parents=True)
                    oldfile = full_path
                    full_path = parentdir / 'index.md'
                    shutil.move(str(oldfile), str(full_path))
                    # Keep the urlmap in sync with the moved file.
                    urlmap[replied_to_url]['source_path'] = str(full_path.
                        relative_to(contentdir))
                # Append the reply to the original post and record it as a
                # syndication as well.
                with full_path.open(encoding='UTF-8') as f:
                    try:
                        post = frontmatter.load(f)
                    except:
                        print('Error parsing file')
                        return
                post['syndicated'].append({'type': 'twitter', 'url':
                    orig_tweet_url})
                content = get_content(d1)
                post.content = post.content + '\n\r' + content
                newfile = frontmatter.dumps(post)
                with full_path.open('w', encoding='UTF-8') as w:
                    w.write(newfile)
                # Copy over any media from the reply as well.
                media = []
                for m in d1.get('extended_entities', {}).get('media', []):
                    media.append(m['media_url_https'])
                for imgfile in mediadir.glob(d1['id_str'] + '*.*'):
                    to_file = parentdir / imgfile.name
                    shutil.copy(str(imgfile), str(to_file))
                # Delete any standalone file/bundle previously created for
                # this reply.
                oldfile = contentdir / 'replies' / date.strftime('%Y'
                    ) / date.strftime('%m') / (id_str + '.md')
                if oldfile.exists():
                    os.remove(str(oldfile))
                oldfolder = contentdir / 'replies' / date.strftime('%Y'
                    ) / date.strftime('%m') / id_str
                if oldfolder.exists():
                    shutil.rmtree(str(oldfolder))
                # Map this reply to the merged post so succeeding replies in
                # the thread attach to the correct root tweet.
                urlmap[orig_tweet_url] = info
            else:
                continue
            idx = idx + 1
    print(idx)
from utils import urlmap_to_mdfile
def cleanup_videos():
    """Prune downloaded video variants, keeping only the lowest-bitrate one.

    For every tweet with video media that maps to an existing post: if the
    post was misfiled under photos, move it to notes and delete its old
    bundle; otherwise delete every downloaded video file except the variant
    with the lowest bitrate.
    """
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        d = json.load(f)
        idx = 0
        for d1 in d:
            orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (
                TWITTER_USERNAME, d1['id_str'])
            info = urlmap.get(orig_tweet_url)
            if info is None:
                continue
            for m in d1.get('extended_entities', {}).get('media', []):
                if 'video_info' in m:
                    videos = []
                    lowest_bitrate = 1000000000000
                    lowest_video = ''
                    # Collect all variant urls and remember the one with the
                    # lowest bitrate (variants without a bitrate are skipped).
                    for vi in m['video_info']['variants']:
                        if 'bitrate' in vi:
                            videos.append(vi['url'])
                            bitrate = int(vi['bitrate'])
                            if bitrate < lowest_bitrate:
                                lowest_video = vi['url']
                                lowest_bitrate = bitrate
                    mdfile = urlmap_to_mdfile(info)
                    # BUGFIX: container must be assigned before the photos
                    # branch below; previously it was only set inside that
                    # branch (which continues), so the deletion loop hit a
                    # NameError or reused a stale value from an earlier
                    # iteration.
                    container = mdfile.parent
                    if str(mdfile).find('\\photos\\') >= 0:
                        print(mdfile)
                        # Misfiled as a photo post; move it to notes.
                        p = PostBuilder.from_mdfile(mdfile)
                        p.kind = 'notes'
                        p.save()
                        # Delete the old bundle files.
                        for f in container.iterdir():
                            os.remove(str(f))
                        container.rmdir()
                        continue
                    # Delete every video file except the lowest-bitrate one.
                    for v in videos:
                        if v == lowest_video:
                            continue
                        name = Path(v).name
                        if name.find('?') >= 0:
                            name = name.split('?')[0]
                        vfilename = d1['id_str'] + '-' + name
                        vfile = container / vfilename
                        print(vfile)
                        os.remove(str(vfile))
def stats():
    """Print a JSON summary of tweet counts per year from SOURCE_FILE.

    Removed the unused locals (countbysource, replies, retweets, withmedia,
    raw, idx) that were left over from import_all.
    """
    count_by_year = {}
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        d = json.load(f)
        for t in d:
            # Twitter archive timestamp format, e.g. "Mon Jan 01 12:00:00 +0000 2018"
            dt = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')
            count_by_year[dt.year] = count_by_year.get(dt.year, 0) + 1
        print(json.dumps(count_by_year, indent=2))
stats()
<|reserved_special_token_1|>
SOURCE_FILE = "D:\\temp\\twitter\\tweet.js"
TWITTER_USERNAME = 'roytang'
auto_tags = ["mtg"]
syndicated_sources = ["IFTTT", "Tumblr", "instagram.com", "Mailchimp", "Twitter Web", "TweetDeck", "mtgstorm"]
debug_id = None
# debug_id = "11143081155"
import frontmatter
import json
import requests
import urllib.request
from urllib.parse import urlparse, parse_qs, urldefrag
from urllib.error import HTTPError
import sys
from pathlib import Path
import os, shutil
import inspect
from datetime import datetime
import re
from utils import loadurlmap, load_map_from_json, URLResolver, PostBuilder
cwd = Path.cwd()
contentdir = cwd / "content"
blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])
mediadir = Path("D:\\temp\\roy_mtg-twitter\\tweet_media")
retweetscache = load_map_from_json("d:\\temp\\twitter\\retweets.json")
resolver = URLResolver()
def loadurlmap(cleanupdupes=False):
blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])
urlmapfile = blogdir / "urlmap.json"
urlmap = {}
urlmapdupes = {}
with urlmapfile.open(encoding="UTF-8") as f:
tempurlmap = json.loads(f.read())
for u in tempurlmap:
u1 = tempurlmap[u]
if "syndicated" in u1:
for s in u1['syndicated']:
if 'url' in s:
su = s['url']
if su in urlmap:
# we expect syndicated urls to be unique,
# so if it's already in the map,
# it must be a dupe
# (This is really just to clean up my own mess!)
if su not in urlmapdupes:
urlmapdupes[su] = [u1, urlmap[su]]
else:
urlmapdupes[su].append(u1)
else:
urlmap[su] = u1
urlmap[u] = u1
title = u1.get("title", "").strip()
if len(title) > 0:
urlmap[title] = u1
if cleanupdupes:
# clean up any found dupes by syndicated url
for su in urlmapdupes:
dupes = urlmapdupes[su]
canonical = None
for_deletion = []
for d in dupes:
if d["source_path"].startswith("post") or d["source_path"].startswith("links") or len(d['syndicated']) > 2:
if canonical is not None:
print("\n\r##### WTH. More than one canonical urls were detected for %s" % (su))
print(json.dumps(dupes, indent=4))
canonical = d
else:
for_deletion.append(d)
if canonical is None:
print("##### Dupes were detected for %s but no canonical url found!" % (su))
print(dupes)
else:
urlmap[su] = canonical
for d in for_deletion:
source_path = Path(d['source_path'])
full_path = contentdir / source_path
if full_path.exists():
os.remove(str(full_path))
return urlmap
urlmap = loadurlmap(False)
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def add_syndication(mdfile, url, stype):
with mdfile.open(encoding="UTF-8") as f:
try:
post = frontmatter.load(f)
except:
print("Error parsing file")
return
if post.get('syndicated') == None:
post['syndicated'] = []
else:
for s in post['syndicated']:
if s["type"] == stype and s["url"] == url:
# dont add a duplicate!
return
post['syndicated'].append({
'type': stype,
'url': url
})
newfile = frontmatter.dumps(post)
with mdfile.open("w", encoding="UTF-8") as w:
w.write(newfile)
def get_content(t):
content = t['full_text']
if "entities" in t:
# get raw urls in the text
raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', content)
# replace mentions with link
for m in t["entities"]["user_mentions"]:
screen_name = m["screen_name"]
# replace with markdown link
mdlink = "[@%s](https://twitter.com/%s/)" % (screen_name, screen_name)
content = content.replace("@"+screen_name, mdlink)
processed_urls = []
# clean urls
for u in t["entities"]["urls"]:
url = u["url"]
processed_urls.append(url)
expanded_url = u["expanded_url"]
processed_urls.append(expanded_url)
# print("##### A URL!!! %s" % expanded_url)
expanded_url, no_errors = resolver.get_final_url(expanded_url)
processed_urls.append(expanded_url)
content = content.replace(url, expanded_url)
# find urls that were not in the entities
for raw_url in raw_urls:
if raw_url not in processed_urls:
expanded_url, no_errors = resolver.get_final_url(raw_url)
content = content.replace(raw_url, expanded_url)
return content
def create_post(t):
id = t['id_str']
d = datetime.strptime(t['created_at'], "%a %b %d %H:%M:%S %z %Y")
content = get_content(t)
post = frontmatter.Post(content)
post['date'] = d
post['syndicated'] = [
{
"type": "twitter",
"url": "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, t['id'])
}
]
kind = "notes"
if "in_reply_to_status_id_str" in t and "in_reply_to_screen_name" in t:
kind = "replies"
post["reply_to"] = {
"type": "twitter",
"url": "https://twitter.com/%s/statuses/%s/" % (t['in_reply_to_screen_name'], t['in_reply_to_status_id_str']),
"name": t["in_reply_to_screen_name"],
"label": "%s's tweet" % (t["in_reply_to_screen_name"])
}
elif t["full_text"].startswith("RT @"):
rc = retweetscache.get(id)
if rc is None:
# RTed status is inaccessible, we'll just render it as an ordinary note
pass
else:
if "retweeted_user" in rc:
kind = "reposts"
post['repost_source'] = {
"type": "twitter",
"name": rc["retweeted_user"],
"url": "https://twitter.com/%s/statuses/%s/" % (rc['retweeted_user'], rc['retweeted_id'])
}
# dont process reposts for now
# return False
else:
# 785744070027030528 fails this
# RTed status is inaccessible, we'll just render it as an ordinary note
pass
# else:
# # dont process others for now
# return False
media = []
for m in t.get("extended_entities", {}).get("media", []):
media.append(m["media_url_https"])
if len(media) > 0:
if kind != "reposts" and kind != "replies":
kind = "photos"
# dont process media for now
# return False
tags = []
for tag in t.get('entites', {}).get('hashtags', []):
tags.append(tag['text'].lower())
parsed_tags = re.findall(r"\s#(\w+)", " " + content)
for tag in parsed_tags:
if tag not in tags:
tags.append(tag.lower())
for tag in auto_tags:
if tag not in tags:
tags.append(tag)
if len(tags) > 0:
post["tags"] = tags
post["source"] = "twitter"
outdir = contentdir / kind / d.strftime("%Y") / d.strftime("%m")
if len(media) > 0:
outdir = outdir / (id)
if not outdir.exists():
outdir.mkdir(parents=True)
if len(media) > 0:
outfile = outdir / ( "index.md" )
# find photos
for imgfile in mediadir.glob(id + "*.*"):
to_file = outdir / imgfile.name
shutil.copy(str(imgfile), str(to_file))
else:
outfile = outdir / ( id + ".md" )
newfile = frontmatter.dumps(post)
with outfile.open("w", encoding="UTF-8") as w:
w.write(newfile)
return True
def process_syn_url(d1, raw_url, url):
orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1['id_str'])
url, no_errors = resolver.get_final_url(url)
if not no_errors:
print(d1["full_text"])
url = url.replace("www.instagram.com", "instagram.com")
url = url.replace("/roytang0400", "")
url = urldefrag(url)[0]
if url.find("instagram.com") >= 0 and url.find("?") >= 0:
# remove utm and other misc query params from insta urls
url = url.split("?")[0]
if url in urlmap:
u = urlmap[url]
source_path = Path(u['source_path'])
full_path = contentdir / source_path
add_syndication(full_path, orig_tweet_url, "twitter")
return True
if url.find("://roytang.net") >= 0 or url.find("://mtgstorm.com") >= 0:
link_url = urlparse(url)
u = urlmap.get(link_url.path, None)
if u is None:
# try matching by title
title_search_term = d1["full_text"]
title_search_term = title_search_term.replace("New blog post: ", "")
title_search_term = title_search_term.replace("New post: ", "")
title_search_term = title_search_term.replace(raw_url, "")
title_search_term = title_search_term.strip()
u = urlmap.get(title_search_term, None)
if u is not None:
source_path = Path(u['source_path'])
full_path = contentdir / source_path
add_syndication(full_path, orig_tweet_url, "twitter")
return True
else:
print("######## Unmatched roytang url: %s" % (url))
print(d1["full_text"])
return True
return False
def process_tweet(d1):
orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1['id_str'])
if orig_tweet_url in urlmap:
og = urlmap.get(orig_tweet_url)
if og['source_path'].startswith('post\\') or og['source_path'].startswith('photos\\'):
# no need to process further any tweets that are already mapped to a post
return True
tweet_source = d1["source"]
# print("#### %s: %s" % (tweet_source, orig_tweet_url))
# detect content syndicated from elsewhere
# instagram, tumblr, roytang.net
for s in syndicated_sources:
if tweet_source.find(s) >= 0:
for u in d1.get('entities', {}).get("urls", []):
raw_url = u["url"]
url = u["expanded_url"]
if process_syn_url(d1, raw_url, url):
return True
# print("######## URL = %s" % (url))
# also process raw urls
raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', d1["full_text"])
for raw_url in raw_urls:
if process_syn_url(d1, raw_url, raw_url):
return True
break
return create_post(d1)
def import_all():
countbysource = {}
replies = 0
retweets = 0
withmedia = 0
raw = 0
with Path(SOURCE_FILE).open(encoding='utf-8') as f:
d = json.load(f)
idx = 0
for d1 in d:
if debug_id is not None and d1["id_str"] != debug_id:
continue
if process_tweet(d1):
continue
tweet_source = d1["source"]
if tweet_source not in countbysource:
countbysource[tweet_source] = 1
else:
countbysource[tweet_source] = countbysource[tweet_source] + 1
is_reply = False
if "in_reply_to_status_id_str" in d1 and "in_reply_to_screen_name" in d1:
replies = replies + 1
is_reply = True
# handle retweet
is_retweet = False
content = d1["full_text"]
if content.startswith("RT @"):
retweets = retweets + 1
is_retweet = True
media = []
if "extended_entities" in d1:
for m in d1["extended_entities"]["media"]:
media.append(m["media_url_https"])
if len(media) > 0:
withmedia = withmedia + 1
if not is_reply and not is_retweet and len(media) == 0:
raw = raw + 1
idx = idx + 1
# if idx > 100:
# break
# save the url cache for future use
resolver.save_cache()
for source in countbysource:
print("countbysource: %s = %s" % (source, countbysource[source]))
print("replies: %s" % (replies))
print("retweets: %s" % (retweets))
print("withmedia: %s" % (withmedia))
print("raw: %s" % (raw))
print("total: %s" % (idx))
def thread_replies():
with Path(SOURCE_FILE).open(encoding='utf-8') as f:
d = json.load(f)
idx = 0
# process in reverse order so tweet sequences are in order
d = reversed(d)
for d1 in d:
is_reply = False
if "in_reply_to_status_id_str" in d1 and "in_reply_to_screen_name" in d1:
is_reply = True
if not is_reply:
continue
id_str = d1['id_str']
# if id_str != "602009895437737984" and id_str != "602009747294924802":
# continue
orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, id_str)
# dont bother if already syndicated
if orig_tweet_url in urlmap:
continue
date = datetime.strptime(d1['created_at'], "%a %b %d %H:%M:%S %z %Y")
# process replies to myself
if d1["in_reply_to_screen_name"] == TWITTER_USERNAME:
replied_to_url = "https://twitter.com/%s/statuses/%s/" % (d1['in_reply_to_screen_name'], d1['in_reply_to_status_id_str'])
info = urlmap[replied_to_url]
source_path = Path(info['source_path'])
full_path = contentdir / source_path
# welp, we might as well move them to bundles
if full_path.name == "index.md":
parentdir = full_path.parent
else:
parentdir = full_path.parent / full_path.stem
if not parentdir.exists():
parentdir.mkdir(parents=True)
oldfile = full_path
full_path = parentdir / "index.md"
shutil.move(str(oldfile), str(full_path))
# also update the urlmap!
urlmap[replied_to_url]['source_path'] = str(full_path.relative_to(contentdir))
# append the reply to the original post, and add it as a syndication as well
with full_path.open(encoding="UTF-8") as f:
try:
post = frontmatter.load(f)
except:
print("Error parsing file")
return
post['syndicated'].append({
'type': 'twitter',
'url': orig_tweet_url
})
content = get_content(d1)
post.content = post.content + "\n\r" + content
newfile = frontmatter.dumps(post)
with full_path.open("w", encoding="UTF-8") as w:
w.write(newfile)
# copy over any media from the reply as well
media = []
for m in d1.get("extended_entities", {}).get("media", []):
media.append(m["media_url_https"])
for imgfile in mediadir.glob(d1["id_str"] + "*.*"):
to_file = parentdir / imgfile.name
shutil.copy(str(imgfile), str(to_file))
# delete any existing file created for this reply
oldfile = contentdir / "replies" / date.strftime("%Y") / date.strftime("%m") / (id_str + ".md")
if oldfile.exists():
os.remove(str(oldfile))
oldfolder = contentdir / "replies" / date.strftime("%Y") / date.strftime("%m") / (id_str)
if oldfolder.exists():
shutil.rmtree(str(oldfolder))
# replace this entry in the urlmap! this is so that succeeding replies can find the correct root tweet to attach to
urlmap[orig_tweet_url] = info
else:
continue
idx = idx + 1
print(idx)
from utils import urlmap_to_mdfile
def cleanup_videos():
with Path(SOURCE_FILE).open(encoding='utf-8') as f:
d = json.load(f)
idx = 0
for d1 in d:
orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1["id_str"])
info = urlmap.get(orig_tweet_url)
if info is None:
continue
for m in d1.get("extended_entities", {}).get("media", []):
if "video_info" in m:
videos = []
lowest_bitrate = 1000000000000
lowest_video = ""
for vi in m["video_info"]["variants"]:
if 'bitrate' in vi:
videos.append(vi["url"])
bitrate = int(vi['bitrate'])
if bitrate < lowest_bitrate:
lowest_video = vi["url"]
lowest_bitrate = bitrate
mdfile = urlmap_to_mdfile(info)
if str(mdfile).find("\\photos\\") >= 0:
print(mdfile)
# move it to notes, since it's not a photo
p = PostBuilder.from_mdfile(mdfile)
p.kind = "notes"
p.save()
# delete the old files
container = mdfile.parent
for f in container.iterdir():
os.remove(str(f))
container.rmdir()
continue
# delete all the video files except for the one with the lowest bitrate
for v in videos:
if v == lowest_video:
continue
name = Path(v).name
if name.find("?") >= 0:
name = name.split("?")[0]
vfilename = d1["id_str"] + "-" + name
vfile = container / vfilename
print(vfile)
os.remove(str(vfile))
def stats():
    """Print a per-year tweet count for the archive as indented JSON.

    Reads the tweet dump at ``SOURCE_FILE`` and tallies tweets by the year
    of their ``created_at`` timestamp (twitter's timezone-aware format).
    """
    count_by_year = {}
    with Path(SOURCE_FILE).open(encoding='utf-8') as f:
        d = json.load(f)
        for t in d:
            dt = datetime.strptime(t['created_at'], "%a %b %d %H:%M:%S %z %Y")
            count_by_year[dt.year] = count_by_year.get(dt.year, 0) + 1
    # Removed unused counters (countbysource, replies, retweets, withmedia,
    # raw, idx) that were copy-pasted from import_all() but never updated.
    print(json.dumps(count_by_year, indent=2))
# Entry point: exactly one migration/cleanup step is run per invocation.
# Uncomment the desired step below and comment out the rest.
# thread_replies()
# import_all()
# cleanup_videos()
stats()
|
flexible
|
{
"blob_id": "001d2ae89a2d008fdf6621a1be73de94c766c65f",
"index": 4570,
"step-1": "<mask token>\n\n\ndef get_content(t):\n content = t['full_text']\n if 'entities' in t:\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , content)\n for m in t['entities']['user_mentions']:\n screen_name = m['screen_name']\n mdlink = '[@%s](https://twitter.com/%s/)' % (screen_name,\n screen_name)\n content = content.replace('@' + screen_name, mdlink)\n processed_urls = []\n for u in t['entities']['urls']:\n url = u['url']\n processed_urls.append(url)\n expanded_url = u['expanded_url']\n processed_urls.append(expanded_url)\n expanded_url, no_errors = resolver.get_final_url(expanded_url)\n processed_urls.append(expanded_url)\n content = content.replace(url, expanded_url)\n for raw_url in raw_urls:\n if raw_url not in processed_urls:\n expanded_url, no_errors = resolver.get_final_url(raw_url)\n content = content.replace(raw_url, expanded_url)\n return content\n\n\n<mask token>\n\n\ndef process_tweet(d1):\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,\n d1['id_str'])\n if orig_tweet_url in urlmap:\n og = urlmap.get(orig_tweet_url)\n if og['source_path'].startswith('post\\\\') or og['source_path'\n ].startswith('photos\\\\'):\n return True\n tweet_source = d1['source']\n for s in syndicated_sources:\n if tweet_source.find(s) >= 0:\n for u in d1.get('entities', {}).get('urls', []):\n raw_url = u['url']\n url = u['expanded_url']\n if process_syn_url(d1, raw_url, url):\n return True\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , d1['full_text'])\n for raw_url in raw_urls:\n if process_syn_url(d1, raw_url, raw_url):\n return True\n break\n return create_post(d1)\n\n\ndef import_all():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n if debug_id is not None and d1['id_str'] 
!= debug_id:\n continue\n if process_tweet(d1):\n continue\n tweet_source = d1['source']\n if tweet_source not in countbysource:\n countbysource[tweet_source] = 1\n else:\n countbysource[tweet_source] = countbysource[tweet_source] + 1\n is_reply = False\n if ('in_reply_to_status_id_str' in d1 and \n 'in_reply_to_screen_name' in d1):\n replies = replies + 1\n is_reply = True\n is_retweet = False\n content = d1['full_text']\n if content.startswith('RT @'):\n retweets = retweets + 1\n is_retweet = True\n media = []\n if 'extended_entities' in d1:\n for m in d1['extended_entities']['media']:\n media.append(m['media_url_https'])\n if len(media) > 0:\n withmedia = withmedia + 1\n if not is_reply and not is_retweet and len(media) == 0:\n raw = raw + 1\n idx = idx + 1\n resolver.save_cache()\n for source in countbysource:\n print('countbysource: %s = %s' % (source, countbysource[source]))\n print('replies: %s' % replies)\n print('retweets: %s' % retweets)\n print('withmedia: %s' % withmedia)\n print('raw: %s' % raw)\n print('total: %s' % idx)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef loadurlmap(cleanupdupes=False):\n blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])\n urlmapfile = blogdir / 'urlmap.json'\n urlmap = {}\n urlmapdupes = {}\n with urlmapfile.open(encoding='UTF-8') as f:\n tempurlmap = json.loads(f.read())\n for u in tempurlmap:\n u1 = tempurlmap[u]\n if 'syndicated' in u1:\n for s in u1['syndicated']:\n if 'url' in s:\n su = s['url']\n if su in urlmap:\n if su not in urlmapdupes:\n urlmapdupes[su] = [u1, urlmap[su]]\n else:\n urlmapdupes[su].append(u1)\n else:\n urlmap[su] = u1\n urlmap[u] = u1\n title = u1.get('title', '').strip()\n if len(title) > 0:\n urlmap[title] = u1\n if cleanupdupes:\n for su in urlmapdupes:\n dupes = urlmapdupes[su]\n canonical = None\n for_deletion = []\n for d in dupes:\n if d['source_path'].startswith('post') or d['source_path'\n ].startswith('links') or len(d['syndicated']) > 2:\n if canonical is not None:\n print(\n '\\n\\r##### WTH. More than one canonical urls were detected for %s'\n % su)\n print(json.dumps(dupes, indent=4))\n canonical = d\n else:\n for_deletion.append(d)\n if canonical is None:\n print(\n '##### Dupes were detected for %s but no canonical url found!'\n % su)\n print(dupes)\n else:\n urlmap[su] = canonical\n for d in for_deletion:\n source_path = Path(d['source_path'])\n full_path = contentdir / source_path\n if full_path.exists():\n os.remove(str(full_path))\n return urlmap\n\n\n<mask token>\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\ndef add_syndication(mdfile, url, stype):\n with mdfile.open(encoding='UTF-8') as f:\n try:\n post = frontmatter.load(f)\n except:\n print('Error parsing file')\n return\n if post.get('syndicated') == None:\n post['syndicated'] = []\n else:\n for s in post['syndicated']:\n if s['type'] == stype and s['url'] == url:\n return\n post['syndicated'].append({'type': stype, 'url': url})\n newfile = frontmatter.dumps(post)\n with mdfile.open('w', encoding='UTF-8') as w:\n 
w.write(newfile)\n\n\ndef get_content(t):\n content = t['full_text']\n if 'entities' in t:\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , content)\n for m in t['entities']['user_mentions']:\n screen_name = m['screen_name']\n mdlink = '[@%s](https://twitter.com/%s/)' % (screen_name,\n screen_name)\n content = content.replace('@' + screen_name, mdlink)\n processed_urls = []\n for u in t['entities']['urls']:\n url = u['url']\n processed_urls.append(url)\n expanded_url = u['expanded_url']\n processed_urls.append(expanded_url)\n expanded_url, no_errors = resolver.get_final_url(expanded_url)\n processed_urls.append(expanded_url)\n content = content.replace(url, expanded_url)\n for raw_url in raw_urls:\n if raw_url not in processed_urls:\n expanded_url, no_errors = resolver.get_final_url(raw_url)\n content = content.replace(raw_url, expanded_url)\n return content\n\n\ndef create_post(t):\n id = t['id_str']\n d = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')\n content = get_content(t)\n post = frontmatter.Post(content)\n post['date'] = d\n post['syndicated'] = [{'type': 'twitter', 'url': \n 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME, t['id'])}]\n kind = 'notes'\n if 'in_reply_to_status_id_str' in t and 'in_reply_to_screen_name' in t:\n kind = 'replies'\n post['reply_to'] = {'type': 'twitter', 'url': \n 'https://twitter.com/%s/statuses/%s/' % (t[\n 'in_reply_to_screen_name'], t['in_reply_to_status_id_str']),\n 'name': t['in_reply_to_screen_name'], 'label': \"%s's tweet\" % t\n ['in_reply_to_screen_name']}\n elif t['full_text'].startswith('RT @'):\n rc = retweetscache.get(id)\n if rc is None:\n pass\n elif 'retweeted_user' in rc:\n kind = 'reposts'\n post['repost_source'] = {'type': 'twitter', 'name': rc[\n 'retweeted_user'], 'url': \n 'https://twitter.com/%s/statuses/%s/' % (rc[\n 'retweeted_user'], rc['retweeted_id'])}\n else:\n pass\n media = []\n for m in 
t.get('extended_entities', {}).get('media', []):\n media.append(m['media_url_https'])\n if len(media) > 0:\n if kind != 'reposts' and kind != 'replies':\n kind = 'photos'\n tags = []\n for tag in t.get('entites', {}).get('hashtags', []):\n tags.append(tag['text'].lower())\n parsed_tags = re.findall('\\\\s#(\\\\w+)', ' ' + content)\n for tag in parsed_tags:\n if tag not in tags:\n tags.append(tag.lower())\n for tag in auto_tags:\n if tag not in tags:\n tags.append(tag)\n if len(tags) > 0:\n post['tags'] = tags\n post['source'] = 'twitter'\n outdir = contentdir / kind / d.strftime('%Y') / d.strftime('%m')\n if len(media) > 0:\n outdir = outdir / id\n if not outdir.exists():\n outdir.mkdir(parents=True)\n if len(media) > 0:\n outfile = outdir / 'index.md'\n for imgfile in mediadir.glob(id + '*.*'):\n to_file = outdir / imgfile.name\n shutil.copy(str(imgfile), str(to_file))\n else:\n outfile = outdir / (id + '.md')\n newfile = frontmatter.dumps(post)\n with outfile.open('w', encoding='UTF-8') as w:\n w.write(newfile)\n return True\n\n\n<mask token>\n\n\ndef process_tweet(d1):\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,\n d1['id_str'])\n if orig_tweet_url in urlmap:\n og = urlmap.get(orig_tweet_url)\n if og['source_path'].startswith('post\\\\') or og['source_path'\n ].startswith('photos\\\\'):\n return True\n tweet_source = d1['source']\n for s in syndicated_sources:\n if tweet_source.find(s) >= 0:\n for u in d1.get('entities', {}).get('urls', []):\n raw_url = u['url']\n url = u['expanded_url']\n if process_syn_url(d1, raw_url, url):\n return True\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , d1['full_text'])\n for raw_url in raw_urls:\n if process_syn_url(d1, raw_url, raw_url):\n return True\n break\n return create_post(d1)\n\n\ndef import_all():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n with 
Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n if debug_id is not None and d1['id_str'] != debug_id:\n continue\n if process_tweet(d1):\n continue\n tweet_source = d1['source']\n if tweet_source not in countbysource:\n countbysource[tweet_source] = 1\n else:\n countbysource[tweet_source] = countbysource[tweet_source] + 1\n is_reply = False\n if ('in_reply_to_status_id_str' in d1 and \n 'in_reply_to_screen_name' in d1):\n replies = replies + 1\n is_reply = True\n is_retweet = False\n content = d1['full_text']\n if content.startswith('RT @'):\n retweets = retweets + 1\n is_retweet = True\n media = []\n if 'extended_entities' in d1:\n for m in d1['extended_entities']['media']:\n media.append(m['media_url_https'])\n if len(media) > 0:\n withmedia = withmedia + 1\n if not is_reply and not is_retweet and len(media) == 0:\n raw = raw + 1\n idx = idx + 1\n resolver.save_cache()\n for source in countbysource:\n print('countbysource: %s = %s' % (source, countbysource[source]))\n print('replies: %s' % replies)\n print('retweets: %s' % retweets)\n print('withmedia: %s' % withmedia)\n print('raw: %s' % raw)\n print('total: %s' % idx)\n\n\n<mask token>\n\n\ndef stats():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n count_by_year = {}\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for t in d:\n dt = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')\n count_by_year[dt.year] = count_by_year.get(dt.year, 0) + 1\n print(json.dumps(count_by_year, indent=2))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef loadurlmap(cleanupdupes=False):\n blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])\n urlmapfile = blogdir / 'urlmap.json'\n urlmap = {}\n urlmapdupes = {}\n with urlmapfile.open(encoding='UTF-8') as f:\n tempurlmap = json.loads(f.read())\n for u in tempurlmap:\n u1 = tempurlmap[u]\n if 'syndicated' in u1:\n for s in u1['syndicated']:\n if 'url' in s:\n su = s['url']\n if su in urlmap:\n if su not in urlmapdupes:\n urlmapdupes[su] = [u1, urlmap[su]]\n else:\n urlmapdupes[su].append(u1)\n else:\n urlmap[su] = u1\n urlmap[u] = u1\n title = u1.get('title', '').strip()\n if len(title) > 0:\n urlmap[title] = u1\n if cleanupdupes:\n for su in urlmapdupes:\n dupes = urlmapdupes[su]\n canonical = None\n for_deletion = []\n for d in dupes:\n if d['source_path'].startswith('post') or d['source_path'\n ].startswith('links') or len(d['syndicated']) > 2:\n if canonical is not None:\n print(\n '\\n\\r##### WTH. More than one canonical urls were detected for %s'\n % su)\n print(json.dumps(dupes, indent=4))\n canonical = d\n else:\n for_deletion.append(d)\n if canonical is None:\n print(\n '##### Dupes were detected for %s but no canonical url found!'\n % su)\n print(dupes)\n else:\n urlmap[su] = canonical\n for d in for_deletion:\n source_path = Path(d['source_path'])\n full_path = contentdir / source_path\n if full_path.exists():\n os.remove(str(full_path))\n return urlmap\n\n\n<mask token>\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\ndef add_syndication(mdfile, url, stype):\n with mdfile.open(encoding='UTF-8') as f:\n try:\n post = frontmatter.load(f)\n except:\n print('Error parsing file')\n return\n if post.get('syndicated') == None:\n post['syndicated'] = []\n else:\n for s in post['syndicated']:\n if s['type'] == stype and s['url'] == url:\n return\n post['syndicated'].append({'type': stype, 'url': url})\n newfile = frontmatter.dumps(post)\n with mdfile.open('w', encoding='UTF-8') as w:\n 
w.write(newfile)\n\n\ndef get_content(t):\n content = t['full_text']\n if 'entities' in t:\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , content)\n for m in t['entities']['user_mentions']:\n screen_name = m['screen_name']\n mdlink = '[@%s](https://twitter.com/%s/)' % (screen_name,\n screen_name)\n content = content.replace('@' + screen_name, mdlink)\n processed_urls = []\n for u in t['entities']['urls']:\n url = u['url']\n processed_urls.append(url)\n expanded_url = u['expanded_url']\n processed_urls.append(expanded_url)\n expanded_url, no_errors = resolver.get_final_url(expanded_url)\n processed_urls.append(expanded_url)\n content = content.replace(url, expanded_url)\n for raw_url in raw_urls:\n if raw_url not in processed_urls:\n expanded_url, no_errors = resolver.get_final_url(raw_url)\n content = content.replace(raw_url, expanded_url)\n return content\n\n\ndef create_post(t):\n id = t['id_str']\n d = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')\n content = get_content(t)\n post = frontmatter.Post(content)\n post['date'] = d\n post['syndicated'] = [{'type': 'twitter', 'url': \n 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME, t['id'])}]\n kind = 'notes'\n if 'in_reply_to_status_id_str' in t and 'in_reply_to_screen_name' in t:\n kind = 'replies'\n post['reply_to'] = {'type': 'twitter', 'url': \n 'https://twitter.com/%s/statuses/%s/' % (t[\n 'in_reply_to_screen_name'], t['in_reply_to_status_id_str']),\n 'name': t['in_reply_to_screen_name'], 'label': \"%s's tweet\" % t\n ['in_reply_to_screen_name']}\n elif t['full_text'].startswith('RT @'):\n rc = retweetscache.get(id)\n if rc is None:\n pass\n elif 'retweeted_user' in rc:\n kind = 'reposts'\n post['repost_source'] = {'type': 'twitter', 'name': rc[\n 'retweeted_user'], 'url': \n 'https://twitter.com/%s/statuses/%s/' % (rc[\n 'retweeted_user'], rc['retweeted_id'])}\n else:\n pass\n media = []\n for m in 
t.get('extended_entities', {}).get('media', []):\n media.append(m['media_url_https'])\n if len(media) > 0:\n if kind != 'reposts' and kind != 'replies':\n kind = 'photos'\n tags = []\n for tag in t.get('entites', {}).get('hashtags', []):\n tags.append(tag['text'].lower())\n parsed_tags = re.findall('\\\\s#(\\\\w+)', ' ' + content)\n for tag in parsed_tags:\n if tag not in tags:\n tags.append(tag.lower())\n for tag in auto_tags:\n if tag not in tags:\n tags.append(tag)\n if len(tags) > 0:\n post['tags'] = tags\n post['source'] = 'twitter'\n outdir = contentdir / kind / d.strftime('%Y') / d.strftime('%m')\n if len(media) > 0:\n outdir = outdir / id\n if not outdir.exists():\n outdir.mkdir(parents=True)\n if len(media) > 0:\n outfile = outdir / 'index.md'\n for imgfile in mediadir.glob(id + '*.*'):\n to_file = outdir / imgfile.name\n shutil.copy(str(imgfile), str(to_file))\n else:\n outfile = outdir / (id + '.md')\n newfile = frontmatter.dumps(post)\n with outfile.open('w', encoding='UTF-8') as w:\n w.write(newfile)\n return True\n\n\ndef process_syn_url(d1, raw_url, url):\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,\n d1['id_str'])\n url, no_errors = resolver.get_final_url(url)\n if not no_errors:\n print(d1['full_text'])\n url = url.replace('www.instagram.com', 'instagram.com')\n url = url.replace('/roytang0400', '')\n url = urldefrag(url)[0]\n if url.find('instagram.com') >= 0 and url.find('?') >= 0:\n url = url.split('?')[0]\n if url in urlmap:\n u = urlmap[url]\n source_path = Path(u['source_path'])\n full_path = contentdir / source_path\n add_syndication(full_path, orig_tweet_url, 'twitter')\n return True\n if url.find('://roytang.net') >= 0 or url.find('://mtgstorm.com') >= 0:\n link_url = urlparse(url)\n u = urlmap.get(link_url.path, None)\n if u is None:\n title_search_term = d1['full_text']\n title_search_term = title_search_term.replace('New blog post: ', ''\n )\n title_search_term = title_search_term.replace('New post: ', 
'')\n title_search_term = title_search_term.replace(raw_url, '')\n title_search_term = title_search_term.strip()\n u = urlmap.get(title_search_term, None)\n if u is not None:\n source_path = Path(u['source_path'])\n full_path = contentdir / source_path\n add_syndication(full_path, orig_tweet_url, 'twitter')\n return True\n else:\n print('######## Unmatched roytang url: %s' % url)\n print(d1['full_text'])\n return True\n return False\n\n\ndef process_tweet(d1):\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,\n d1['id_str'])\n if orig_tweet_url in urlmap:\n og = urlmap.get(orig_tweet_url)\n if og['source_path'].startswith('post\\\\') or og['source_path'\n ].startswith('photos\\\\'):\n return True\n tweet_source = d1['source']\n for s in syndicated_sources:\n if tweet_source.find(s) >= 0:\n for u in d1.get('entities', {}).get('urls', []):\n raw_url = u['url']\n url = u['expanded_url']\n if process_syn_url(d1, raw_url, url):\n return True\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , d1['full_text'])\n for raw_url in raw_urls:\n if process_syn_url(d1, raw_url, raw_url):\n return True\n break\n return create_post(d1)\n\n\ndef import_all():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n if debug_id is not None and d1['id_str'] != debug_id:\n continue\n if process_tweet(d1):\n continue\n tweet_source = d1['source']\n if tweet_source not in countbysource:\n countbysource[tweet_source] = 1\n else:\n countbysource[tweet_source] = countbysource[tweet_source] + 1\n is_reply = False\n if ('in_reply_to_status_id_str' in d1 and \n 'in_reply_to_screen_name' in d1):\n replies = replies + 1\n is_reply = True\n is_retweet = False\n content = d1['full_text']\n if content.startswith('RT @'):\n retweets = retweets + 1\n is_retweet = True\n media = []\n if 
'extended_entities' in d1:\n for m in d1['extended_entities']['media']:\n media.append(m['media_url_https'])\n if len(media) > 0:\n withmedia = withmedia + 1\n if not is_reply and not is_retweet and len(media) == 0:\n raw = raw + 1\n idx = idx + 1\n resolver.save_cache()\n for source in countbysource:\n print('countbysource: %s = %s' % (source, countbysource[source]))\n print('replies: %s' % replies)\n print('retweets: %s' % retweets)\n print('withmedia: %s' % withmedia)\n print('raw: %s' % raw)\n print('total: %s' % idx)\n\n\ndef thread_replies():\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n d = reversed(d)\n for d1 in d:\n is_reply = False\n if ('in_reply_to_status_id_str' in d1 and \n 'in_reply_to_screen_name' in d1):\n is_reply = True\n if not is_reply:\n continue\n id_str = d1['id_str']\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (\n TWITTER_USERNAME, id_str)\n if orig_tweet_url in urlmap:\n continue\n date = datetime.strptime(d1['created_at'],\n '%a %b %d %H:%M:%S %z %Y')\n if d1['in_reply_to_screen_name'] == TWITTER_USERNAME:\n replied_to_url = 'https://twitter.com/%s/statuses/%s/' % (d1\n ['in_reply_to_screen_name'], d1[\n 'in_reply_to_status_id_str'])\n info = urlmap[replied_to_url]\n source_path = Path(info['source_path'])\n full_path = contentdir / source_path\n if full_path.name == 'index.md':\n parentdir = full_path.parent\n else:\n parentdir = full_path.parent / full_path.stem\n if not parentdir.exists():\n parentdir.mkdir(parents=True)\n oldfile = full_path\n full_path = parentdir / 'index.md'\n shutil.move(str(oldfile), str(full_path))\n urlmap[replied_to_url]['source_path'] = str(full_path.\n relative_to(contentdir))\n with full_path.open(encoding='UTF-8') as f:\n try:\n post = frontmatter.load(f)\n except:\n print('Error parsing file')\n return\n post['syndicated'].append({'type': 'twitter', 'url':\n orig_tweet_url})\n content = get_content(d1)\n post.content = post.content + '\\n\\r' + 
content\n newfile = frontmatter.dumps(post)\n with full_path.open('w', encoding='UTF-8') as w:\n w.write(newfile)\n media = []\n for m in d1.get('extended_entities', {}).get('media', []):\n media.append(m['media_url_https'])\n for imgfile in mediadir.glob(d1['id_str'] + '*.*'):\n to_file = parentdir / imgfile.name\n shutil.copy(str(imgfile), str(to_file))\n oldfile = contentdir / 'replies' / date.strftime('%Y'\n ) / date.strftime('%m') / (id_str + '.md')\n if oldfile.exists():\n os.remove(str(oldfile))\n oldfolder = contentdir / 'replies' / date.strftime('%Y'\n ) / date.strftime('%m') / id_str\n if oldfolder.exists():\n shutil.rmtree(str(oldfolder))\n urlmap[orig_tweet_url] = info\n else:\n continue\n idx = idx + 1\n print(idx)\n\n\n<mask token>\n\n\ndef cleanup_videos():\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (\n TWITTER_USERNAME, d1['id_str'])\n info = urlmap.get(orig_tweet_url)\n if info is None:\n continue\n for m in d1.get('extended_entities', {}).get('media', []):\n if 'video_info' in m:\n videos = []\n lowest_bitrate = 1000000000000\n lowest_video = ''\n for vi in m['video_info']['variants']:\n if 'bitrate' in vi:\n videos.append(vi['url'])\n bitrate = int(vi['bitrate'])\n if bitrate < lowest_bitrate:\n lowest_video = vi['url']\n lowest_bitrate = bitrate\n mdfile = urlmap_to_mdfile(info)\n if str(mdfile).find('\\\\photos\\\\') >= 0:\n print(mdfile)\n p = PostBuilder.from_mdfile(mdfile)\n p.kind = 'notes'\n p.save()\n container = mdfile.parent\n for f in container.iterdir():\n os.remove(str(f))\n container.rmdir()\n continue\n for v in videos:\n if v == lowest_video:\n continue\n name = Path(v).name\n if name.find('?') >= 0:\n name = name.split('?')[0]\n vfilename = d1['id_str'] + '-' + name\n vfile = container / vfilename\n print(vfile)\n os.remove(str(vfile))\n\n\ndef stats():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 
0\n raw = 0\n count_by_year = {}\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for t in d:\n dt = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')\n count_by_year[dt.year] = count_by_year.get(dt.year, 0) + 1\n print(json.dumps(count_by_year, indent=2))\n\n\n<mask token>\n",
"step-4": "SOURCE_FILE = 'D:\\\\temp\\\\twitter\\\\tweet.js'\nTWITTER_USERNAME = 'roytang'\nauto_tags = ['mtg']\nsyndicated_sources = ['IFTTT', 'Tumblr', 'instagram.com', 'Mailchimp',\n 'Twitter Web', 'TweetDeck', 'mtgstorm']\ndebug_id = None\nimport frontmatter\nimport json\nimport requests\nimport urllib.request\nfrom urllib.parse import urlparse, parse_qs, urldefrag\nfrom urllib.error import HTTPError\nimport sys\nfrom pathlib import Path\nimport os, shutil\nimport inspect\nfrom datetime import datetime\nimport re\nfrom utils import loadurlmap, load_map_from_json, URLResolver, PostBuilder\ncwd = Path.cwd()\ncontentdir = cwd / 'content'\nblogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])\nmediadir = Path('D:\\\\temp\\\\roy_mtg-twitter\\\\tweet_media')\nretweetscache = load_map_from_json('d:\\\\temp\\\\twitter\\\\retweets.json')\nresolver = URLResolver()\n\n\ndef loadurlmap(cleanupdupes=False):\n blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])\n urlmapfile = blogdir / 'urlmap.json'\n urlmap = {}\n urlmapdupes = {}\n with urlmapfile.open(encoding='UTF-8') as f:\n tempurlmap = json.loads(f.read())\n for u in tempurlmap:\n u1 = tempurlmap[u]\n if 'syndicated' in u1:\n for s in u1['syndicated']:\n if 'url' in s:\n su = s['url']\n if su in urlmap:\n if su not in urlmapdupes:\n urlmapdupes[su] = [u1, urlmap[su]]\n else:\n urlmapdupes[su].append(u1)\n else:\n urlmap[su] = u1\n urlmap[u] = u1\n title = u1.get('title', '').strip()\n if len(title) > 0:\n urlmap[title] = u1\n if cleanupdupes:\n for su in urlmapdupes:\n dupes = urlmapdupes[su]\n canonical = None\n for_deletion = []\n for d in dupes:\n if d['source_path'].startswith('post') or d['source_path'\n ].startswith('links') or len(d['syndicated']) > 2:\n if canonical is not None:\n print(\n '\\n\\r##### WTH. 
More than one canonical urls were detected for %s'\n % su)\n print(json.dumps(dupes, indent=4))\n canonical = d\n else:\n for_deletion.append(d)\n if canonical is None:\n print(\n '##### Dupes were detected for %s but no canonical url found!'\n % su)\n print(dupes)\n else:\n urlmap[su] = canonical\n for d in for_deletion:\n source_path = Path(d['source_path'])\n full_path = contentdir / source_path\n if full_path.exists():\n os.remove(str(full_path))\n return urlmap\n\n\nurlmap = loadurlmap(False)\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\ndef add_syndication(mdfile, url, stype):\n with mdfile.open(encoding='UTF-8') as f:\n try:\n post = frontmatter.load(f)\n except:\n print('Error parsing file')\n return\n if post.get('syndicated') == None:\n post['syndicated'] = []\n else:\n for s in post['syndicated']:\n if s['type'] == stype and s['url'] == url:\n return\n post['syndicated'].append({'type': stype, 'url': url})\n newfile = frontmatter.dumps(post)\n with mdfile.open('w', encoding='UTF-8') as w:\n w.write(newfile)\n\n\ndef get_content(t):\n content = t['full_text']\n if 'entities' in t:\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , content)\n for m in t['entities']['user_mentions']:\n screen_name = m['screen_name']\n mdlink = '[@%s](https://twitter.com/%s/)' % (screen_name,\n screen_name)\n content = content.replace('@' + screen_name, mdlink)\n processed_urls = []\n for u in t['entities']['urls']:\n url = u['url']\n processed_urls.append(url)\n expanded_url = u['expanded_url']\n processed_urls.append(expanded_url)\n expanded_url, no_errors = resolver.get_final_url(expanded_url)\n processed_urls.append(expanded_url)\n content = content.replace(url, expanded_url)\n for raw_url in raw_urls:\n if raw_url not in processed_urls:\n expanded_url, no_errors = resolver.get_final_url(raw_url)\n content = content.replace(raw_url, expanded_url)\n return 
content\n\n\ndef create_post(t):\n id = t['id_str']\n d = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')\n content = get_content(t)\n post = frontmatter.Post(content)\n post['date'] = d\n post['syndicated'] = [{'type': 'twitter', 'url': \n 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME, t['id'])}]\n kind = 'notes'\n if 'in_reply_to_status_id_str' in t and 'in_reply_to_screen_name' in t:\n kind = 'replies'\n post['reply_to'] = {'type': 'twitter', 'url': \n 'https://twitter.com/%s/statuses/%s/' % (t[\n 'in_reply_to_screen_name'], t['in_reply_to_status_id_str']),\n 'name': t['in_reply_to_screen_name'], 'label': \"%s's tweet\" % t\n ['in_reply_to_screen_name']}\n elif t['full_text'].startswith('RT @'):\n rc = retweetscache.get(id)\n if rc is None:\n pass\n elif 'retweeted_user' in rc:\n kind = 'reposts'\n post['repost_source'] = {'type': 'twitter', 'name': rc[\n 'retweeted_user'], 'url': \n 'https://twitter.com/%s/statuses/%s/' % (rc[\n 'retweeted_user'], rc['retweeted_id'])}\n else:\n pass\n media = []\n for m in t.get('extended_entities', {}).get('media', []):\n media.append(m['media_url_https'])\n if len(media) > 0:\n if kind != 'reposts' and kind != 'replies':\n kind = 'photos'\n tags = []\n for tag in t.get('entites', {}).get('hashtags', []):\n tags.append(tag['text'].lower())\n parsed_tags = re.findall('\\\\s#(\\\\w+)', ' ' + content)\n for tag in parsed_tags:\n if tag not in tags:\n tags.append(tag.lower())\n for tag in auto_tags:\n if tag not in tags:\n tags.append(tag)\n if len(tags) > 0:\n post['tags'] = tags\n post['source'] = 'twitter'\n outdir = contentdir / kind / d.strftime('%Y') / d.strftime('%m')\n if len(media) > 0:\n outdir = outdir / id\n if not outdir.exists():\n outdir.mkdir(parents=True)\n if len(media) > 0:\n outfile = outdir / 'index.md'\n for imgfile in mediadir.glob(id + '*.*'):\n to_file = outdir / imgfile.name\n shutil.copy(str(imgfile), str(to_file))\n else:\n outfile = outdir / (id + '.md')\n newfile = 
frontmatter.dumps(post)\n with outfile.open('w', encoding='UTF-8') as w:\n w.write(newfile)\n return True\n\n\ndef process_syn_url(d1, raw_url, url):\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,\n d1['id_str'])\n url, no_errors = resolver.get_final_url(url)\n if not no_errors:\n print(d1['full_text'])\n url = url.replace('www.instagram.com', 'instagram.com')\n url = url.replace('/roytang0400', '')\n url = urldefrag(url)[0]\n if url.find('instagram.com') >= 0 and url.find('?') >= 0:\n url = url.split('?')[0]\n if url in urlmap:\n u = urlmap[url]\n source_path = Path(u['source_path'])\n full_path = contentdir / source_path\n add_syndication(full_path, orig_tweet_url, 'twitter')\n return True\n if url.find('://roytang.net') >= 0 or url.find('://mtgstorm.com') >= 0:\n link_url = urlparse(url)\n u = urlmap.get(link_url.path, None)\n if u is None:\n title_search_term = d1['full_text']\n title_search_term = title_search_term.replace('New blog post: ', ''\n )\n title_search_term = title_search_term.replace('New post: ', '')\n title_search_term = title_search_term.replace(raw_url, '')\n title_search_term = title_search_term.strip()\n u = urlmap.get(title_search_term, None)\n if u is not None:\n source_path = Path(u['source_path'])\n full_path = contentdir / source_path\n add_syndication(full_path, orig_tweet_url, 'twitter')\n return True\n else:\n print('######## Unmatched roytang url: %s' % url)\n print(d1['full_text'])\n return True\n return False\n\n\ndef process_tweet(d1):\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (TWITTER_USERNAME,\n d1['id_str'])\n if orig_tweet_url in urlmap:\n og = urlmap.get(orig_tweet_url)\n if og['source_path'].startswith('post\\\\') or og['source_path'\n ].startswith('photos\\\\'):\n return True\n tweet_source = d1['source']\n for s in syndicated_sources:\n if tweet_source.find(s) >= 0:\n for u in d1.get('entities', {}).get('urls', []):\n raw_url = u['url']\n url = u['expanded_url']\n if 
process_syn_url(d1, raw_url, url):\n return True\n raw_urls = re.findall(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n , d1['full_text'])\n for raw_url in raw_urls:\n if process_syn_url(d1, raw_url, raw_url):\n return True\n break\n return create_post(d1)\n\n\ndef import_all():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n if debug_id is not None and d1['id_str'] != debug_id:\n continue\n if process_tweet(d1):\n continue\n tweet_source = d1['source']\n if tweet_source not in countbysource:\n countbysource[tweet_source] = 1\n else:\n countbysource[tweet_source] = countbysource[tweet_source] + 1\n is_reply = False\n if ('in_reply_to_status_id_str' in d1 and \n 'in_reply_to_screen_name' in d1):\n replies = replies + 1\n is_reply = True\n is_retweet = False\n content = d1['full_text']\n if content.startswith('RT @'):\n retweets = retweets + 1\n is_retweet = True\n media = []\n if 'extended_entities' in d1:\n for m in d1['extended_entities']['media']:\n media.append(m['media_url_https'])\n if len(media) > 0:\n withmedia = withmedia + 1\n if not is_reply and not is_retweet and len(media) == 0:\n raw = raw + 1\n idx = idx + 1\n resolver.save_cache()\n for source in countbysource:\n print('countbysource: %s = %s' % (source, countbysource[source]))\n print('replies: %s' % replies)\n print('retweets: %s' % retweets)\n print('withmedia: %s' % withmedia)\n print('raw: %s' % raw)\n print('total: %s' % idx)\n\n\ndef thread_replies():\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n d = reversed(d)\n for d1 in d:\n is_reply = False\n if ('in_reply_to_status_id_str' in d1 and \n 'in_reply_to_screen_name' in d1):\n is_reply = True\n if not is_reply:\n continue\n id_str = d1['id_str']\n orig_tweet_url = 'https://twitter.com/%s/statuses/%s/' % (\n TWITTER_USERNAME, id_str)\n 
if orig_tweet_url in urlmap:\n continue\n date = datetime.strptime(d1['created_at'],\n '%a %b %d %H:%M:%S %z %Y')\n if d1['in_reply_to_screen_name'] == TWITTER_USERNAME:\n replied_to_url = 'https://twitter.com/%s/statuses/%s/' % (d1\n ['in_reply_to_screen_name'], d1[\n 'in_reply_to_status_id_str'])\n info = urlmap[replied_to_url]\n source_path = Path(info['source_path'])\n full_path = contentdir / source_path\n if full_path.name == 'index.md':\n parentdir = full_path.parent\n else:\n parentdir = full_path.parent / full_path.stem\n if not parentdir.exists():\n parentdir.mkdir(parents=True)\n oldfile = full_path\n full_path = parentdir / 'index.md'\n shutil.move(str(oldfile), str(full_path))\n urlmap[replied_to_url]['source_path'] = str(full_path.\n relative_to(contentdir))\n with full_path.open(encoding='UTF-8') as f:\n try:\n post = frontmatter.load(f)\n except:\n print('Error parsing file')\n return\n post['syndicated'].append({'type': 'twitter', 'url':\n orig_tweet_url})\n content = get_content(d1)\n post.content = post.content + '\\n\\r' + content\n newfile = frontmatter.dumps(post)\n with full_path.open('w', encoding='UTF-8') as w:\n w.write(newfile)\n media = []\n for m in d1.get('extended_entities', {}).get('media', []):\n media.append(m['media_url_https'])\n for imgfile in mediadir.glob(d1['id_str'] + '*.*'):\n to_file = parentdir / imgfile.name\n shutil.copy(str(imgfile), str(to_file))\n oldfile = contentdir / 'replies' / date.strftime('%Y'\n ) / date.strftime('%m') / (id_str + '.md')\n if oldfile.exists():\n os.remove(str(oldfile))\n oldfolder = contentdir / 'replies' / date.strftime('%Y'\n ) / date.strftime('%m') / id_str\n if oldfolder.exists():\n shutil.rmtree(str(oldfolder))\n urlmap[orig_tweet_url] = info\n else:\n continue\n idx = idx + 1\n print(idx)\n\n\nfrom utils import urlmap_to_mdfile\n\n\ndef cleanup_videos():\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n orig_tweet_url = 
'https://twitter.com/%s/statuses/%s/' % (\n TWITTER_USERNAME, d1['id_str'])\n info = urlmap.get(orig_tweet_url)\n if info is None:\n continue\n for m in d1.get('extended_entities', {}).get('media', []):\n if 'video_info' in m:\n videos = []\n lowest_bitrate = 1000000000000\n lowest_video = ''\n for vi in m['video_info']['variants']:\n if 'bitrate' in vi:\n videos.append(vi['url'])\n bitrate = int(vi['bitrate'])\n if bitrate < lowest_bitrate:\n lowest_video = vi['url']\n lowest_bitrate = bitrate\n mdfile = urlmap_to_mdfile(info)\n if str(mdfile).find('\\\\photos\\\\') >= 0:\n print(mdfile)\n p = PostBuilder.from_mdfile(mdfile)\n p.kind = 'notes'\n p.save()\n container = mdfile.parent\n for f in container.iterdir():\n os.remove(str(f))\n container.rmdir()\n continue\n for v in videos:\n if v == lowest_video:\n continue\n name = Path(v).name\n if name.find('?') >= 0:\n name = name.split('?')[0]\n vfilename = d1['id_str'] + '-' + name\n vfile = container / vfilename\n print(vfile)\n os.remove(str(vfile))\n\n\ndef stats():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n count_by_year = {}\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for t in d:\n dt = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')\n count_by_year[dt.year] = count_by_year.get(dt.year, 0) + 1\n print(json.dumps(count_by_year, indent=2))\n\n\nstats()\n",
"step-5": "SOURCE_FILE = \"D:\\\\temp\\\\twitter\\\\tweet.js\"\nTWITTER_USERNAME = 'roytang'\nauto_tags = [\"mtg\"]\nsyndicated_sources = [\"IFTTT\", \"Tumblr\", \"instagram.com\", \"Mailchimp\", \"Twitter Web\", \"TweetDeck\", \"mtgstorm\"]\ndebug_id = None\n# debug_id = \"11143081155\" \n\nimport frontmatter\nimport json\nimport requests\nimport urllib.request\nfrom urllib.parse import urlparse, parse_qs, urldefrag\nfrom urllib.error import HTTPError\nimport sys\nfrom pathlib import Path\nimport os, shutil\nimport inspect\nfrom datetime import datetime\nimport re\nfrom utils import loadurlmap, load_map_from_json, URLResolver, PostBuilder\n\ncwd = Path.cwd()\ncontentdir = cwd / \"content\"\nblogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])\nmediadir = Path(\"D:\\\\temp\\\\roy_mtg-twitter\\\\tweet_media\")\n\nretweetscache = load_map_from_json(\"d:\\\\temp\\\\twitter\\\\retweets.json\")\n\nresolver = URLResolver()\n\ndef loadurlmap(cleanupdupes=False):\n blogdir = Path(os.environ['HUGO_BLOG_OUTDIR'])\n urlmapfile = blogdir / \"urlmap.json\"\n urlmap = {}\n urlmapdupes = {}\n with urlmapfile.open(encoding=\"UTF-8\") as f:\n tempurlmap = json.loads(f.read())\n for u in tempurlmap:\n u1 = tempurlmap[u]\n if \"syndicated\" in u1:\n for s in u1['syndicated']:\n if 'url' in s:\n su = s['url']\n if su in urlmap:\n # we expect syndicated urls to be unique, \n # so if it's already in the map,\n # it must be a dupe\n # (This is really just to clean up my own mess!)\n if su not in urlmapdupes:\n urlmapdupes[su] = [u1, urlmap[su]]\n else:\n urlmapdupes[su].append(u1)\n else:\n urlmap[su] = u1\n urlmap[u] = u1\n title = u1.get(\"title\", \"\").strip()\n if len(title) > 0:\n urlmap[title] = u1\n if cleanupdupes:\n # clean up any found dupes by syndicated url\n for su in urlmapdupes:\n dupes = urlmapdupes[su]\n canonical = None\n for_deletion = []\n for d in dupes:\n if d[\"source_path\"].startswith(\"post\") or d[\"source_path\"].startswith(\"links\") or len(d['syndicated']) > 2:\n 
if canonical is not None:\n print(\"\\n\\r##### WTH. More than one canonical urls were detected for %s\" % (su))\n print(json.dumps(dupes, indent=4))\n canonical = d\n else:\n for_deletion.append(d)\n\n if canonical is None:\n print(\"##### Dupes were detected for %s but no canonical url found!\" % (su))\n print(dupes)\n else:\n urlmap[su] = canonical\n for d in for_deletion:\n source_path = Path(d['source_path'])\n full_path = contentdir / source_path\n if full_path.exists():\n os.remove(str(full_path))\n return urlmap\n\nurlmap = loadurlmap(False)\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\ndef add_syndication(mdfile, url, stype):\n with mdfile.open(encoding=\"UTF-8\") as f:\n try:\n post = frontmatter.load(f)\n except:\n print(\"Error parsing file\")\n return\n\n if post.get('syndicated') == None:\n post['syndicated'] = []\n else:\n for s in post['syndicated']:\n if s[\"type\"] == stype and s[\"url\"] == url:\n # dont add a duplicate!\n return\n\n post['syndicated'].append({\n 'type': stype,\n 'url': url\n })\n newfile = frontmatter.dumps(post)\n with mdfile.open(\"w\", encoding=\"UTF-8\") as w:\n w.write(newfile)\n \ndef get_content(t):\n content = t['full_text']\n if \"entities\" in t:\n # get raw urls in the text\n raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', content)\n # replace mentions with link\n for m in t[\"entities\"][\"user_mentions\"]:\n screen_name = m[\"screen_name\"]\n # replace with markdown link\n mdlink = \"[@%s](https://twitter.com/%s/)\" % (screen_name, screen_name)\n content = content.replace(\"@\"+screen_name, mdlink)\n processed_urls = []\n # clean urls\n for u in t[\"entities\"][\"urls\"]:\n url = u[\"url\"]\n processed_urls.append(url)\n expanded_url = u[\"expanded_url\"]\n processed_urls.append(expanded_url)\n # print(\"##### A URL!!! 
%s\" % expanded_url)\n expanded_url, no_errors = resolver.get_final_url(expanded_url)\n processed_urls.append(expanded_url)\n content = content.replace(url, expanded_url)\n\n # find urls that were not in the entities\n for raw_url in raw_urls:\n if raw_url not in processed_urls:\n expanded_url, no_errors = resolver.get_final_url(raw_url)\n content = content.replace(raw_url, expanded_url)\n\n return content\n\ndef create_post(t):\n id = t['id_str']\n d = datetime.strptime(t['created_at'], \"%a %b %d %H:%M:%S %z %Y\")\n\n content = get_content(t)\n post = frontmatter.Post(content)\n post['date'] = d\n post['syndicated'] = [\n {\n \"type\": \"twitter\",\n \"url\": \"https://twitter.com/%s/statuses/%s/\" % (TWITTER_USERNAME, t['id'])\n }\n ]\n\n kind = \"notes\"\n if \"in_reply_to_status_id_str\" in t and \"in_reply_to_screen_name\" in t:\n kind = \"replies\"\n post[\"reply_to\"] = {\n \"type\": \"twitter\",\n \"url\": \"https://twitter.com/%s/statuses/%s/\" % (t['in_reply_to_screen_name'], t['in_reply_to_status_id_str']),\n \"name\": t[\"in_reply_to_screen_name\"],\n \"label\": \"%s's tweet\" % (t[\"in_reply_to_screen_name\"]) \n }\n elif t[\"full_text\"].startswith(\"RT @\"):\n rc = retweetscache.get(id)\n if rc is None:\n # RTed status is inaccessible, we'll just render it as an ordinary note\n pass\n else:\n if \"retweeted_user\" in rc:\n kind = \"reposts\"\n post['repost_source'] = {\n \"type\": \"twitter\",\n \"name\": rc[\"retweeted_user\"],\n \"url\": \"https://twitter.com/%s/statuses/%s/\" % (rc['retweeted_user'], rc['retweeted_id'])\n } \n # dont process reposts for now\n # return False\n else:\n # 785744070027030528 fails this\n # RTed status is inaccessible, we'll just render it as an ordinary note\n pass\n\n # else:\n # # dont process others for now\n # return False\n\n media = []\n for m in t.get(\"extended_entities\", {}).get(\"media\", []):\n media.append(m[\"media_url_https\"])\n if len(media) > 0:\n if kind != \"reposts\" and kind != \"replies\":\n 
kind = \"photos\" \n \n # dont process media for now\n # return False\n\n tags = []\n for tag in t.get('entites', {}).get('hashtags', []):\n tags.append(tag['text'].lower())\n\n parsed_tags = re.findall(r\"\\s#(\\w+)\", \" \" + content)\n for tag in parsed_tags:\n if tag not in tags:\n tags.append(tag.lower())\n\n for tag in auto_tags:\n if tag not in tags:\n tags.append(tag)\n if len(tags) > 0:\n post[\"tags\"] = tags\n\n post[\"source\"] = \"twitter\"\n outdir = contentdir / kind / d.strftime(\"%Y\") / d.strftime(\"%m\")\n if len(media) > 0:\n outdir = outdir / (id)\n\n if not outdir.exists():\n outdir.mkdir(parents=True)\n\n if len(media) > 0:\n outfile = outdir / ( \"index.md\" )\n # find photos\n for imgfile in mediadir.glob(id + \"*.*\"):\n to_file = outdir / imgfile.name\n shutil.copy(str(imgfile), str(to_file)) \n else:\n outfile = outdir / ( id + \".md\" )\n\n newfile = frontmatter.dumps(post)\n with outfile.open(\"w\", encoding=\"UTF-8\") as w:\n w.write(newfile)\n return True\n\ndef process_syn_url(d1, raw_url, url):\n orig_tweet_url = \"https://twitter.com/%s/statuses/%s/\" % (TWITTER_USERNAME, d1['id_str'])\n\n url, no_errors = resolver.get_final_url(url)\n if not no_errors:\n print(d1[\"full_text\"])\n\n url = url.replace(\"www.instagram.com\", \"instagram.com\")\n url = url.replace(\"/roytang0400\", \"\")\n url = urldefrag(url)[0]\n if url.find(\"instagram.com\") >= 0 and url.find(\"?\") >= 0:\n # remove utm and other misc query params from insta urls\n url = url.split(\"?\")[0]\n if url in urlmap:\n u = urlmap[url]\n source_path = Path(u['source_path'])\n full_path = contentdir / source_path\n add_syndication(full_path, orig_tweet_url, \"twitter\")\n return True\n\n if url.find(\"://roytang.net\") >= 0 or url.find(\"://mtgstorm.com\") >= 0:\n link_url = urlparse(url)\n u = urlmap.get(link_url.path, None)\n if u is None:\n # try matching by title\n title_search_term = d1[\"full_text\"]\n title_search_term = title_search_term.replace(\"New blog post: 
\", \"\")\n title_search_term = title_search_term.replace(\"New post: \", \"\")\n title_search_term = title_search_term.replace(raw_url, \"\")\n title_search_term = title_search_term.strip()\n u = urlmap.get(title_search_term, None)\n if u is not None:\n source_path = Path(u['source_path'])\n full_path = contentdir / source_path\n add_syndication(full_path, orig_tweet_url, \"twitter\")\n return True\n else:\n print(\"######## Unmatched roytang url: %s\" % (url))\n print(d1[\"full_text\"])\n return True\n\n return False\n\ndef process_tweet(d1):\n\n orig_tweet_url = \"https://twitter.com/%s/statuses/%s/\" % (TWITTER_USERNAME, d1['id_str'])\n\n if orig_tweet_url in urlmap:\n og = urlmap.get(orig_tweet_url)\n if og['source_path'].startswith('post\\\\') or og['source_path'].startswith('photos\\\\'):\n # no need to process further any tweets that are already mapped to a post\n return True\n\n tweet_source = d1[\"source\"]\n # print(\"#### %s: %s\" % (tweet_source, orig_tweet_url))\n # detect content syndicated from elsewhere\n # instagram, tumblr, roytang.net\n for s in syndicated_sources:\n if tweet_source.find(s) >= 0:\n for u in d1.get('entities', {}).get(\"urls\", []):\n raw_url = u[\"url\"]\n url = u[\"expanded_url\"]\n if process_syn_url(d1, raw_url, url):\n return True\n # print(\"######## URL = %s\" % (url))\n\n # also process raw urls\n raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', d1[\"full_text\"])\n for raw_url in raw_urls:\n if process_syn_url(d1, raw_url, raw_url):\n return True\n break\n\n return create_post(d1)\n\ndef import_all():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n if debug_id is not None and d1[\"id_str\"] != debug_id:\n continue\n\n if process_tweet(d1):\n continue\n\n tweet_source = d1[\"source\"]\n if tweet_source not in countbysource:\n 
countbysource[tweet_source] = 1\n else:\n countbysource[tweet_source] = countbysource[tweet_source] + 1\n\n is_reply = False\n if \"in_reply_to_status_id_str\" in d1 and \"in_reply_to_screen_name\" in d1:\n replies = replies + 1\n is_reply = True\n\n # handle retweet\n is_retweet = False\n content = d1[\"full_text\"]\n if content.startswith(\"RT @\"):\n retweets = retweets + 1\n is_retweet = True\n\n media = []\n if \"extended_entities\" in d1:\n for m in d1[\"extended_entities\"][\"media\"]:\n media.append(m[\"media_url_https\"])\n\n if len(media) > 0:\n withmedia = withmedia + 1\n\n if not is_reply and not is_retweet and len(media) == 0:\n raw = raw + 1\n\n idx = idx + 1\n # if idx > 100:\n # break\n\n # save the url cache for future use\n resolver.save_cache()\n\n for source in countbysource:\n print(\"countbysource: %s = %s\" % (source, countbysource[source]))\n print(\"replies: %s\" % (replies))\n print(\"retweets: %s\" % (retweets))\n print(\"withmedia: %s\" % (withmedia))\n print(\"raw: %s\" % (raw))\n print(\"total: %s\" % (idx))\n\ndef thread_replies():\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n # process in reverse order so tweet sequences are in order\n d = reversed(d)\n for d1 in d:\n is_reply = False\n if \"in_reply_to_status_id_str\" in d1 and \"in_reply_to_screen_name\" in d1:\n is_reply = True\n if not is_reply:\n continue\n id_str = d1['id_str']\n # if id_str != \"602009895437737984\" and id_str != \"602009747294924802\":\n # continue\n orig_tweet_url = \"https://twitter.com/%s/statuses/%s/\" % (TWITTER_USERNAME, id_str)\n # dont bother if already syndicated\n if orig_tweet_url in urlmap:\n continue\n date = datetime.strptime(d1['created_at'], \"%a %b %d %H:%M:%S %z %Y\")\n # process replies to myself\n if d1[\"in_reply_to_screen_name\"] == TWITTER_USERNAME:\n replied_to_url = \"https://twitter.com/%s/statuses/%s/\" % (d1['in_reply_to_screen_name'], d1['in_reply_to_status_id_str'])\n info = 
urlmap[replied_to_url]\n source_path = Path(info['source_path'])\n full_path = contentdir / source_path\n # welp, we might as well move them to bundles\n if full_path.name == \"index.md\":\n parentdir = full_path.parent\n else:\n parentdir = full_path.parent / full_path.stem\n if not parentdir.exists():\n parentdir.mkdir(parents=True)\n oldfile = full_path\n full_path = parentdir / \"index.md\"\n shutil.move(str(oldfile), str(full_path))\n # also update the urlmap!\n urlmap[replied_to_url]['source_path'] = str(full_path.relative_to(contentdir))\n # append the reply to the original post, and add it as a syndication as well\n with full_path.open(encoding=\"UTF-8\") as f:\n try:\n post = frontmatter.load(f)\n except:\n print(\"Error parsing file\")\n return\n post['syndicated'].append({\n 'type': 'twitter',\n 'url': orig_tweet_url\n })\n content = get_content(d1)\n post.content = post.content + \"\\n\\r\" + content\n newfile = frontmatter.dumps(post)\n with full_path.open(\"w\", encoding=\"UTF-8\") as w:\n w.write(newfile)\n # copy over any media from the reply as well\n media = []\n for m in d1.get(\"extended_entities\", {}).get(\"media\", []):\n media.append(m[\"media_url_https\"])\n for imgfile in mediadir.glob(d1[\"id_str\"] + \"*.*\"):\n to_file = parentdir / imgfile.name\n shutil.copy(str(imgfile), str(to_file)) \n # delete any existing file created for this reply\n oldfile = contentdir / \"replies\" / date.strftime(\"%Y\") / date.strftime(\"%m\") / (id_str + \".md\")\n if oldfile.exists():\n os.remove(str(oldfile))\n oldfolder = contentdir / \"replies\" / date.strftime(\"%Y\") / date.strftime(\"%m\") / (id_str)\n if oldfolder.exists():\n shutil.rmtree(str(oldfolder))\n # replace this entry in the urlmap! 
this is so that succeeding replies can find the correct root tweet to attach to\n urlmap[orig_tweet_url] = info\n else:\n continue\n\n idx = idx + 1\n print(idx)\n\nfrom utils import urlmap_to_mdfile\n\ndef cleanup_videos():\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for d1 in d:\n orig_tweet_url = \"https://twitter.com/%s/statuses/%s/\" % (TWITTER_USERNAME, d1[\"id_str\"])\n info = urlmap.get(orig_tweet_url)\n if info is None:\n continue\n for m in d1.get(\"extended_entities\", {}).get(\"media\", []):\n if \"video_info\" in m:\n videos = []\n lowest_bitrate = 1000000000000\n lowest_video = \"\"\n for vi in m[\"video_info\"][\"variants\"]:\n if 'bitrate' in vi:\n videos.append(vi[\"url\"])\n bitrate = int(vi['bitrate'])\n if bitrate < lowest_bitrate:\n lowest_video = vi[\"url\"]\n lowest_bitrate = bitrate\n \n mdfile = urlmap_to_mdfile(info)\n if str(mdfile).find(\"\\\\photos\\\\\") >= 0:\n print(mdfile)\n # move it to notes, since it's not a photo\n p = PostBuilder.from_mdfile(mdfile)\n p.kind = \"notes\"\n p.save() \n # delete the old files\n container = mdfile.parent\n for f in container.iterdir():\n os.remove(str(f))\n container.rmdir()\n continue\n # delete all the video files except for the one with the lowest bitrate\n for v in videos:\n if v == lowest_video:\n continue\n name = Path(v).name\n if name.find(\"?\") >= 0:\n name = name.split(\"?\")[0]\n vfilename = d1[\"id_str\"] + \"-\" + name\n vfile = container / vfilename\n print(vfile)\n os.remove(str(vfile))\n\ndef stats():\n countbysource = {}\n replies = 0\n retweets = 0\n withmedia = 0\n raw = 0\n\n count_by_year = {}\n with Path(SOURCE_FILE).open(encoding='utf-8') as f:\n d = json.load(f)\n idx = 0\n for t in d:\n dt = datetime.strptime(t['created_at'], \"%a %b %d %H:%M:%S %z %Y\")\n count_by_year[dt.year] = count_by_year.get(dt.year, 0) + 1\n print(json.dumps(count_by_year, indent=2))\n\n\n# thread_replies()\n# import_all()\n# cleanup_videos()\nstats()\n",
"step-ids": [
3,
8,
11,
14,
15
]
}
|
[
3,
8,
11,
14,
15
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.