code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
import visgraph.dbcore as vg_dbcore
# Connection credentials for the VisGraph test database.
dbinfo = dict(
    user='visgraph',
    password='ohhai!',
    database='vg_test',
)
def vgtest_basic_database():
#vg_dbcore.initGraphDb(dbinfo)
gstore = vg_dbcore.DbGraphStore(dbinfo)
n1 = gstore.addNode(ninfo={'name':'foo', 'size':20})
n2 = gstore.addNode(ninfo={'name':'bar', 'size':300})
n3 = gstore.addNode(ninfo={'name':'baz'})
n4 = gstore.addNode(ninfo={'name':'faz'})
n5 = gstore.addNode(ninfo={'name':'yer'})
n6 = gstore.addNode(ninfo={'name':'mom'})
gstore.addEdge(n3, n4)
gstore.addEdge(n4, n5)
gstore.addEdge(n5, n6)
print gstore.getNodeInfo(n1, 'name')
print gstore.getNodeInfo(n1, 'size')
print gstore.getNodeInfo(n1, 'owoot', 20)
eid = gstore.addEdge(n1, n2, einfo={'etype':'FooEdge'})
print eid
gstore.setEdgeInfo(eid, 'name', 'asdf')
gstore.setEdgeInfo(eid, 'size', 20)
print gstore.getEdgeInfo(eid, 'size')
sg = gstore.buildSubGraph()
sg.useEdges(size=20)
#n3 = sg.addNode(ninfo={'name':'Tom Jones'})
#sg.addEdge(n2, n3, einfo={'etype':'FBFriend'})
#print sg.getRefsFrom(n2)
for eid, fromid, toid, einfo in sg.getRefsFrom(n2):
print 'NAMES: %s -> %s' % (sg.getNodeInfo(fromid, 'name', 'unknown'), sg.getNodeInfo(toid, 'name', 'unknown'))
sg.expandNode(n3, maxdepth=1)
|
normal
|
{
"blob_id": "ffee0b0e00b4cebecefc3671332af3e2ffe7491b",
"index": 8155,
"step-1": "import visgraph.dbcore as vg_dbcore\n\ndbinfo = {\n'user':'visgraph',\n'password':'ohhai!',\n'database':'vg_test',\n}\n\ndef vgtest_basic_database():\n#vg_dbcore.initGraphDb(dbinfo)\n\n gstore = vg_dbcore.DbGraphStore(dbinfo)\n\n n1 = gstore.addNode(ninfo={'name':'foo', 'size':20})\n n2 = gstore.addNode(ninfo={'name':'bar', 'size':300})\n n3 = gstore.addNode(ninfo={'name':'baz'})\n n4 = gstore.addNode(ninfo={'name':'faz'})\n n5 = gstore.addNode(ninfo={'name':'yer'})\n n6 = gstore.addNode(ninfo={'name':'mom'})\n\n gstore.addEdge(n3, n4)\n gstore.addEdge(n4, n5)\n gstore.addEdge(n5, n6)\n\n print gstore.getNodeInfo(n1, 'name')\n print gstore.getNodeInfo(n1, 'size')\n print gstore.getNodeInfo(n1, 'owoot', 20)\n\n eid = gstore.addEdge(n1, n2, einfo={'etype':'FooEdge'})\n print eid\n gstore.setEdgeInfo(eid, 'name', 'asdf')\n gstore.setEdgeInfo(eid, 'size', 20)\n print gstore.getEdgeInfo(eid, 'size')\n\n sg = gstore.buildSubGraph()\n\n sg.useEdges(size=20)\n #n3 = sg.addNode(ninfo={'name':'Tom Jones'})\n #sg.addEdge(n2, n3, einfo={'etype':'FBFriend'})\n\n #print sg.getRefsFrom(n2)\n\n for eid, fromid, toid, einfo in sg.getRefsFrom(n2):\n print 'NAMES: %s -> %s' % (sg.getNodeInfo(fromid, 'name', 'unknown'), sg.getNodeInfo(toid, 'name', 'unknown'))\n\n sg.expandNode(n3, maxdepth=1)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
'''
Created on Jan 19, 2014
@author: felix
'''
import sys
from PyPDF2 import PdfFileReader
from pytagcloud import create_tag_image, make_tags, LAYOUT_HORIZONTAL
from pytagcloud.lang.counter import get_tag_counts
def main():
    """Parse ``-f <input.pdf>`` from argv and render a tag cloud of its text."""
    for i in range(0, len(sys.argv)):
        if (sys.argv[i] == '-f'):
            try:
                content = getPDFContent(sys.argv[i+1])
            # Catch only real failures (missing argument, unreadable PDF); the
            # original bare except also swallowed KeyboardInterrupt/SystemExit.
            except Exception:
                raise RuntimeError('Something went wrong! Usage: makeCloudFromPdf -f inputfile.pdf')
            # [1:100] drops the single most frequent word and caps the cloud at 99 tags.
            tags = make_tags(get_tag_counts(content)[1:100], maxsize=100)
            create_tag_image(tags, 'cloud_large2.png', size=(1920, 1080), background=(0, 0, 0, 255), layout=LAYOUT_HORIZONTAL, fontname='Vollkorn')
def getPDFContent(path):
content = ""
p = file(path, "rb")
pdf = PdfFileReader(p)
numPages = pdf.getNumPages()
print 'pages:', numPages
for i in range(0, numPages-1):
try:
content += pdf.getPage(i).extractText() + "\n"
except:
content += ""
#content = " ".join(content.replace(u"\xa0", " ").strip().split())
return content
# Run the CLI entry point only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "899cdb5cbdbd0a57af76a5044d54e1fe2a497847",
"index": 7144,
"step-1": "'''\nCreated on Jan 19, 2014\n\n@author: felix\n'''\nimport sys\nfrom PyPDF2 import PdfFileReader\nfrom pytagcloud import create_tag_image, make_tags, LAYOUT_HORIZONTAL\nfrom pytagcloud.lang.counter import get_tag_counts\n\ndef main():\n for i in range(0, len(sys.argv)):\n if (sys.argv[i] == '-f'):\n try:\n content = getPDFContent(sys.argv[i+1])\n except:\n raise RuntimeError('Something went wrong! Usage: makeCloudFromPdf -f inputfile.pdf') \n tags = make_tags(get_tag_counts(content)[1:100], maxsize=100)\n create_tag_image(tags, 'cloud_large2.png', size=(1920, 1080), background=(0, 0, 0, 255), layout=LAYOUT_HORIZONTAL, fontname='Vollkorn')\n \ndef getPDFContent(path):\n content = \"\"\n p = file(path, \"rb\")\n pdf = PdfFileReader(p)\n numPages = pdf.getNumPages()\n print 'pages:', numPages\n for i in range(0, numPages-1):\n try:\n content += pdf.getPage(i).extractText() + \"\\n\"\n except:\n content += \"\"\n #content = \" \".join(content.replace(u\"\\xa0\", \" \").strip().split()) \n return content \n \nif __name__ == \"__main__\":\n main()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
##############
#### Your name: Alexis Vincent
##############
import numpy as np
import re
from skimage.color import convert_colorspace
from sklearn.model_selection import GridSearchCV
from sklearn import svm, metrics
from skimage import io, feature, filters, exposure, color
from skimage.feature import hog
import matplotlib.pyplot as plt
class ImageClassifier:
    """Trains and applies a linear SVM on HOG features extracted from images.

    NOTE(review): the attribute is spelled ``classifer`` (sic) throughout;
    the spelling is kept so any external readers of the attribute still work.
    """

    def __init__(self):
        self.classifer = None  # set by train_classifier()

    def imread_convert(self, f):
        """Load image file *f* as a uint8 array."""
        return io.imread(f).astype(np.uint8)

    def load_data_from_folder(self, dir):
        """Read all ``*.jpg`` in *dir*; return ``(image_array, label_array)``.

        Each label is the filename prefix up to the first underscore.
        """
        # read all images into an image collection
        ic = io.ImageCollection(dir + "*.jpg", load_func=self.imread_convert)
        # create one large array of image data
        data = io.concatenate_images(ic)
        # extract labels from image names
        labels = np.array(ic.files)
        for i, f in enumerate(labels):
            m = re.search("_", f)
            labels[i] = f[len(dir):m.start()]
        return (data, labels)

    def extract_image_features(self, data):
        # Please do not modify the header above
        """Return one HOG feature vector per image as a 2-D array (n, d)."""
        # Collect the per-image vectors in a list and stack once at the end;
        # the original np.concatenate inside the loop was O(n^2) in copies.
        vectors = []
        for pic in data:
            rescaled_picture = exposure.rescale_intensity(pic)
            feature_data = hog(rescaled_picture,
                               orientations=11,
                               pixels_per_cell=(20, 20),
                               cells_per_block=(6, 6),
                               feature_vector=True,
                               block_norm='L2-Hys')
            vectors.append(feature_data)
        # Please do not modify the return type below
        # None for empty input matches the original's behaviour.
        return np.vstack(vectors) if vectors else None

    def train_classifier(self, train_data, train_labels):
        # Please do not modify the header above
        """Fit a linear SVM on the given features/labels and store the model."""
        clf = svm.SVC(C=1, gamma=0.001, kernel='linear')
        self.classifer = clf.fit(train_data, train_labels)

    def predict_labels(self, data):
        # Please do not modify the header
        """Predict labels for *data* with the trained model."""
        predicted_labels = self.classifer.predict(data)
        # Please do not modify the return type below
        return predicted_labels

    def print_hog_pics(self, image):
        """Show *image* next to a rescaled visualisation of its HOG."""
        fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
                            cells_per_block=(1, 1), visualise=True)
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex='all', sharey='all')
        ax1.axis('off')
        ax1.imshow(image)
        ax1.set_title('Input image')
        ax1.set_adjustable('box-forced')
        # Rescale histogram for better display
        hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))
        ax2.axis('off')
        ax2.imshow(hog_image_rescaled)
        ax2.set_title('Histogram of Oriented Gradients')
        ax2.set_adjustable('box-forced')  # was ax1 again — apparent copy-paste slip
        plt.show()
def main():
    img_clf = ImageClassifier()

    # load images
    (train_raw, train_labels) = img_clf.load_data_from_folder('./train/')
    (test_raw, test_labels) = img_clf.load_data_from_folder('./test/')

    # convert images into features
    train_data = img_clf.extract_image_features(train_raw)
    test_data = img_clf.extract_image_features(test_raw)

    # train model
    img_clf.train_classifier(train_data, train_labels)

    def report(title, truth, predicted):
        # Shared scoreboard for the train/test evaluations below.
        print("\n%s results" % title)
        print("=============================")
        print("Confusion Matrix:\n", metrics.confusion_matrix(truth, predicted))
        print("Accuracy: ", metrics.accuracy_score(truth, predicted))
        print("F1 score: ", metrics.f1_score(truth, predicted, average='micro'))
        print(predicted)

    # evaluate on training data, then on the held-out test set
    report("Training", train_labels, img_clf.predict_labels(train_data))
    report("Testing", test_labels, img_clf.predict_labels(test_data))


if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "58204b4b035aa06015def7529852e882ffdd369a",
"index": 8997,
"step-1": "<mask token>\n\n\nclass ImageClassifier:\n <mask token>\n <mask token>\n <mask token>\n\n def extract_image_features(self, data):\n fd = None\n for pic in data:\n rescaled_picture = exposure.rescale_intensity(pic)\n feature_data = hog(rescaled_picture, orientations=11,\n pixels_per_cell=(20, 20), cells_per_block=(6, 6),\n feature_vector=True, block_norm='L2-Hys')\n if fd is None:\n fd = feature_data.reshape(1, feature_data.shape[0])\n else:\n fd = np.concatenate([fd, feature_data.reshape(1,\n feature_data.shape[0])])\n return fd\n\n def train_classifier(self, train_data, train_labels):\n clf = svm.SVC(C=1, gamma=0.001, kernel='linear')\n self.classifer = clf.fit(train_data, train_labels)\n\n def predict_labels(self, data):\n predicted_labels = self.classifer.predict(data)\n return predicted_labels\n\n def print_hog_pics(self, image):\n fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualise=True)\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex='all',\n sharey='all')\n ax1.axis('off')\n ax1.imshow(image)\n ax1.set_title('Input image')\n ax1.set_adjustable('box-forced')\n hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range\n =(0, 10))\n ax2.axis('off')\n ax2.imshow(hog_image_rescaled)\n ax2.set_title('Histogram of Oriented Gradients')\n ax1.set_adjustable('box-forced')\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ImageClassifier:\n\n def __init__(self):\n self.classifer = None\n <mask token>\n\n def load_data_from_folder(self, dir):\n ic = io.ImageCollection(dir + '*.jpg', load_func=self.imread_convert)\n data = io.concatenate_images(ic)\n labels = np.array(ic.files)\n for i, f in enumerate(labels):\n m = re.search('_', f)\n labels[i] = f[len(dir):m.start()]\n return data, labels\n\n def extract_image_features(self, data):\n fd = None\n for pic in data:\n rescaled_picture = exposure.rescale_intensity(pic)\n feature_data = hog(rescaled_picture, orientations=11,\n pixels_per_cell=(20, 20), cells_per_block=(6, 6),\n feature_vector=True, block_norm='L2-Hys')\n if fd is None:\n fd = feature_data.reshape(1, feature_data.shape[0])\n else:\n fd = np.concatenate([fd, feature_data.reshape(1,\n feature_data.shape[0])])\n return fd\n\n def train_classifier(self, train_data, train_labels):\n clf = svm.SVC(C=1, gamma=0.001, kernel='linear')\n self.classifer = clf.fit(train_data, train_labels)\n\n def predict_labels(self, data):\n predicted_labels = self.classifer.predict(data)\n return predicted_labels\n\n def print_hog_pics(self, image):\n fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualise=True)\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex='all',\n sharey='all')\n ax1.axis('off')\n ax1.imshow(image)\n ax1.set_title('Input image')\n ax1.set_adjustable('box-forced')\n hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range\n =(0, 10))\n ax2.axis('off')\n ax2.imshow(hog_image_rescaled)\n ax2.set_title('Histogram of Oriented Gradients')\n ax1.set_adjustable('box-forced')\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ImageClassifier:\n\n def __init__(self):\n self.classifer = None\n\n def imread_convert(self, f):\n return io.imread(f).astype(np.uint8)\n\n def load_data_from_folder(self, dir):\n ic = io.ImageCollection(dir + '*.jpg', load_func=self.imread_convert)\n data = io.concatenate_images(ic)\n labels = np.array(ic.files)\n for i, f in enumerate(labels):\n m = re.search('_', f)\n labels[i] = f[len(dir):m.start()]\n return data, labels\n\n def extract_image_features(self, data):\n fd = None\n for pic in data:\n rescaled_picture = exposure.rescale_intensity(pic)\n feature_data = hog(rescaled_picture, orientations=11,\n pixels_per_cell=(20, 20), cells_per_block=(6, 6),\n feature_vector=True, block_norm='L2-Hys')\n if fd is None:\n fd = feature_data.reshape(1, feature_data.shape[0])\n else:\n fd = np.concatenate([fd, feature_data.reshape(1,\n feature_data.shape[0])])\n return fd\n\n def train_classifier(self, train_data, train_labels):\n clf = svm.SVC(C=1, gamma=0.001, kernel='linear')\n self.classifer = clf.fit(train_data, train_labels)\n\n def predict_labels(self, data):\n predicted_labels = self.classifer.predict(data)\n return predicted_labels\n\n def print_hog_pics(self, image):\n fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualise=True)\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex='all',\n sharey='all')\n ax1.axis('off')\n ax1.imshow(image)\n ax1.set_title('Input image')\n ax1.set_adjustable('box-forced')\n hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range\n =(0, 10))\n ax2.axis('off')\n ax2.imshow(hog_image_rescaled)\n ax2.set_title('Histogram of Oriented Gradients')\n ax1.set_adjustable('box-forced')\n plt.show()\n\n\ndef main():\n img_clf = ImageClassifier()\n train_raw, train_labels = img_clf.load_data_from_folder('./train/')\n test_raw, test_labels = img_clf.load_data_from_folder('./test/')\n train_data = 
img_clf.extract_image_features(train_raw)\n test_data = img_clf.extract_image_features(test_raw)\n img_clf.train_classifier(train_data, train_labels)\n predicted_labels = img_clf.predict_labels(train_data)\n print('\\nTraining results')\n print('=============================')\n print('Confusion Matrix:\\n', metrics.confusion_matrix(train_labels,\n predicted_labels))\n print('Accuracy: ', metrics.accuracy_score(train_labels, predicted_labels))\n print('F1 score: ', metrics.f1_score(train_labels, predicted_labels,\n average='micro'))\n print(predicted_labels)\n predicted_labels = img_clf.predict_labels(test_data)\n print('\\nTesting results')\n print('=============================')\n print('Confusion Matrix:\\n', metrics.confusion_matrix(test_labels,\n predicted_labels))\n print('Accuracy: ', metrics.accuracy_score(test_labels, predicted_labels))\n print('F1 score: ', metrics.f1_score(test_labels, predicted_labels,\n average='micro'))\n print(predicted_labels)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ImageClassifier:\n\n def __init__(self):\n self.classifer = None\n\n def imread_convert(self, f):\n return io.imread(f).astype(np.uint8)\n\n def load_data_from_folder(self, dir):\n ic = io.ImageCollection(dir + '*.jpg', load_func=self.imread_convert)\n data = io.concatenate_images(ic)\n labels = np.array(ic.files)\n for i, f in enumerate(labels):\n m = re.search('_', f)\n labels[i] = f[len(dir):m.start()]\n return data, labels\n\n def extract_image_features(self, data):\n fd = None\n for pic in data:\n rescaled_picture = exposure.rescale_intensity(pic)\n feature_data = hog(rescaled_picture, orientations=11,\n pixels_per_cell=(20, 20), cells_per_block=(6, 6),\n feature_vector=True, block_norm='L2-Hys')\n if fd is None:\n fd = feature_data.reshape(1, feature_data.shape[0])\n else:\n fd = np.concatenate([fd, feature_data.reshape(1,\n feature_data.shape[0])])\n return fd\n\n def train_classifier(self, train_data, train_labels):\n clf = svm.SVC(C=1, gamma=0.001, kernel='linear')\n self.classifer = clf.fit(train_data, train_labels)\n\n def predict_labels(self, data):\n predicted_labels = self.classifer.predict(data)\n return predicted_labels\n\n def print_hog_pics(self, image):\n fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualise=True)\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex='all',\n sharey='all')\n ax1.axis('off')\n ax1.imshow(image)\n ax1.set_title('Input image')\n ax1.set_adjustable('box-forced')\n hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range\n =(0, 10))\n ax2.axis('off')\n ax2.imshow(hog_image_rescaled)\n ax2.set_title('Histogram of Oriented Gradients')\n ax1.set_adjustable('box-forced')\n plt.show()\n\n\ndef main():\n img_clf = ImageClassifier()\n train_raw, train_labels = img_clf.load_data_from_folder('./train/')\n test_raw, test_labels = img_clf.load_data_from_folder('./test/')\n train_data = 
img_clf.extract_image_features(train_raw)\n test_data = img_clf.extract_image_features(test_raw)\n img_clf.train_classifier(train_data, train_labels)\n predicted_labels = img_clf.predict_labels(train_data)\n print('\\nTraining results')\n print('=============================')\n print('Confusion Matrix:\\n', metrics.confusion_matrix(train_labels,\n predicted_labels))\n print('Accuracy: ', metrics.accuracy_score(train_labels, predicted_labels))\n print('F1 score: ', metrics.f1_score(train_labels, predicted_labels,\n average='micro'))\n print(predicted_labels)\n predicted_labels = img_clf.predict_labels(test_data)\n print('\\nTesting results')\n print('=============================')\n print('Confusion Matrix:\\n', metrics.confusion_matrix(test_labels,\n predicted_labels))\n print('Accuracy: ', metrics.accuracy_score(test_labels, predicted_labels))\n print('F1 score: ', metrics.f1_score(test_labels, predicted_labels,\n average='micro'))\n print(predicted_labels)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\r\n\r\n\r\n\r\n##############\r\n\r\n#### Your name: Alexis Vincent\r\n\r\n##############\r\n\r\n\r\n\r\nimport numpy as np\r\n\r\nimport re\r\n\r\nfrom skimage.color import convert_colorspace\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\nfrom sklearn import svm, metrics\r\n\r\nfrom skimage import io, feature, filters, exposure, color\r\n\r\nfrom skimage.feature import hog\r\n\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\n\r\n\r\nclass ImageClassifier:\r\n\r\n def __init__(self):\r\n\r\n self.classifer = None\r\n\r\n\r\n\r\n def imread_convert(self, f):\r\n\r\n return io.imread(f).astype(np.uint8)\r\n\r\n\r\n\r\n def load_data_from_folder(self, dir):\r\n\r\n # read all images into an image collection\r\n\r\n ic = io.ImageCollection(dir + \"*.jpg\", load_func=self.imread_convert)\r\n\r\n\r\n\r\n # create one large array of image data\r\n\r\n data = io.concatenate_images(ic)\r\n\r\n # extract labels from image names\r\n\r\n labels = np.array(ic.files)\r\n\r\n for i, f in enumerate(labels):\r\n\r\n m = re.search(\"_\", f)\r\n\r\n labels[i] = f[len(dir):m.start()]\r\n\r\n\r\n\r\n return (data, labels)\r\n\r\n\r\n\r\n def extract_image_features(self, data):\r\n\r\n # Please do not modify the header above\r\n\r\n # extract feature vector from image data\r\n\r\n fd = None\r\n\r\n for pic in data:\r\n\r\n #grey_picture = color.rgb2gray(pic)\r\n\r\n #gaussian_picture = filters.gaussian(pic, 1)\r\n\r\n rescaled_picture = exposure.rescale_intensity(pic)\r\n\r\n\r\n\r\n feature_data = hog(rescaled_picture,\r\n\r\n orientations=11,\r\n\r\n #pixels_per_cell=(32, 32),\r\n pixels_per_cell=(20, 20),\r\n cells_per_block=(6, 6),\r\n\r\n # transform_sqrt=True,\r\n\r\n feature_vector=True,\r\n\r\n block_norm='L2-Hys')\r\n\r\n # self.print_hog_pics(color.rgb2gray(gaussian_picture))\r\n\r\n if fd is None:\r\n\r\n fd = feature_data.reshape(1, feature_data.shape[0])\r\n\r\n else:\r\n\r\n fd = np.concatenate([fd, 
feature_data.reshape(1, feature_data.shape[0])])\r\n\r\n # Please do not modify the return type below\r\n\r\n return fd\r\n\r\n\r\n\r\n def train_classifier(self, train_data, train_labels):\r\n\r\n # Please do not modify the header above\r\n\r\n # train model and save the trained model to self.classifier\r\n\r\n clf = svm.SVC(C=1, gamma=0.001, kernel='linear')\r\n\r\n self.classifer = clf.fit(train_data, train_labels)\r\n\r\n\r\n\r\n def predict_labels(self, data):\r\n\r\n # Please do not modify the header\r\n\r\n # predict labels of test data using trained model in self.classifier\r\n\r\n # the code below expects output to be stored in predicted_labels\r\n\r\n predicted_labels = self.classifer.predict(data)\r\n\r\n # Please do not modify the return type below\r\n\r\n return predicted_labels\r\n\r\n\r\n\r\n def print_hog_pics(self, image):\r\n #orientations=8, pixels_per_cell=(16, 16) cells_per_block=(1, 1), visualise=True\r\n fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),\r\n\r\n cells_per_block=(1, 1), visualise=True)\r\n\r\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex='all', sharey='all')\r\n\r\n\r\n\r\n ax1.axis('off')\r\n\r\n ax1.imshow(image)\r\n\r\n ax1.set_title('Input image')\r\n\r\n ax1.set_adjustable('box-forced')\r\n\r\n\r\n\r\n # Rescale histogram for better display\r\n\r\n hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))\r\n\r\n\r\n\r\n ax2.axis('off')\r\n\r\n ax2.imshow(hog_image_rescaled)\r\n\r\n ax2.set_title('Histogram of Oriented Gradients')\r\n\r\n ax1.set_adjustable('box-forced')\r\n\r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\ndef main():\r\n\r\n img_clf = ImageClassifier()\r\n\r\n\r\n\r\n # load images\r\n\r\n (train_raw, train_labels) = img_clf.load_data_from_folder('./train/')\r\n\r\n (test_raw, test_labels) = img_clf.load_data_from_folder('./test/')\r\n\r\n\r\n\r\n # convert images into features\r\n\r\n train_data = img_clf.extract_image_features(train_raw)\r\n\r\n test_data = 
img_clf.extract_image_features(test_raw)\r\n\r\n\r\n\r\n # train model and test on training data\r\n\r\n img_clf.train_classifier(train_data, train_labels)\r\n\r\n\r\n\r\n predicted_labels = img_clf.predict_labels(train_data)\r\n\r\n print(\"\\nTraining results\")\r\n\r\n print(\"=============================\")\r\n\r\n print(\"Confusion Matrix:\\n\", metrics.confusion_matrix(train_labels, predicted_labels))\r\n\r\n print(\"Accuracy: \", metrics.accuracy_score(train_labels, predicted_labels))\r\n\r\n print(\"F1 score: \", metrics.f1_score(train_labels, predicted_labels, average='micro'))\r\n print(predicted_labels)\r\n\r\n\r\n\r\n # test model\r\n\r\n predicted_labels = img_clf.predict_labels(test_data)\r\n\r\n print(\"\\nTesting results\")\r\n\r\n print(\"=============================\")\r\n\r\n print(\"Confusion Matrix:\\n\", metrics.confusion_matrix(test_labels, predicted_labels))\r\n\r\n print(\"Accuracy: \", metrics.accuracy_score(test_labels, predicted_labels))\r\n\r\n print(\"F1 score: \", metrics.f1_score(test_labels, predicted_labels, average='micro'))\r\n print(predicted_labels)\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n main()",
"step-ids": [
5,
7,
9,
10,
12
]
}
|
[
5,
7,
9,
10,
12
] |
import os
import sys
import shutil
import re
# Source directories that get sorted, and the college tree they sort into.
dl_dir = '/home/acarnec/Downloads/'
college_dir = '/home/acarnec/Documents/3rdYear'
latex_dir = '/home/acarnec/Documents/Latex/'
# Three-letter module codes; matching filenames start with one of these.
modules = ['mta', 'ana', 'met', 'log', 'mat',
'lin', 'min', 'pol', 'mic', 'mte']
# Term split: the first five modules run in Michaelmas, the rest in Hilary.
michaelmas = ['mta', 'ana', 'met', 'log', 'mat']
hilary = ['lin', 'min', 'pol', 'mic', 'mte']
# Single-letter document type codes (4th character of a matching filename).
# NOTE(review): the meaning of each letter is not documented here — confirm.
types = ['A', 'H', 'Q', 'N', 'R', 'T', 'S', 'M']
# Document types that are moved outright instead of being symlinked.
nonlinkables = ['Q','R','T', 'S', 'M']
# Recognised file extensions.
exts = ['pdf', 'tex', 'djvu', 'xlsx', 'epub']
#script, path = sys.argv
def change_directory(path):
    """Make *path* the current working directory."""
    os.chdir(path)
def list_files(path):
    """Return the names of the entries in directory *path*."""
    return os.listdir(path)
def move_file_to_dir(f, dest_dir):
    """Move *f* into *dest_dir* unless a same-named entry already exists there."""
    if f not in list_files(dest_dir):
        shutil.move(f, dest_dir)
def get_key_from_values(f, catalog):
    """Return the directory path whose file list contains *f*.

    Returns None when *f* appears in no list. (The original built two unused
    scratch lists and looked each key up a second time via ``catalog[path]``.)
    """
    for path, files in catalog.items():
        if f in files:
            return path
    return None
def make_sym_links(f, source, dest_dir, catalog):
    """Create ``dest_dir/f`` as a symlink to ``source/f`` if not already present.

    The source directory recorded in *catalog* is preferred; we fall back to
    the caller-supplied *source* when *f* is not catalogued. (The original
    unconditionally clobbered *source*, producing a bogus ``None/<f>`` target
    for uncatalogued files.)
    """
    source = get_key_from_values(f, catalog) or source
    source_path = f"{source}/{f}"
    dest_path = f"{dest_dir}/{f}"
    if f not in os.listdir(dest_dir):
        os.symlink(source_path, dest_path)
def define_regex(module_names=modules, doc_types=types, exts=exts):
    """Build the filename-matching regex from the configured code lists.

    The pattern matches ``<module code><type letter>_<anything>.<extension>``.
    The original ignored *module_names* and iterated the global ``modules``;
    the parameter is honoured now (its default preserves old behaviour), and
    the manual accumulator loops are replaced with joins.
    """
    letters = ''.join(doc_types)
    module_codes = '|'.join(module_names)
    file_exts = '|'.join(exts)
    return r"(" + module_codes + "){1}[" + letters + "]{1}\_[^.]*\.(" + file_exts + ")"
def recognize_files(list_of_filenames):
    """Return the subset of *list_of_filenames* matching the module/type regex."""
    pattern = re.compile(define_regex())
    return [name for name in list_of_filenames if pattern.match(name)]
def catalog_files(directory):
    """Walk *directory*; map each subdirectory to its regex-matching files."""
    # One pass: filter each directory's file list as we encounter it
    # (the original stored everything first, then filtered in a second loop).
    catalog = {}
    for dirpath, _dirnames, filenames in os.walk(directory):
        catalog[dirpath] = recognize_files(filenames)
    return catalog
def sort_into_modules(catalog, modules=modules, types=types):
    """Group every catalogued file under its three-letter module code."""
    subject_dict = {code: [] for code in modules}
    for files in catalog.values():
        for f in files:
            prefix = f[:3]
            if prefix in subject_dict:
                subject_dict[prefix].append(f)
    return subject_dict
def sort_into_type(subject_dict, modules=modules, types=types):
    """Group files by (module code, type letter) pairs; drop empty groups."""
    buckets = {(code, t): [] for code in modules for t in types}
    for files in subject_dict.values():
        for f in files:
            # f[:3] is the module code and f[3] the type letter
            buckets[(f[:3], f[3])].append(f)
    # Take out empty lists
    return {key: group for key, group in buckets.items() if group}
def sort_to_dest_dir(subject_type_dict,
                     catalog,
                     dest_dir=college_dir,
                     sym_link_check=nonlinkables,
                     michaelmas=michaelmas):
    """File every (module, type) group into the term/module/type tree.

    Non-linkable document types are moved outright; for the rest, PDFs get a
    symlink back to their catalogued location. The *dest_dir* and
    *sym_link_check* parameters were accepted but ignored by the original
    (which used the globals directly); they are honoured now — the defaults
    preserve the old behaviour.
    """
    for code_type, files in subject_type_dict.items():
        module_code, type_letter = code_type
        term = 'Michaelmas_Term' if module_code in michaelmas else 'Hilary_Term'
        destination = f"{dest_dir}/{term}/{module_code}/{type_letter}"
        for f in files:
            if type_letter in sym_link_check:
                move_file_to_dir(f, destination)
            elif '.pdf' in f:
                source = get_key_from_values(f, catalog)
                make_sym_links(f, source, destination, catalog)
def main(sort_origin):
    """Catalog, group, and file every recognised document under *sort_origin*."""
    # Kept from the original: fails fast unless exactly one CLI argument.
    script, argument = sys.argv
    # Operate from inside the directory being sorted.
    change_directory(sort_origin)
    # Build directory -> matched-files map, then group by module and type.
    catalog = catalog_files(sort_origin)
    subject_type_dict = sort_into_type(sort_into_modules(catalog))
    # Move/symlink everything into the college tree.
    sort_to_dest_dir(subject_type_dict, catalog)
# Command-line dispatch: choose which directory (or directories) to sort.
script, argument = sys.argv
targets = {'lat': [latex_dir], 'dl': [dl_dir], 'all': [latex_dir, dl_dir]}
if argument in targets:
    for sort_origin in targets[argument]:
        main(sort_origin)
else:
    print("dl for downloads\nlat for latex\nall for both")
|
normal
|
{
"blob_id": "3989b4c2a15fa8cd54fef86f9d7150fbd0fb74cf",
"index": 8724,
"step-1": "<mask token>\n\n\ndef change_directory(path):\n \"\"\" \n Changes directory to path. \n \"\"\"\n os.chdir(path)\n\n\n<mask token>\n\n\ndef get_key_from_values(f, catalog):\n \"\"\"\n Gets the full associated with the filename in the catalog.\n \"\"\"\n L_keys = []\n L_values = []\n for path, files in catalog.items():\n if f in catalog[path]:\n return path\n\n\ndef make_sym_links(f, source, dest_dir, catalog):\n \"\"\"\n Gets file origin and makes symlink at destination.\n \"\"\"\n source = get_key_from_values(f, catalog)\n source_path = f'{source}/{f}'\n dest_path = f'{dest_dir}/{f}'\n ls = os.listdir(dest_dir)\n if f not in ls:\n os.symlink(source_path, dest_path)\n\n\ndef define_regex(module_names=modules, doc_types=types, exts=exts):\n \"\"\"\n Defines a regex in function of the global lists at the top of the program.\n \"\"\"\n letters = ''\n module_codes = ''\n file_exts = ''\n for letter in doc_types:\n letters += letter\n for ext in exts:\n if ext != exts[-1]:\n file_exts += f'{ext}|'\n else:\n file_exts += ext\n for module in modules:\n if module != modules[-1]:\n module_codes += f'{module}|'\n else:\n module_codes += module\n regex = ('(' + module_codes + '){1}[' + letters + ']{1}\\\\_[^.]*\\\\.(' +\n file_exts + ')')\n return regex\n\n\n<mask token>\n\n\ndef sort_into_modules(catalog, modules=modules, types=types):\n \"\"\"\n Returns a dictionary with module:associated file list as kv pair.\n \"\"\"\n subject_dict = {}\n for code in modules:\n subject_dict[code] = []\n for files in catalog.values():\n for f in files:\n for code in modules:\n if code == f[:3]:\n subject_dict[code].append(f)\n return subject_dict\n\n\ndef sort_into_type(subject_dict, modules=modules, types=types):\n \"\"\"\n Returns dictionary with (module, type code):associated file list\n as kv pair.\n \"\"\"\n subject_type_dict = {}\n for code in modules:\n for t in types:\n subject_type_dict[code, t] = []\n for files in subject_dict.values():\n for f in files:\n 
subject_type_dict[f[:3], f[3]].append(f)\n subject_type_dict = {t: l for t, l in subject_type_dict.items() if l != []}\n return subject_type_dict\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef change_directory(path):\n \"\"\" \n Changes directory to path. \n \"\"\"\n os.chdir(path)\n\n\n<mask token>\n\n\ndef move_file_to_dir(f, dest_dir):\n \"\"\"\n moves a file to dest_dir if it not already there\n \"\"\"\n ls = list_files(dest_dir)\n if f not in ls:\n shutil.move(f, dest_dir)\n\n\ndef get_key_from_values(f, catalog):\n \"\"\"\n Gets the full associated with the filename in the catalog.\n \"\"\"\n L_keys = []\n L_values = []\n for path, files in catalog.items():\n if f in catalog[path]:\n return path\n\n\ndef make_sym_links(f, source, dest_dir, catalog):\n \"\"\"\n Gets file origin and makes symlink at destination.\n \"\"\"\n source = get_key_from_values(f, catalog)\n source_path = f'{source}/{f}'\n dest_path = f'{dest_dir}/{f}'\n ls = os.listdir(dest_dir)\n if f not in ls:\n os.symlink(source_path, dest_path)\n\n\ndef define_regex(module_names=modules, doc_types=types, exts=exts):\n \"\"\"\n Defines a regex in function of the global lists at the top of the program.\n \"\"\"\n letters = ''\n module_codes = ''\n file_exts = ''\n for letter in doc_types:\n letters += letter\n for ext in exts:\n if ext != exts[-1]:\n file_exts += f'{ext}|'\n else:\n file_exts += ext\n for module in modules:\n if module != modules[-1]:\n module_codes += f'{module}|'\n else:\n module_codes += module\n regex = ('(' + module_codes + '){1}[' + letters + ']{1}\\\\_[^.]*\\\\.(' +\n file_exts + ')')\n return regex\n\n\n<mask token>\n\n\ndef catalog_files(directory):\n \"\"\"\n Returns a dictionary of matched files as values and\n their respective directories as keys.\n \"\"\"\n catalog = {}\n for dirpath, filename, files in os.walk(directory):\n catalog[dirpath] = files\n for dirpath, files in catalog.items():\n matched_files = recognize_files(files)\n catalog[dirpath] = matched_files\n return catalog\n\n\ndef sort_into_modules(catalog, modules=modules, types=types):\n \"\"\"\n Returns a dictionary with module:associated file list as kv pair.\n 
\"\"\"\n subject_dict = {}\n for code in modules:\n subject_dict[code] = []\n for files in catalog.values():\n for f in files:\n for code in modules:\n if code == f[:3]:\n subject_dict[code].append(f)\n return subject_dict\n\n\ndef sort_into_type(subject_dict, modules=modules, types=types):\n \"\"\"\n Returns dictionary with (module, type code):associated file list\n as kv pair.\n \"\"\"\n subject_type_dict = {}\n for code in modules:\n for t in types:\n subject_type_dict[code, t] = []\n for files in subject_dict.values():\n for f in files:\n subject_type_dict[f[:3], f[3]].append(f)\n subject_type_dict = {t: l for t, l in subject_type_dict.items() if l != []}\n return subject_type_dict\n\n\n<mask token>\n\n\ndef main(sort_origin):\n script, argument = sys.argv\n change_directory(sort_origin)\n catalog = catalog_files(sort_origin)\n subject_dict = sort_into_modules(catalog)\n subject_type_dict = sort_into_type(subject_dict)\n sort_to_dest_dir(subject_type_dict, catalog)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef change_directory(path):\n \"\"\" \n Changes directory to path. \n \"\"\"\n os.chdir(path)\n\n\ndef list_files(path):\n \"\"\"\n Returns a list of the filenames in the directory. \n \"\"\"\n ls_output = os.listdir(path)\n return ls_output\n\n\ndef move_file_to_dir(f, dest_dir):\n \"\"\"\n moves a file to dest_dir if it not already there\n \"\"\"\n ls = list_files(dest_dir)\n if f not in ls:\n shutil.move(f, dest_dir)\n\n\ndef get_key_from_values(f, catalog):\n \"\"\"\n Gets the full associated with the filename in the catalog.\n \"\"\"\n L_keys = []\n L_values = []\n for path, files in catalog.items():\n if f in catalog[path]:\n return path\n\n\ndef make_sym_links(f, source, dest_dir, catalog):\n \"\"\"\n Gets file origin and makes symlink at destination.\n \"\"\"\n source = get_key_from_values(f, catalog)\n source_path = f'{source}/{f}'\n dest_path = f'{dest_dir}/{f}'\n ls = os.listdir(dest_dir)\n if f not in ls:\n os.symlink(source_path, dest_path)\n\n\ndef define_regex(module_names=modules, doc_types=types, exts=exts):\n \"\"\"\n Defines a regex in function of the global lists at the top of the program.\n \"\"\"\n letters = ''\n module_codes = ''\n file_exts = ''\n for letter in doc_types:\n letters += letter\n for ext in exts:\n if ext != exts[-1]:\n file_exts += f'{ext}|'\n else:\n file_exts += ext\n for module in modules:\n if module != modules[-1]:\n module_codes += f'{module}|'\n else:\n module_codes += module\n regex = ('(' + module_codes + '){1}[' + letters + ']{1}\\\\_[^.]*\\\\.(' +\n file_exts + ')')\n return regex\n\n\ndef recognize_files(list_of_filenames):\n \"\"\"\n Matches list of filenames for pattern defined by the regex\n and returns a list of those files\n \"\"\"\n reg_exp = define_regex()\n pattern = re.compile(reg_exp)\n matched = []\n for filename in list_of_filenames:\n match = pattern.match(filename)\n if match != None:\n matched.append(filename)\n return matched\n\n\ndef catalog_files(directory):\n \"\"\"\n 
Returns a dictionary of matched files as values and\n their respective directories as keys.\n \"\"\"\n catalog = {}\n for dirpath, filename, files in os.walk(directory):\n catalog[dirpath] = files\n for dirpath, files in catalog.items():\n matched_files = recognize_files(files)\n catalog[dirpath] = matched_files\n return catalog\n\n\ndef sort_into_modules(catalog, modules=modules, types=types):\n \"\"\"\n Returns a dictionary with module:associated file list as kv pair.\n \"\"\"\n subject_dict = {}\n for code in modules:\n subject_dict[code] = []\n for files in catalog.values():\n for f in files:\n for code in modules:\n if code == f[:3]:\n subject_dict[code].append(f)\n return subject_dict\n\n\ndef sort_into_type(subject_dict, modules=modules, types=types):\n \"\"\"\n Returns dictionary with (module, type code):associated file list\n as kv pair.\n \"\"\"\n subject_type_dict = {}\n for code in modules:\n for t in types:\n subject_type_dict[code, t] = []\n for files in subject_dict.values():\n for f in files:\n subject_type_dict[f[:3], f[3]].append(f)\n subject_type_dict = {t: l for t, l in subject_type_dict.items() if l != []}\n return subject_type_dict\n\n\ndef sort_to_dest_dir(subject_type_dict, catalog, dest_dir=college_dir,\n sym_link_check=nonlinkables, michaelmas=michaelmas):\n \"\"\"\n Iterates through module type dictionary and specifies destination \n in college_dir in accordance to filename, if file is of a certain \n type, it is moved to destination, otherwise a symbolic link is made. 
\n \"\"\"\n for code_type, files in subject_type_dict.items():\n if code_type[0] in michaelmas:\n destination = (\n f'{college_dir}/Michaelmas_Term/{code_type[0]}/{code_type[1]}')\n else:\n destination = (\n f'{college_dir}/Hilary_Term/{code_type[0]}/{code_type[1]}')\n for f in files:\n if code_type[1] in nonlinkables:\n move_file_to_dir(f, destination)\n elif '.pdf' in f:\n source = get_key_from_values(f, catalog)\n make_sym_links(f, source, destination, catalog)\n else:\n pass\n\n\ndef main(sort_origin):\n script, argument = sys.argv\n change_directory(sort_origin)\n catalog = catalog_files(sort_origin)\n subject_dict = sort_into_modules(catalog)\n subject_type_dict = sort_into_type(subject_dict)\n sort_to_dest_dir(subject_type_dict, catalog)\n\n\n<mask token>\nif argument == 'lat':\n sort_origin = latex_dir\n main(sort_origin)\nelif argument == 'dl':\n sort_origin = dl_dir\n main(sort_origin)\nelif argument == 'all':\n main(sort_origin=latex_dir)\n main(sort_origin=dl_dir)\nelse:\n msg = 'dl for downloads\\nlat for latex\\nall for both'\n print(msg)\n",
"step-4": "import os\nimport sys\nimport shutil\nimport re\ndl_dir = '/home/acarnec/Downloads/'\ncollege_dir = '/home/acarnec/Documents/3rdYear'\nlatex_dir = '/home/acarnec/Documents/Latex/'\nmodules = ['mta', 'ana', 'met', 'log', 'mat', 'lin', 'min', 'pol', 'mic', 'mte'\n ]\nmichaelmas = ['mta', 'ana', 'met', 'log', 'mat']\nhilary = ['lin', 'min', 'pol', 'mic', 'mte']\ntypes = ['A', 'H', 'Q', 'N', 'R', 'T', 'S', 'M']\nnonlinkables = ['Q', 'R', 'T', 'S', 'M']\nexts = ['pdf', 'tex', 'djvu', 'xlsx', 'epub']\n\n\ndef change_directory(path):\n \"\"\" \n Changes directory to path. \n \"\"\"\n os.chdir(path)\n\n\ndef list_files(path):\n \"\"\"\n Returns a list of the filenames in the directory. \n \"\"\"\n ls_output = os.listdir(path)\n return ls_output\n\n\ndef move_file_to_dir(f, dest_dir):\n \"\"\"\n moves a file to dest_dir if it not already there\n \"\"\"\n ls = list_files(dest_dir)\n if f not in ls:\n shutil.move(f, dest_dir)\n\n\ndef get_key_from_values(f, catalog):\n \"\"\"\n Gets the full associated with the filename in the catalog.\n \"\"\"\n L_keys = []\n L_values = []\n for path, files in catalog.items():\n if f in catalog[path]:\n return path\n\n\ndef make_sym_links(f, source, dest_dir, catalog):\n \"\"\"\n Gets file origin and makes symlink at destination.\n \"\"\"\n source = get_key_from_values(f, catalog)\n source_path = f'{source}/{f}'\n dest_path = f'{dest_dir}/{f}'\n ls = os.listdir(dest_dir)\n if f not in ls:\n os.symlink(source_path, dest_path)\n\n\ndef define_regex(module_names=modules, doc_types=types, exts=exts):\n \"\"\"\n Defines a regex in function of the global lists at the top of the program.\n \"\"\"\n letters = ''\n module_codes = ''\n file_exts = ''\n for letter in doc_types:\n letters += letter\n for ext in exts:\n if ext != exts[-1]:\n file_exts += f'{ext}|'\n else:\n file_exts += ext\n for module in modules:\n if module != modules[-1]:\n module_codes += f'{module}|'\n else:\n module_codes += module\n regex = ('(' + module_codes + 
'){1}[' + letters + ']{1}\\\\_[^.]*\\\\.(' +\n file_exts + ')')\n return regex\n\n\ndef recognize_files(list_of_filenames):\n \"\"\"\n Matches list of filenames for pattern defined by the regex\n and returns a list of those files\n \"\"\"\n reg_exp = define_regex()\n pattern = re.compile(reg_exp)\n matched = []\n for filename in list_of_filenames:\n match = pattern.match(filename)\n if match != None:\n matched.append(filename)\n return matched\n\n\ndef catalog_files(directory):\n \"\"\"\n Returns a dictionary of matched files as values and\n their respective directories as keys.\n \"\"\"\n catalog = {}\n for dirpath, filename, files in os.walk(directory):\n catalog[dirpath] = files\n for dirpath, files in catalog.items():\n matched_files = recognize_files(files)\n catalog[dirpath] = matched_files\n return catalog\n\n\ndef sort_into_modules(catalog, modules=modules, types=types):\n \"\"\"\n Returns a dictionary with module:associated file list as kv pair.\n \"\"\"\n subject_dict = {}\n for code in modules:\n subject_dict[code] = []\n for files in catalog.values():\n for f in files:\n for code in modules:\n if code == f[:3]:\n subject_dict[code].append(f)\n return subject_dict\n\n\ndef sort_into_type(subject_dict, modules=modules, types=types):\n \"\"\"\n Returns dictionary with (module, type code):associated file list\n as kv pair.\n \"\"\"\n subject_type_dict = {}\n for code in modules:\n for t in types:\n subject_type_dict[code, t] = []\n for files in subject_dict.values():\n for f in files:\n subject_type_dict[f[:3], f[3]].append(f)\n subject_type_dict = {t: l for t, l in subject_type_dict.items() if l != []}\n return subject_type_dict\n\n\ndef sort_to_dest_dir(subject_type_dict, catalog, dest_dir=college_dir,\n sym_link_check=nonlinkables, michaelmas=michaelmas):\n \"\"\"\n Iterates through module type dictionary and specifies destination \n in college_dir in accordance to filename, if file is of a certain \n type, it is moved to destination, otherwise a 
symbolic link is made. \n \"\"\"\n for code_type, files in subject_type_dict.items():\n if code_type[0] in michaelmas:\n destination = (\n f'{college_dir}/Michaelmas_Term/{code_type[0]}/{code_type[1]}')\n else:\n destination = (\n f'{college_dir}/Hilary_Term/{code_type[0]}/{code_type[1]}')\n for f in files:\n if code_type[1] in nonlinkables:\n move_file_to_dir(f, destination)\n elif '.pdf' in f:\n source = get_key_from_values(f, catalog)\n make_sym_links(f, source, destination, catalog)\n else:\n pass\n\n\ndef main(sort_origin):\n script, argument = sys.argv\n change_directory(sort_origin)\n catalog = catalog_files(sort_origin)\n subject_dict = sort_into_modules(catalog)\n subject_type_dict = sort_into_type(subject_dict)\n sort_to_dest_dir(subject_type_dict, catalog)\n\n\nscript, argument = sys.argv\nif argument == 'lat':\n sort_origin = latex_dir\n main(sort_origin)\nelif argument == 'dl':\n sort_origin = dl_dir\n main(sort_origin)\nelif argument == 'all':\n main(sort_origin=latex_dir)\n main(sort_origin=dl_dir)\nelse:\n msg = 'dl for downloads\\nlat for latex\\nall for both'\n print(msg)\n",
"step-5": "import os \nimport sys\n\nimport shutil \nimport re\n\ndl_dir = '/home/acarnec/Downloads/'\ncollege_dir = '/home/acarnec/Documents/3rdYear'\nlatex_dir = '/home/acarnec/Documents/Latex/'\n\nmodules = ['mta', 'ana', 'met', 'log', 'mat',\n 'lin', 'min', 'pol', 'mic', 'mte']\n\nmichaelmas = ['mta', 'ana', 'met', 'log', 'mat']\nhilary = ['lin', 'min', 'pol', 'mic', 'mte']\n\ntypes = ['A', 'H', 'Q', 'N', 'R', 'T', 'S', 'M']\nnonlinkables = ['Q','R','T', 'S', 'M']\n\nexts = ['pdf', 'tex', 'djvu', 'xlsx', 'epub']\n#script, path = sys.argv \n\ndef change_directory(path):\n \"\"\" \n Changes directory to path. \n \"\"\"\n os.chdir(path)\n\ndef list_files(path):\n \"\"\"\n Returns a list of the filenames in the directory. \n \"\"\"\n ls_output = os.listdir(path)\n return ls_output\n\ndef move_file_to_dir(f, dest_dir):\n \"\"\"\n moves a file to dest_dir if it not already there\n \"\"\"\n ls = list_files(dest_dir)\n if f not in ls:\n shutil.move(f, dest_dir)\n\ndef get_key_from_values(f, catalog):\n \"\"\"\n Gets the full associated with the filename in the catalog.\n \"\"\"\n L_keys = []\n L_values = []\n for path, files in catalog.items():\n if f in catalog[path]:\n return path\n\ndef make_sym_links(f, source, dest_dir, catalog):\n \"\"\"\n Gets file origin and makes symlink at destination.\n \"\"\"\n source = get_key_from_values(f, catalog)\n source_path = f\"{source}/{f}\"\n dest_path = f\"{dest_dir}/{f}\"\n ls = os.listdir(dest_dir)\n if f not in ls: \n os.symlink(source_path, dest_path)\n\ndef define_regex(module_names=modules, doc_types=types, exts=exts):\n \"\"\"\n Defines a regex in function of the global lists at the top of the program.\n \"\"\"\n letters = ''\n module_codes = ''\n file_exts = ''\n # Populate code letter String\n for letter in doc_types:\n letters += letter\n # Populate extension string\n for ext in exts:\n if ext != exts[-1]:\n file_exts += f\"{ext}|\"\n else:\n file_exts += ext\n # Populate modules string\n for module in modules:\n if 
module != modules[-1]:\n module_codes += f\"{module}|\"\n else:\n module_codes += module\n regex = r\"(\" + module_codes + \"){1}[\" + letters + \"]{1}\\_[^.]*\\.(\" + file_exts + \")\" \n return regex\n\n\ndef recognize_files(list_of_filenames): \n \"\"\"\n Matches list of filenames for pattern defined by the regex\n and returns a list of those files\n \"\"\"\n reg_exp = define_regex()\n pattern = re.compile(reg_exp) \n matched = []\n for filename in list_of_filenames:\n match = pattern.match(filename)\n if match != None:\n matched.append(filename)\n return matched\n\ndef catalog_files(directory):\n \"\"\"\n Returns a dictionary of matched files as values and\n their respective directories as keys.\n \"\"\"\n catalog = {}\n for dirpath, filename, files in os.walk(directory):\n catalog[dirpath] = files\n for dirpath, files in catalog.items():\n matched_files = recognize_files(files)\n catalog[dirpath] = matched_files\n return catalog\n\n\ndef sort_into_modules(catalog, modules=modules, types=types):\n \"\"\"\n Returns a dictionary with module:associated file list as kv pair.\n \"\"\"\n subject_dict = {}\n for code in modules:\n subject_dict[code] = []\n for files in catalog.values():\n for f in files:\n for code in modules:\n if code == f[:3]:\n subject_dict[code].append(f)\n\n return subject_dict\n\n\ndef sort_into_type(subject_dict, modules=modules, types=types):\n \"\"\"\n Returns dictionary with (module, type code):associated file list\n as kv pair.\n \"\"\"\n subject_type_dict = {}\n for code in modules:\n for t in types:\n subject_type_dict[(code,t)] = []\n \n for files in subject_dict.values():\n for f in files:\n # f[:3] is module and f[3] is type\n subject_type_dict[(f[:3] ,f[3])].append(f)\n # Take out empty lists\n subject_type_dict = {t:l for t, l in subject_type_dict.items() if l != []}\n \n return subject_type_dict\n\ndef sort_to_dest_dir(subject_type_dict,\n catalog,\n dest_dir=college_dir, \n sym_link_check=nonlinkables, \n michaelmas=michaelmas):\n 
\"\"\"\n Iterates through module type dictionary and specifies destination \n in college_dir in accordance to filename, if file is of a certain \n type, it is moved to destination, otherwise a symbolic link is made. \n \"\"\"\n for code_type, files in subject_type_dict.items():\n if code_type[0] in michaelmas:\n destination = f\"{college_dir}/Michaelmas_Term/{code_type[0]}/{code_type[1]}\"\n else:\n destination = f\"{college_dir}/Hilary_Term/{code_type[0]}/{code_type[1]}\"\n for f in files:\n if code_type[1] in nonlinkables:\n move_file_to_dir(f, destination) \n else:\n if '.pdf' in f:\n source = get_key_from_values(f, catalog)\n make_sym_links(f, source, destination, catalog)\n else:\n pass\n \n\n\n\ndef main(sort_origin):\n script, argument = sys.argv\n # Change to directory to operate upon files\n change_directory(sort_origin)\n # Make a dictionary to track file location in sort_origin\n catalog = catalog_files(sort_origin)\n # Sort into dict with module:files\n subject_dict = sort_into_modules(catalog)\n # Sort into dict with (module, type):files\n subject_type_dict = sort_into_type(subject_dict)\n # Sort files into their respective folders\n sort_to_dest_dir(subject_type_dict, catalog) \n\nscript, argument = sys.argv\n\nif argument == 'lat':\n sort_origin = latex_dir\n main(sort_origin)\nelif argument == 'dl':\n sort_origin = dl_dir\n main(sort_origin)\nelif argument == 'all':\n main(sort_origin=latex_dir)\n main(sort_origin=dl_dir)\nelse:\n msg = \"dl for downloads\\nlat for latex\\nall for both\"\n print(msg)\n",
"step-ids": [
6,
9,
13,
15,
16
]
}
|
[
6,
9,
13,
15,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LuckDrawActivity(Base):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LuckDrawActivity(Base):
@allure.step('参加抽奖活动')
def join_luck_draw_activity(self, driver, activity_name):
self.go_user_center()
time.sleep(2)
integral_start = PagePersonalCenter(driver).get_my_integral()
coupon_amount_start = PagePersonalCenter(driver).get_my_coupon_amount()
self.go_activity()
time.sleep(2)
PageActivity(driver).click_search()
PageActivity(driver).input_activity_name(activity_name)
time.sleep(2)
PageActivity(driver).click_activity()
time.sleep(2)
return integral_start, coupon_amount_start
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import time
import allure
from libs.selenium_libs.common.base import Base
from libs.selenium_libs.page_object.page_activity import PageActivity
from libs.selenium_libs.page_object.page_personal_center import PagePersonalCenter
class LuckDrawActivity(Base):
@allure.step('参加抽奖活动')
def join_luck_draw_activity(self, driver, activity_name):
self.go_user_center()
time.sleep(2)
integral_start = PagePersonalCenter(driver).get_my_integral()
coupon_amount_start = PagePersonalCenter(driver).get_my_coupon_amount()
self.go_activity()
time.sleep(2)
PageActivity(driver).click_search()
PageActivity(driver).input_activity_name(activity_name)
time.sleep(2)
PageActivity(driver).click_activity()
time.sleep(2)
return integral_start, coupon_amount_start
<|reserved_special_token_1|>
# -*- coding=utf-8 -*-
# ! /usr/bin/env python3
"""
抽奖活动-摇一摇活动
"""
import time
import allure
from libs.selenium_libs.common.base import Base
from libs.selenium_libs.page_object.page_activity import PageActivity
from libs.selenium_libs.page_object.page_personal_center import PagePersonalCenter
class LuckDrawActivity(Base):
@allure.step('参加抽奖活动')
def join_luck_draw_activity(self, driver,activity_name):
# 进入个人中心页面
self.go_user_center()
time.sleep(2)
# 获取当前积分
integral_start = PagePersonalCenter(driver).get_my_integral()
# 获取当前卡券数
coupon_amount_start = PagePersonalCenter(driver).get_my_coupon_amount()
# 进入活动页面,等待
self.go_activity()
time.sleep(2)
# 点击搜索框
PageActivity(driver).click_search()
# 在搜索框中输入活动名称
PageActivity(driver).input_activity_name(activity_name)
time.sleep(2)
# 点击活动搜索后的第一个活动
PageActivity(driver).click_activity()
time.sleep(2)
return integral_start, coupon_amount_start
# # 检查参与活动方式
# time.sleep(1)
# way = self.driver.find_elements_by_xpath(loc.Activity.loc_luck_draw_rule)[-1].text
# # 判断抽奖形式
# if '凯德星会员即可参与抽奖' in way:
# print('免费抽奖')
# self.click_ele(loc.Activity.loc_draw_immediately)
# # 判断奖励类型
# # if self.driver.find_element_by_xpath('//div[@class="result_txt"]').text =="恭喜您抽中奖品是积分,立即兑换你想要的礼品吧!":
# # 进入个人中心页面
# self.go_user_center()
# time.sleep(2)
# # 获取当前积分
# integral_end = PagePersonalCenter.get_my_integral()
# # 获取当前卡券数
# coupon_amount_end = PagePersonalCenter.get_my_coupon_amount()
# elif '每次抽奖消耗' in way:
# print('消耗积分抽奖')
# integral_end = 0
# coupon_amount_end = 0
# else:
# print('验证积分抽奖')
# integral_end = 0
# coupon_amount_end = 0
# return integral_start, coupon_amount_start, integral_end, coupon_amount_end
# integral = filter(way.isdigit, way)
|
flexible
|
{
"blob_id": "6b1970ee2b0d24504f4dea1f2ad22a165101bfbe",
"index": 8958,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass LuckDrawActivity(Base):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass LuckDrawActivity(Base):\n\n @allure.step('参加抽奖活动')\n def join_luck_draw_activity(self, driver, activity_name):\n self.go_user_center()\n time.sleep(2)\n integral_start = PagePersonalCenter(driver).get_my_integral()\n coupon_amount_start = PagePersonalCenter(driver).get_my_coupon_amount()\n self.go_activity()\n time.sleep(2)\n PageActivity(driver).click_search()\n PageActivity(driver).input_activity_name(activity_name)\n time.sleep(2)\n PageActivity(driver).click_activity()\n time.sleep(2)\n return integral_start, coupon_amount_start\n",
"step-4": "<mask token>\nimport time\nimport allure\nfrom libs.selenium_libs.common.base import Base\nfrom libs.selenium_libs.page_object.page_activity import PageActivity\nfrom libs.selenium_libs.page_object.page_personal_center import PagePersonalCenter\n\n\nclass LuckDrawActivity(Base):\n\n @allure.step('参加抽奖活动')\n def join_luck_draw_activity(self, driver, activity_name):\n self.go_user_center()\n time.sleep(2)\n integral_start = PagePersonalCenter(driver).get_my_integral()\n coupon_amount_start = PagePersonalCenter(driver).get_my_coupon_amount()\n self.go_activity()\n time.sleep(2)\n PageActivity(driver).click_search()\n PageActivity(driver).input_activity_name(activity_name)\n time.sleep(2)\n PageActivity(driver).click_activity()\n time.sleep(2)\n return integral_start, coupon_amount_start\n",
"step-5": "# -*- coding=utf-8 -*-\n# ! /usr/bin/env python3\n\n\"\"\"\n抽奖活动-摇一摇活动\n\"\"\"\n\nimport time\nimport allure\nfrom libs.selenium_libs.common.base import Base\nfrom libs.selenium_libs.page_object.page_activity import PageActivity\nfrom libs.selenium_libs.page_object.page_personal_center import PagePersonalCenter\n\n\nclass LuckDrawActivity(Base):\n @allure.step('参加抽奖活动')\n def join_luck_draw_activity(self, driver,activity_name):\n # 进入个人中心页面\n self.go_user_center()\n time.sleep(2)\n # 获取当前积分\n integral_start = PagePersonalCenter(driver).get_my_integral()\n # 获取当前卡券数\n coupon_amount_start = PagePersonalCenter(driver).get_my_coupon_amount()\n # 进入活动页面,等待\n self.go_activity()\n time.sleep(2)\n # 点击搜索框\n PageActivity(driver).click_search()\n # 在搜索框中输入活动名称\n PageActivity(driver).input_activity_name(activity_name)\n time.sleep(2)\n # 点击活动搜索后的第一个活动\n PageActivity(driver).click_activity()\n time.sleep(2)\n return integral_start, coupon_amount_start\n # # 检查参与活动方式\n # time.sleep(1)\n # way = self.driver.find_elements_by_xpath(loc.Activity.loc_luck_draw_rule)[-1].text\n # # 判断抽奖形式\n # if '凯德星会员即可参与抽奖' in way:\n # print('免费抽奖')\n # self.click_ele(loc.Activity.loc_draw_immediately)\n # # 判断奖励类型\n # # if self.driver.find_element_by_xpath('//div[@class=\"result_txt\"]').text ==\"恭喜您抽中奖品是积分,立即兑换你想要的礼品吧!\":\n # # 进入个人中心页面\n # self.go_user_center()\n # time.sleep(2)\n # # 获取当前积分\n # integral_end = PagePersonalCenter.get_my_integral()\n # # 获取当前卡券数\n # coupon_amount_end = PagePersonalCenter.get_my_coupon_amount()\n # elif '每次抽奖消耗' in way:\n # print('消耗积分抽奖')\n # integral_end = 0\n # coupon_amount_end = 0\n # else:\n # print('验证积分抽奖')\n # integral_end = 0\n # coupon_amount_end = 0\n # return integral_start, coupon_amount_start, integral_end, coupon_amount_end\n\n # integral = filter(way.isdigit, way)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Install and activate pre-commit and its hooks into virtual environment."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
# if sys.version_info[0] > 2 or sys.version_info[1] < 7:
# print("Python 2.7 required")
# sys.exit(1)
VENV_NAME = 'VIRTUAL_ENV'
VENV = ''
try:
VENV = os.environ[VENV_NAME]
if VENV == '':
print("Environment variable '%s' is empty" % VENV_NAME)
print('Please activate your virtualenv first')
sys.exit(3)
if not os.path.isdir(VENV):
print("Virtual environment '%s' does not exist" % VENV)
print('Please activate a valid virtualenv first')
sys.exit(2)
except KeyError:
print('No virtualenv defined')
print('Please activate a virtualenv (with mkvirtualenv, workon, or pyenv)')
sys.exit(1)
if os.system('git config diff.userdata.textconv $PWD/userdata_decode.py'):
print('Problem configuring Git diff filter for userdata')
if os.system('pre-commit --version'):
os.system('pip install pre-commit')
if os.system('pre-commit install'):
print('Error setting up pre-commit hooks, try updating with '
'pip install -U pre-commit')
sys.exit(4)
if os.system('pre-commit run --all-files'):
print('Problem running pre-commit hooks, check .pre-commit-config.yaml')
sys.exit(5)
sys.exit(0)
|
normal
|
{
"blob_id": "210d1a184d338d77d4c41327d0a9e2a5a56eb2ae",
"index": 2724,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n VENV = os.environ[VENV_NAME]\n if VENV == '':\n print(\"Environment variable '%s' is empty\" % VENV_NAME)\n print('Please activate your virtualenv first')\n sys.exit(3)\n if not os.path.isdir(VENV):\n print(\"Virtual environment '%s' does not exist\" % VENV)\n print('Please activate a valid virtualenv first')\n sys.exit(2)\nexcept KeyError:\n print('No virtualenv defined')\n print('Please activate a virtualenv (with mkvirtualenv, workon, or pyenv)')\n sys.exit(1)\nif os.system('git config diff.userdata.textconv $PWD/userdata_decode.py'):\n print('Problem configuring Git diff filter for userdata')\nif os.system('pre-commit --version'):\n os.system('pip install pre-commit')\nif os.system('pre-commit install'):\n print(\n 'Error setting up pre-commit hooks, try updating with pip install -U pre-commit'\n )\n sys.exit(4)\nif os.system('pre-commit run --all-files'):\n print('Problem running pre-commit hooks, check .pre-commit-config.yaml')\n sys.exit(5)\nsys.exit(0)\n",
"step-3": "<mask token>\nVENV_NAME = 'VIRTUAL_ENV'\nVENV = ''\ntry:\n VENV = os.environ[VENV_NAME]\n if VENV == '':\n print(\"Environment variable '%s' is empty\" % VENV_NAME)\n print('Please activate your virtualenv first')\n sys.exit(3)\n if not os.path.isdir(VENV):\n print(\"Virtual environment '%s' does not exist\" % VENV)\n print('Please activate a valid virtualenv first')\n sys.exit(2)\nexcept KeyError:\n print('No virtualenv defined')\n print('Please activate a virtualenv (with mkvirtualenv, workon, or pyenv)')\n sys.exit(1)\nif os.system('git config diff.userdata.textconv $PWD/userdata_decode.py'):\n print('Problem configuring Git diff filter for userdata')\nif os.system('pre-commit --version'):\n os.system('pip install pre-commit')\nif os.system('pre-commit install'):\n print(\n 'Error setting up pre-commit hooks, try updating with pip install -U pre-commit'\n )\n sys.exit(4)\nif os.system('pre-commit run --all-files'):\n print('Problem running pre-commit hooks, check .pre-commit-config.yaml')\n sys.exit(5)\nsys.exit(0)\n",
"step-4": "<mask token>\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport os\nimport sys\nVENV_NAME = 'VIRTUAL_ENV'\nVENV = ''\ntry:\n VENV = os.environ[VENV_NAME]\n if VENV == '':\n print(\"Environment variable '%s' is empty\" % VENV_NAME)\n print('Please activate your virtualenv first')\n sys.exit(3)\n if not os.path.isdir(VENV):\n print(\"Virtual environment '%s' does not exist\" % VENV)\n print('Please activate a valid virtualenv first')\n sys.exit(2)\nexcept KeyError:\n print('No virtualenv defined')\n print('Please activate a virtualenv (with mkvirtualenv, workon, or pyenv)')\n sys.exit(1)\nif os.system('git config diff.userdata.textconv $PWD/userdata_decode.py'):\n print('Problem configuring Git diff filter for userdata')\nif os.system('pre-commit --version'):\n os.system('pip install pre-commit')\nif os.system('pre-commit install'):\n print(\n 'Error setting up pre-commit hooks, try updating with pip install -U pre-commit'\n )\n sys.exit(4)\nif os.system('pre-commit run --all-files'):\n print('Problem running pre-commit hooks, check .pre-commit-config.yaml')\n sys.exit(5)\nsys.exit(0)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Install and activate pre-commit and its hooks into virtual environment.\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport os\nimport sys\n\n# if sys.version_info[0] > 2 or sys.version_info[1] < 7:\n# print(\"Python 2.7 required\")\n# sys.exit(1)\n\nVENV_NAME = 'VIRTUAL_ENV'\nVENV = ''\ntry:\n VENV = os.environ[VENV_NAME]\n if VENV == '':\n print(\"Environment variable '%s' is empty\" % VENV_NAME)\n print('Please activate your virtualenv first')\n sys.exit(3)\n if not os.path.isdir(VENV):\n print(\"Virtual environment '%s' does not exist\" % VENV)\n print('Please activate a valid virtualenv first')\n sys.exit(2)\n\nexcept KeyError:\n print('No virtualenv defined')\n print('Please activate a virtualenv (with mkvirtualenv, workon, or pyenv)')\n sys.exit(1)\n\nif os.system('git config diff.userdata.textconv $PWD/userdata_decode.py'):\n print('Problem configuring Git diff filter for userdata')\n\nif os.system('pre-commit --version'):\n os.system('pip install pre-commit')\n\nif os.system('pre-commit install'):\n print('Error setting up pre-commit hooks, try updating with '\n 'pip install -U pre-commit')\n sys.exit(4)\n\nif os.system('pre-commit run --all-files'):\n print('Problem running pre-commit hooks, check .pre-commit-config.yaml')\n sys.exit(5)\n\nsys.exit(0)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
fresh_tomatoes.open_movies_page(movies)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
alien_covenant = media.Movie('Alien: Covenant',
'The crew of a colony ship, bound for a remote planet, discover an uncharted paradise with a threat beyondtheir imagination,and must attempt a harrowing escape.'
,
'https://upload.wikimedia.org/wikipedia/en/3/33/Alien_Covenant_Teaser_Poster.jpg'
, 'https://www.youtube.com/watch?v=H0VW6sg50Pk', 'R',
'Science fiction horror', 'Ridley Scott', '123 Minutes')
avatar = media.Movie('Avatar', 'A marine on an alien planet',
'http://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg',
'http://www.youtube.com/watch?v=5PSNL1qE6VY', 'PG-13',
'Epic science fiction', 'James Cameron', '162 Minutes')
okja = media.Movie('Okja',
'A young girl named Mija risks everything to prevent a powerful, multi-national company from kidnapping her best friend,a massive animal named Okja'
, 'https://upload.wikimedia.org/wikipedia/en/f/f6/Okja.png',
'https://www.youtube.com/watch?v=AjCebKn4iic', 'R', 'Action-Adventure',
'Bong Joon-ho', '120 Minutes')
gonegirl = media.Movie('Gone Girl', 'A sad story',
'http://upload.wikimedia.org/wikipedia/en/0/05/Gone_Girl_Poster.jpg',
'http://www.youtube.com/watch?v=Ym3LB0lOJ0o', 'R', 'Crime',
'David Fincher', '149 Minutes')
avenger = media.Movie('Avenger', 'A story about superheroes',
'http://upload.wikimedia.org/wikipedia/en/3/37/Captain_America_The_First_Avenger_poster.jpg'
, 'http://www.youtube.com/watch?v=hIR8Ar-Z4hw', 'PG-13', 'Action',
'Joss Whedon', '143 Minutes')
dark_knight = media.Movie('Dark knight rises', 'A story about batman',
'http://upload.wikimedia.org/wikipedia/en/8/83/Dark_knight_rises_poster.jpg'
, 'http://www.youtube.com/watch?v=g8evyE9TuYk', 'PG-13', 'Action',
'Christopher Nolan', '165 Minutes')
movies = [alien_covenant, avatar, okja, gonegirl, avenger, dark_knight]
fresh_tomatoes.open_movies_page(movies)
<|reserved_special_token_1|>
import fresh_tomatoes
import media
alien_covenant = media.Movie('Alien: Covenant',
'The crew of a colony ship, bound for a remote planet, discover an uncharted paradise with a threat beyondtheir imagination,and must attempt a harrowing escape.'
,
'https://upload.wikimedia.org/wikipedia/en/3/33/Alien_Covenant_Teaser_Poster.jpg'
, 'https://www.youtube.com/watch?v=H0VW6sg50Pk', 'R',
'Science fiction horror', 'Ridley Scott', '123 Minutes')
avatar = media.Movie('Avatar', 'A marine on an alien planet',
'http://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg',
'http://www.youtube.com/watch?v=5PSNL1qE6VY', 'PG-13',
'Epic science fiction', 'James Cameron', '162 Minutes')
okja = media.Movie('Okja',
'A young girl named Mija risks everything to prevent a powerful, multi-national company from kidnapping her best friend,a massive animal named Okja'
, 'https://upload.wikimedia.org/wikipedia/en/f/f6/Okja.png',
'https://www.youtube.com/watch?v=AjCebKn4iic', 'R', 'Action-Adventure',
'Bong Joon-ho', '120 Minutes')
gonegirl = media.Movie('Gone Girl', 'A sad story',
'http://upload.wikimedia.org/wikipedia/en/0/05/Gone_Girl_Poster.jpg',
'http://www.youtube.com/watch?v=Ym3LB0lOJ0o', 'R', 'Crime',
'David Fincher', '149 Minutes')
avenger = media.Movie('Avenger', 'A story about superheroes',
'http://upload.wikimedia.org/wikipedia/en/3/37/Captain_America_The_First_Avenger_poster.jpg'
, 'http://www.youtube.com/watch?v=hIR8Ar-Z4hw', 'PG-13', 'Action',
'Joss Whedon', '143 Minutes')
dark_knight = media.Movie('Dark knight rises', 'A story about batman',
'http://upload.wikimedia.org/wikipedia/en/8/83/Dark_knight_rises_poster.jpg'
, 'http://www.youtube.com/watch?v=g8evyE9TuYk', 'PG-13', 'Action',
'Christopher Nolan', '165 Minutes')
movies = [alien_covenant, avatar, okja, gonegirl, avenger, dark_knight]
fresh_tomatoes.open_movies_page(movies)
<|reserved_special_token_1|>
# Entry-point script for the movie-trailer website.
#
# Relies on two sibling modules:
#   media          - defines the Movie class
#   fresh_tomatoes - renders a list of Movie objects into an HTML page
import fresh_tomatoes
import media

# Movie takes 8 positional arguments:
#   title, storyline, poster image URL, trailer URL,
#   rating, category, director, duration
alien_covenant = media.Movie(
    "Alien: Covenant",
    "The crew of a colony ship, bound for a remote planet, discover an "
    "uncharted paradise with a threat beyondtheir imagination,"
    "and must attempt a harrowing escape.",
    "https://upload.wikimedia.org/wikipedia/en/3/33/"
    "Alien_Covenant_Teaser_Poster.jpg",
    "https://www.youtube.com/watch?v=H0VW6sg50Pk",
    "R",
    "Science fiction horror",
    "Ridley Scott",
    "123 Minutes",
)

avatar = media.Movie(
    "Avatar",
    "A marine on an alien planet",
    "http://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg",
    "http://www.youtube.com/watch?v=5PSNL1qE6VY",
    "PG-13",
    "Epic science fiction",
    "James Cameron",
    "162 Minutes",
)

okja = media.Movie(
    "Okja",
    "A young girl named Mija risks everything to prevent a powerful, "
    "multi-national company from kidnapping her best friend,"
    "a massive animal named Okja",
    "https://upload.wikimedia.org/wikipedia/en/f/f6/Okja.png",
    "https://www.youtube.com/watch?v=AjCebKn4iic",
    "R",
    "Action-Adventure",
    "Bong Joon-ho",
    "120 Minutes",
)

gonegirl = media.Movie(
    "Gone Girl",
    "A sad story",
    "http://upload.wikimedia.org/wikipedia/en/0/05/Gone_Girl_Poster.jpg",
    "http://www.youtube.com/watch?v=Ym3LB0lOJ0o",
    "R",
    "Crime",
    "David Fincher",
    "149 Minutes",
)

avenger = media.Movie(
    "Avenger",
    "A story about superheroes",
    "http://upload.wikimedia.org/wikipedia/en/3/37/"
    "Captain_America_The_First_Avenger_poster.jpg",
    "http://www.youtube.com/watch?v=hIR8Ar-Z4hw",
    "PG-13",
    "Action",
    "Joss Whedon",
    "143 Minutes",
)

dark_knight = media.Movie(
    "Dark knight rises",
    "A story about batman",
    "http://upload.wikimedia.org/wikipedia/en/8/83/"
    "Dark_knight_rises_poster.jpg",
    "http://www.youtube.com/watch?v=g8evyE9TuYk",
    "PG-13",
    "Action",
    "Christopher Nolan",
    "165 Minutes",
)

# Every Movie instance, in display order.
movies = [alien_covenant, avatar, okja, gonegirl, avenger, dark_knight]

# Generate fresh_tomatoes.html and open it in the default browser.
fresh_tomatoes.open_movies_page(movies)
|
flexible
|
{
"blob_id": "9dfc8414628a8b09de3c24c504dd4163efdd3d35",
"index": 6010,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfresh_tomatoes.open_movies_page(movies)\n",
"step-3": "<mask token>\nalien_covenant = media.Movie('Alien: Covenant',\n 'The crew of a colony ship, bound for a remote planet, discover an uncharted paradise with a threat beyondtheir imagination,and must attempt a harrowing escape.'\n ,\n 'https://upload.wikimedia.org/wikipedia/en/3/33/Alien_Covenant_Teaser_Poster.jpg'\n , 'https://www.youtube.com/watch?v=H0VW6sg50Pk', 'R',\n 'Science fiction horror', 'Ridley Scott', '123 Minutes')\navatar = media.Movie('Avatar', 'A marine on an alien planet',\n 'http://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg',\n 'http://www.youtube.com/watch?v=5PSNL1qE6VY', 'PG-13',\n 'Epic science fiction', 'James Cameron', '162 Minutes')\nokja = media.Movie('Okja',\n 'A young girl named Mija risks everything to prevent a powerful, multi-national company from kidnapping her best friend,a massive animal named Okja'\n , 'https://upload.wikimedia.org/wikipedia/en/f/f6/Okja.png',\n 'https://www.youtube.com/watch?v=AjCebKn4iic', 'R', 'Action-Adventure',\n 'Bong Joon-ho', '120 Minutes')\ngonegirl = media.Movie('Gone Girl', 'A sad story',\n 'http://upload.wikimedia.org/wikipedia/en/0/05/Gone_Girl_Poster.jpg',\n 'http://www.youtube.com/watch?v=Ym3LB0lOJ0o', 'R', 'Crime',\n 'David Fincher', '149 Minutes')\navenger = media.Movie('Avenger', 'A story about superheroes',\n 'http://upload.wikimedia.org/wikipedia/en/3/37/Captain_America_The_First_Avenger_poster.jpg'\n , 'http://www.youtube.com/watch?v=hIR8Ar-Z4hw', 'PG-13', 'Action',\n 'Joss Whedon', '143 Minutes')\ndark_knight = media.Movie('Dark knight rises', 'A story about batman',\n 'http://upload.wikimedia.org/wikipedia/en/8/83/Dark_knight_rises_poster.jpg'\n , 'http://www.youtube.com/watch?v=g8evyE9TuYk', 'PG-13', 'Action',\n 'Christopher Nolan', '165 Minutes')\nmovies = [alien_covenant, avatar, okja, gonegirl, avenger, dark_knight]\nfresh_tomatoes.open_movies_page(movies)\n",
"step-4": "import fresh_tomatoes\nimport media\nalien_covenant = media.Movie('Alien: Covenant',\n 'The crew of a colony ship, bound for a remote planet, discover an uncharted paradise with a threat beyondtheir imagination,and must attempt a harrowing escape.'\n ,\n 'https://upload.wikimedia.org/wikipedia/en/3/33/Alien_Covenant_Teaser_Poster.jpg'\n , 'https://www.youtube.com/watch?v=H0VW6sg50Pk', 'R',\n 'Science fiction horror', 'Ridley Scott', '123 Minutes')\navatar = media.Movie('Avatar', 'A marine on an alien planet',\n 'http://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg',\n 'http://www.youtube.com/watch?v=5PSNL1qE6VY', 'PG-13',\n 'Epic science fiction', 'James Cameron', '162 Minutes')\nokja = media.Movie('Okja',\n 'A young girl named Mija risks everything to prevent a powerful, multi-national company from kidnapping her best friend,a massive animal named Okja'\n , 'https://upload.wikimedia.org/wikipedia/en/f/f6/Okja.png',\n 'https://www.youtube.com/watch?v=AjCebKn4iic', 'R', 'Action-Adventure',\n 'Bong Joon-ho', '120 Minutes')\ngonegirl = media.Movie('Gone Girl', 'A sad story',\n 'http://upload.wikimedia.org/wikipedia/en/0/05/Gone_Girl_Poster.jpg',\n 'http://www.youtube.com/watch?v=Ym3LB0lOJ0o', 'R', 'Crime',\n 'David Fincher', '149 Minutes')\navenger = media.Movie('Avenger', 'A story about superheroes',\n 'http://upload.wikimedia.org/wikipedia/en/3/37/Captain_America_The_First_Avenger_poster.jpg'\n , 'http://www.youtube.com/watch?v=hIR8Ar-Z4hw', 'PG-13', 'Action',\n 'Joss Whedon', '143 Minutes')\ndark_knight = media.Movie('Dark knight rises', 'A story about batman',\n 'http://upload.wikimedia.org/wikipedia/en/8/83/Dark_knight_rises_poster.jpg'\n , 'http://www.youtube.com/watch?v=g8evyE9TuYk', 'PG-13', 'Action',\n 'Christopher Nolan', '165 Minutes')\nmovies = [alien_covenant, avatar, okja, gonegirl, avenger, dark_knight]\nfresh_tomatoes.open_movies_page(movies)\n",
"step-5": "# This is main file where we create the instances of Movie class\n# and run the file to view the movie website page\n\n# we have to import media where class Movie is defined and\n# fresh_tomatoes python files\nimport fresh_tomatoes\nimport media\n\n# Each instance has 8 arguments: Title, story line, poster image,\n# trailer url, rating, category, director, duration\nalien_covenant = media.Movie(\"Alien: Covenant\", \"The crew of a colony ship, \"\n \"bound for a remote planet, discover an \"\n \"uncharted paradise with a threat beyond\"\n \"their imagination,\"\n \"and must attempt a harrowing escape.\",\n \"https://upload.wikimedia.org/wikipedia/en/3/33/\"\n \"Alien_Covenant_Teaser_Poster.jpg\",\n \"https://www.youtube.com/watch?v=H0VW6sg50Pk\",\n \"R\",\n \"Science fiction horror\",\n \"Ridley Scott\",\n \"123 Minutes\")\n\navatar = media.Movie(\"Avatar\", \"A marine on an alien planet\",\n \"http://upload.wikimedia.org/wikipedia/en/\"\n \"b/b0/Avatar-Teaser-Poster.jpg\",\n \"http://www.youtube.com/watch?v=5PSNL1qE6VY\",\n \"PG-13\",\n \"Epic science fiction\",\n \"James Cameron\",\n \"162 Minutes\")\n\nokja = media.Movie(\"Okja\", \"A young girl named Mija risks everything to \"\n \"prevent a powerful, multi-national company \"\n \"from kidnapping her best friend,\"\n \"a massive animal named Okja\",\n \"https://upload.wikimedia.org/wikipedia/en/f/f6/Okja.png\",\n \"https://www.youtube.com/watch?v=AjCebKn4iic\",\n \"R\",\n \"Action-Adventure\",\n \"Bong Joon-ho\",\n \"120 Minutes\")\n\ngonegirl = media.Movie(\"Gone Girl\",\n \"A sad story\",\n \"http://upload.wikimedia.org/wikipedia/en/0/05/\"\n \"Gone_Girl_Poster.jpg\",\n \"http://www.youtube.com/watch?v=Ym3LB0lOJ0o\",\n \"R\",\n \"Crime\",\n \"David Fincher\",\n \"149 Minutes\")\n\navenger = media.Movie(\"Avenger\",\n \"A story about superheroes\",\n \"http://upload.wikimedia.org/wikipedia/en/3/37/\"\n \"Captain_America_The_First_Avenger_poster.jpg\",\n 
\"http://www.youtube.com/watch?v=hIR8Ar-Z4hw\",\n \"PG-13\",\n \"Action\",\n \"Joss Whedon\",\n \"143 Minutes\")\n\ndark_knight = media.Movie(\"Dark knight rises\",\n \"A story about batman\",\n \"http://upload.wikimedia.org/wikipedia/en/8/83/\"\n \"Dark_knight_rises_poster.jpg\",\n \"http://www.youtube.com/watch?v=g8evyE9TuYk\",\n \"PG-13\",\n \"Action\",\n \"Christopher Nolan\",\n \"165 Minutes\")\n\n\n# Creating a list of all instances\nmovies = [alien_covenant, avatar, okja, gonegirl, avenger, dark_knight]\n\n# Calling open_movies_page function to create fresh_tomatoes.html\n# file which contains a movie web page\nfresh_tomatoes.open_movies_page(movies)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Path setup: make the parent directory and its utils/ package importable
# no matter which working directory this script is launched from.
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
sys.path.append(os.path.join(parentdir, "utils"))
# Third-party helper libraries (Flask web app, NumPy math, Plotly charts).
from flask import Flask, render_template
import numpy as np
import plotly
import plotly.graph_objs as pgo
import json
# Project-local modules (the two commented imports belong to the disabled
# background-update code further down in this file).
#from utils import DB_interface as DBI
#from utils import path_config as pc
from utils import model
app = Flask(__name__)  # WSGI application object served by Flask
# Global variable (currently unused)
#DAYS = 500
@app.route('/')
def index():
    """Render the landing page, embedding the SEIR plot as a JSON payload."""
    return render_template("index.html", graphJSON=compute_model_output())
def compute_model_output():
    """Run a fixed-parameter SEIR simulation and return its plot data.

    Returns a JSON string encoding four Plotly scatter traces (one per
    SEIR compartment) suitable for client-side rendering.
    """
    # Simulation parameters. Names follow utils.model.SEIRModel; t_inc/t_inf
    # presumably are incubation/infectious durations in days - confirm there.
    num_steps = 500
    init_inf = 5
    t_inc, t_inf = 5, 9
    r_t = 2.5
    rho, kappa_0, kappa = 1.0, 0.0, 0.0
    n_pop = 2000

    seir = model.SEIRModel(num_steps, n_pop, init_inf, t_inc, t_inf,
                           r_t, rho, kappa_0, kappa)
    compartments = seir.run()  # yields the (s, e, i, r) time series

    days = np.linspace(0, num_steps, num_steps)
    colors = ('rgba(128, 223, 255, 1)', 'rgba(200, 100, 0, 1)',
              'rgba(180, 0, 0, 1)', 'rgba(0, 100, 50, 1)')
    # One line trace per compartment, labelled 's', 'e', 'i', 'r'.
    data = [pgo.Scatter(x=days, y=series, mode='lines', name=label,
                        line=dict(color=color))
            for series, label, color in zip(compartments, 'seir', colors)]

    return json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)
"""
@app.callback(
Output('test','children')
[Input('val_num_steps', 'num_steps')]
)
@app.route('/start_bckgrnd_update')
def start_bckgrnd_update():
p = Process(target=bckgrnd_update, name="background_update")
p.start()
#p.join()
now = datetime.now()
user = {'username': 'MSE!'}
posts = [
{
'author': {'username': 'Paul'},
'body': 'Henrik has the update just been started?'
},
{
'author': {'username': 'Henrik'},
'body': 'You bet your sweet ass it has!'
},
{
'author': {'username': 'Paul'},
'body': 'So what time was is when it started?'
},
{
'author': {'username': 'Henrik'},
'body': 'It was exactly %s !' % now
}
]
return render_template("start_bckgrnd_update.html", title="home", user = user, posts=posts)
def bckgrnd_update():
global updating
updating = True
while updating:
print(datetime.now())
print("updating RKI DBs now")
DB = DBI.DB_interface()
DB.update_RKI_csv()
DB.update_RKI_landkreise_csv()
day = 24 * 3600
time.sleep(day)
"""
if __name__ == "__main__":
app.run(debug=True)
|
normal
|
{
"blob_id": "7d099012584b84e9767bf0ce9d9df1596ca3bbab",
"index": 542,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef index():\n result_plot = compute_model_output()\n return render_template('index.html', graphJSON=result_plot)\n\n\ndef compute_model_output():\n num_steps = 500\n init_inf = 5\n t_inc = 5\n t_inf = 9\n r_t = 2.5\n rho = 1.0\n kappa_0 = 0.0\n kappa = 0.0\n n_pop = 2000\n seir = model.SEIRModel(num_steps, n_pop, init_inf, t_inc, t_inf, r_t,\n rho, kappa_0, kappa)\n s, e, i, r = seir.run()\n days = np.linspace(0, num_steps, num_steps)\n trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(\n color='rgba(128, 223, 255, 1)'))\n trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(\n color='rgba(200, 100, 0, 1)'))\n trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(\n color='rgba(180, 0, 0, 1)'))\n trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(\n color='rgba(0, 100, 50, 1)'))\n data = [trace_0, trace_1, trace_2, trace_3]\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, parentdir)\nsys.path.append(os.path.join(parentdir, 'utils'))\n<mask token>\n\n\n@app.route('/')\ndef index():\n result_plot = compute_model_output()\n return render_template('index.html', graphJSON=result_plot)\n\n\ndef compute_model_output():\n num_steps = 500\n init_inf = 5\n t_inc = 5\n t_inf = 9\n r_t = 2.5\n rho = 1.0\n kappa_0 = 0.0\n kappa = 0.0\n n_pop = 2000\n seir = model.SEIRModel(num_steps, n_pop, init_inf, t_inc, t_inf, r_t,\n rho, kappa_0, kappa)\n s, e, i, r = seir.run()\n days = np.linspace(0, num_steps, num_steps)\n trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(\n color='rgba(128, 223, 255, 1)'))\n trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(\n color='rgba(200, 100, 0, 1)'))\n trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(\n color='rgba(180, 0, 0, 1)'))\n trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(\n color='rgba(0, 100, 50, 1)'))\n data = [trace_0, trace_1, trace_2, trace_3]\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\n\n<mask token>\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.\n currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, parentdir)\nsys.path.append(os.path.join(parentdir, 'utils'))\n<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n result_plot = compute_model_output()\n return render_template('index.html', graphJSON=result_plot)\n\n\ndef compute_model_output():\n num_steps = 500\n init_inf = 5\n t_inc = 5\n t_inf = 9\n r_t = 2.5\n rho = 1.0\n kappa_0 = 0.0\n kappa = 0.0\n n_pop = 2000\n seir = model.SEIRModel(num_steps, n_pop, init_inf, t_inc, t_inf, r_t,\n rho, kappa_0, kappa)\n s, e, i, r = seir.run()\n days = np.linspace(0, num_steps, num_steps)\n trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(\n color='rgba(128, 223, 255, 1)'))\n trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(\n color='rgba(200, 100, 0, 1)'))\n trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(\n color='rgba(180, 0, 0, 1)'))\n trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(\n color='rgba(0, 100, 50, 1)'))\n data = [trace_0, trace_1, trace_2, trace_3]\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\n\n<mask token>\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "import os, sys, inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.\n currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, parentdir)\nsys.path.append(os.path.join(parentdir, 'utils'))\nfrom flask import Flask, render_template\nimport numpy as np\nimport plotly\nimport plotly.graph_objs as pgo\nimport json\nfrom utils import model\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n result_plot = compute_model_output()\n return render_template('index.html', graphJSON=result_plot)\n\n\ndef compute_model_output():\n num_steps = 500\n init_inf = 5\n t_inc = 5\n t_inf = 9\n r_t = 2.5\n rho = 1.0\n kappa_0 = 0.0\n kappa = 0.0\n n_pop = 2000\n seir = model.SEIRModel(num_steps, n_pop, init_inf, t_inc, t_inf, r_t,\n rho, kappa_0, kappa)\n s, e, i, r = seir.run()\n days = np.linspace(0, num_steps, num_steps)\n trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(\n color='rgba(128, 223, 255, 1)'))\n trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(\n color='rgba(200, 100, 0, 1)'))\n trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(\n color='rgba(180, 0, 0, 1)'))\n trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(\n color='rgba(0, 100, 50, 1)'))\n data = [trace_0, trace_1, trace_2, trace_3]\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\n\n<mask token>\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "# Set up path references and dependencies.\nimport os, sys, inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, parentdir)\nsys.path.append(os.path.join(parentdir, \"utils\"))\n\n# Import important helper libraries.\nfrom flask import Flask, render_template\nimport numpy as np\n\nimport plotly\nimport plotly.graph_objs as pgo\nimport json\n\n# Import modules created to serve the project.\n#from utils import DB_interface as DBI\n#from utils import path_config as pc\nfrom utils import model\n\napp = Flask(__name__)\n\n# Global variable\n#DAYS = 500\n\n@app.route('/')\ndef index():\n result_plot = compute_model_output()\n return render_template(\"index.html\", graphJSON=result_plot)\n\ndef compute_model_output():\n num_steps = 500\n init_inf = 5\n t_inc = 5\n t_inf = 9\n r_t = 2.5 #np.random.normal(2.5, 1.0)\n rho = 1.0\n kappa_0 = 0.0\n kappa = 0.0\n\n n_pop = 2000\n\n seir = model.SEIRModel(num_steps,n_pop, init_inf, t_inc, t_inf, r_t, rho, kappa_0, kappa)\n\n s, e, i, r = seir.run()\n\n days = np.linspace(0, num_steps, num_steps)\n\n trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(color='rgba(128, 223, 255, 1)'))\n trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(color='rgba(200, 100, 0, 1)'))\n trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(color='rgba(180, 0, 0, 1)'))\n trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(color='rgba(0, 100, 50, 1)'))\n\n data = [trace_0, trace_1, trace_2, trace_3]\n\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n\n return (graphJSON)\n\n\"\"\"\n@app.callback(\n Output('test','children')\n [Input('val_num_steps', 'num_steps')]\n)\n\n\n@app.route('/start_bckgrnd_update')\ndef start_bckgrnd_update():\n p = Process(target=bckgrnd_update, name=\"background_update\")\n p.start()\n #p.join()\n now = datetime.now()\n user = 
{'username': 'MSE!'}\n posts = [\n {\n 'author': {'username': 'Paul'},\n 'body': 'Henrik has the update just been started?'\n },\n {\n 'author': {'username': 'Henrik'},\n 'body': 'You bet your sweet ass it has!'\n },\n {\n 'author': {'username': 'Paul'},\n 'body': 'So what time was is when it started?'\n },\n {\n 'author': {'username': 'Henrik'},\n 'body': 'It was exactly %s !' % now\n }\n\n ]\n return render_template(\"start_bckgrnd_update.html\", title=\"home\", user = user, posts=posts)\n\ndef bckgrnd_update():\n global updating\n updating = True\n while updating:\n print(datetime.now())\n print(\"updating RKI DBs now\")\n DB = DBI.DB_interface()\n DB.update_RKI_csv()\n DB.update_RKI_landkreise_csv()\n day = 24 * 3600\n time.sleep(day)\n\"\"\"\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def truncateFile(fileName):
fileTemp = open(fileName, 'w')
fileTemp.truncate()
fileTemp.close()
def getConnBySecName(dbConf, secName):
descSec = ''
secs = dbConf.sections()
for sec in secs:
if sec == secName:
descSec = sec
conn = getConnOne(dbConf, descSec)
return conn
<|reserved_special_token_0|>
def getConnOne(dbConf, sec):
host = dbConf.get(sec, 'host')
port = dbConf.getint(sec, 'port')
user = dbConf.get(sec, 'user')
password = dbConf.get(sec, 'password')
database = dbConf.get(sec, 'database')
conn = commonutil.getConn(host, port, user, password, database)
if conn is None:
time.sleep(5)
conn = commonutil.getConn(host, port, user, password, database)
if conn is None:
commonutil.fatal('fail to get connection : sec=[{0}]'.format(sec))
return conn
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def readFile(fileName):
infile = open(fileName, 'r')
content = infile.read()
infile.close()
return content
def truncateFile(fileName):
fileTemp = open(fileName, 'w')
fileTemp.truncate()
fileTemp.close()
def getConnBySecName(dbConf, secName):
descSec = ''
secs = dbConf.sections()
for sec in secs:
if sec == secName:
descSec = sec
conn = getConnOne(dbConf, descSec)
return conn
<|reserved_special_token_0|>
def getConnOne(dbConf, sec):
host = dbConf.get(sec, 'host')
port = dbConf.getint(sec, 'port')
user = dbConf.get(sec, 'user')
password = dbConf.get(sec, 'password')
database = dbConf.get(sec, 'database')
conn = commonutil.getConn(host, port, user, password, database)
if conn is None:
time.sleep(5)
conn = commonutil.getConn(host, port, user, password, database)
if conn is None:
commonutil.fatal('fail to get connection : sec=[{0}]'.format(sec))
return conn
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getConf(cfgfile):
config = ConfigParser.ConfigParser()
with open(cfgfile, 'r') as cfgfile:
config.readfp(cfgfile)
return config
def readFile(fileName):
infile = open(fileName, 'r')
content = infile.read()
infile.close()
return content
def truncateFile(fileName):
fileTemp = open(fileName, 'w')
fileTemp.truncate()
fileTemp.close()
def getConnBySecName(dbConf, secName):
descSec = ''
secs = dbConf.sections()
for sec in secs:
if sec == secName:
descSec = sec
conn = getConnOne(dbConf, descSec)
return conn
def getConns(dbConf):
secs = dbConf.sections()
conns = {}
for sec in secs:
conn = getConnOne(dbConf, sec)
if conn is not None:
conns[sec] = conn
return conns
def getConnOne(dbConf, sec):
host = dbConf.get(sec, 'host')
port = dbConf.getint(sec, 'port')
user = dbConf.get(sec, 'user')
password = dbConf.get(sec, 'password')
database = dbConf.get(sec, 'database')
conn = commonutil.getConn(host, port, user, password, database)
if conn is None:
time.sleep(5)
conn = commonutil.getConn(host, port, user, password, database)
if conn is None:
commonutil.fatal('fail to get connection : sec=[{0}]'.format(sec))
return conn
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from datetime import datetime, timedelta
from optparse import OptionParser
import argparse
import ConfigParser
import sys
import os
import time
import commonutil
def getConf(cfgfile):
config = ConfigParser.ConfigParser()
with open(cfgfile, 'r') as cfgfile:
config.readfp(cfgfile)
return config
def readFile(fileName):
infile = open(fileName, 'r')
content = infile.read()
infile.close()
return content
def truncateFile(fileName):
fileTemp = open(fileName, 'w')
fileTemp.truncate()
fileTemp.close()
def getConnBySecName(dbConf, secName):
descSec = ''
secs = dbConf.sections()
for sec in secs:
if sec == secName:
descSec = sec
conn = getConnOne(dbConf, descSec)
return conn
def getConns(dbConf):
secs = dbConf.sections()
conns = {}
for sec in secs:
conn = getConnOne(dbConf, sec)
if conn is not None:
conns[sec] = conn
return conns
def getConnOne(dbConf, sec):
host = dbConf.get(sec, 'host')
port = dbConf.getint(sec, 'port')
user = dbConf.get(sec, 'user')
password = dbConf.get(sec, 'password')
database = dbConf.get(sec, 'database')
conn = commonutil.getConn(host, port, user, password, database)
if conn is None:
time.sleep(5)
conn = commonutil.getConn(host, port, user, password, database)
if conn is None:
commonutil.fatal('fail to get connection : sec=[{0}]'.format(sec))
return conn
<|reserved_special_token_1|>
# -*- coding: utf8 -*-
'''
dump data from mysql/hive to load into mysql
'''
from datetime import datetime,timedelta
from optparse import OptionParser
import argparse
import ConfigParser
import sys
import os
import time
import commonutil
def getConf(cfgfile):
    """Parse the INI file at path *cfgfile* and return a ConfigParser."""
    parser = ConfigParser.ConfigParser()
    # Use a distinct name for the handle so the path argument is not shadowed.
    with open(cfgfile, 'r') as handle:
        parser.readfp(handle)
    return parser
def readFile(fileName):
    """Return the entire contents of *fileName* as a string.

    Uses a context manager so the file handle is closed even if the
    read raises; the original left the handle open on error.
    """
    with open(fileName, "r") as infile:
        return infile.read()
def truncateFile(fileName):
    """Empty *fileName*, creating it if it does not exist.

    Opening in 'w' mode already truncates the file, so the explicit
    truncate() call of the original was redundant; the context manager
    guarantees the handle is closed.
    """
    with open(fileName, "w"):
        pass
def getConnBySecName(dbConf, secName):
    """Return a DB connection for the config section named *secName*.

    NOTE(review): if *secName* is absent, descSec stays '' and the lookup
    inside getConnOne raises ConfigParser.NoSectionError - pre-existing
    behavior, preserved here.
    """
    descSec = ''
    for sec in dbConf.sections():
        if sec == secName:
            descSec = sec
            break  # section names are unique; no need to scan further
    return getConnOne(dbConf, descSec)
def getConns(dbConf):
    """Open one connection per section of *dbConf*.

    Returns a dict mapping section name -> connection; sections whose
    connection attempt yields None are omitted.
    """
    connections = {}
    for section in dbConf.sections():
        connection = getConnOne(dbConf, section)
        if connection is not None:
            connections[section] = connection
    return connections
def getConnOne(dbConf, sec):
    """Open a connection described by config section *sec*.

    Tries twice with a 5 second pause between attempts; if both fail,
    commonutil.fatal is invoked with the section name and the (None)
    connection is returned.
    """
    host = dbConf.get(sec, 'host')
    port = dbConf.getint(sec, 'port')
    user = dbConf.get(sec, 'user')
    password = dbConf.get(sec, 'password')
    database = dbConf.get(sec, 'database')
    for attempt in (0, 1):
        conn = commonutil.getConn(host, port, user, password, database)
        if conn is not None:
            return conn
        if attempt == 0:
            time.sleep(5)  # wait before the single retry
    commonutil.fatal('fail to get connection : sec=[{0}]'.format(sec))
    return conn
|
flexible
|
{
"blob_id": "df984939c109662bebbd1556c12223fce8f643e6",
"index": 1773,
"step-1": "<mask token>\n\n\ndef truncateFile(fileName):\n fileTemp = open(fileName, 'w')\n fileTemp.truncate()\n fileTemp.close()\n\n\ndef getConnBySecName(dbConf, secName):\n descSec = ''\n secs = dbConf.sections()\n for sec in secs:\n if sec == secName:\n descSec = sec\n conn = getConnOne(dbConf, descSec)\n return conn\n\n\n<mask token>\n\n\ndef getConnOne(dbConf, sec):\n host = dbConf.get(sec, 'host')\n port = dbConf.getint(sec, 'port')\n user = dbConf.get(sec, 'user')\n password = dbConf.get(sec, 'password')\n database = dbConf.get(sec, 'database')\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n time.sleep(5)\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n commonutil.fatal('fail to get connection : sec=[{0}]'.format(sec))\n return conn\n",
"step-2": "<mask token>\n\n\ndef readFile(fileName):\n infile = open(fileName, 'r')\n content = infile.read()\n infile.close()\n return content\n\n\ndef truncateFile(fileName):\n fileTemp = open(fileName, 'w')\n fileTemp.truncate()\n fileTemp.close()\n\n\ndef getConnBySecName(dbConf, secName):\n descSec = ''\n secs = dbConf.sections()\n for sec in secs:\n if sec == secName:\n descSec = sec\n conn = getConnOne(dbConf, descSec)\n return conn\n\n\n<mask token>\n\n\ndef getConnOne(dbConf, sec):\n host = dbConf.get(sec, 'host')\n port = dbConf.getint(sec, 'port')\n user = dbConf.get(sec, 'user')\n password = dbConf.get(sec, 'password')\n database = dbConf.get(sec, 'database')\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n time.sleep(5)\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n commonutil.fatal('fail to get connection : sec=[{0}]'.format(sec))\n return conn\n",
"step-3": "<mask token>\n\n\ndef getConf(cfgfile):\n config = ConfigParser.ConfigParser()\n with open(cfgfile, 'r') as cfgfile:\n config.readfp(cfgfile)\n return config\n\n\ndef readFile(fileName):\n infile = open(fileName, 'r')\n content = infile.read()\n infile.close()\n return content\n\n\ndef truncateFile(fileName):\n fileTemp = open(fileName, 'w')\n fileTemp.truncate()\n fileTemp.close()\n\n\ndef getConnBySecName(dbConf, secName):\n descSec = ''\n secs = dbConf.sections()\n for sec in secs:\n if sec == secName:\n descSec = sec\n conn = getConnOne(dbConf, descSec)\n return conn\n\n\ndef getConns(dbConf):\n secs = dbConf.sections()\n conns = {}\n for sec in secs:\n conn = getConnOne(dbConf, sec)\n if conn is not None:\n conns[sec] = conn\n return conns\n\n\ndef getConnOne(dbConf, sec):\n host = dbConf.get(sec, 'host')\n port = dbConf.getint(sec, 'port')\n user = dbConf.get(sec, 'user')\n password = dbConf.get(sec, 'password')\n database = dbConf.get(sec, 'database')\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n time.sleep(5)\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n commonutil.fatal('fail to get connection : sec=[{0}]'.format(sec))\n return conn\n",
"step-4": "<mask token>\nfrom datetime import datetime, timedelta\nfrom optparse import OptionParser\nimport argparse\nimport ConfigParser\nimport sys\nimport os\nimport time\nimport commonutil\n\n\ndef getConf(cfgfile):\n config = ConfigParser.ConfigParser()\n with open(cfgfile, 'r') as cfgfile:\n config.readfp(cfgfile)\n return config\n\n\ndef readFile(fileName):\n infile = open(fileName, 'r')\n content = infile.read()\n infile.close()\n return content\n\n\ndef truncateFile(fileName):\n fileTemp = open(fileName, 'w')\n fileTemp.truncate()\n fileTemp.close()\n\n\ndef getConnBySecName(dbConf, secName):\n descSec = ''\n secs = dbConf.sections()\n for sec in secs:\n if sec == secName:\n descSec = sec\n conn = getConnOne(dbConf, descSec)\n return conn\n\n\ndef getConns(dbConf):\n secs = dbConf.sections()\n conns = {}\n for sec in secs:\n conn = getConnOne(dbConf, sec)\n if conn is not None:\n conns[sec] = conn\n return conns\n\n\ndef getConnOne(dbConf, sec):\n host = dbConf.get(sec, 'host')\n port = dbConf.getint(sec, 'port')\n user = dbConf.get(sec, 'user')\n password = dbConf.get(sec, 'password')\n database = dbConf.get(sec, 'database')\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n time.sleep(5)\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n commonutil.fatal('fail to get connection : sec=[{0}]'.format(sec))\n return conn\n",
"step-5": "# -*- coding: utf8 -*-\r\n'''\r\ndump data from mysql/hive to load into mysql\r\n'''\r\nfrom datetime import datetime,timedelta\r\nfrom optparse import OptionParser\r\nimport argparse\r\nimport ConfigParser\r\nimport sys\r\nimport os\r\nimport time\r\nimport commonutil\r\n\r\ndef getConf(cfgfile):\r\n config = ConfigParser.ConfigParser()\r\n with open(cfgfile, 'r') as cfgfile:\r\n config.readfp(cfgfile)\r\n return config\r\n\r\ndef readFile(fileName):\r\n infile = open(fileName, \"r\")\r\n content = infile.read()\r\n infile.close()\r\n return content\r\n\r\ndef truncateFile(fileName):\r\n fileTemp = open(fileName, \"w\")\r\n fileTemp.truncate()\r\n fileTemp.close()\r\n\r\ndef getConnBySecName(dbConf,secName):\r\n descSec = ''\r\n secs = dbConf.sections()\r\n for sec in secs:\r\n if sec == secName:\r\n descSec = sec\r\n conn = getConnOne(dbConf,descSec)\r\n return conn\r\n \r\ndef getConns(dbConf):\r\n secs = dbConf.sections()\r\n conns = {}\r\n for sec in secs:\r\n conn = getConnOne(dbConf, sec)\r\n if conn is not None:\r\n conns[sec]=conn\r\n return conns\r\n\r\ndef getConnOne(dbConf, sec):\r\n host = dbConf.get(sec,\"host\")\r\n port = dbConf.getint(sec,\"port\")\r\n user = dbConf.get(sec,\"user\")\r\n password = dbConf.get(sec,\"password\")\r\n database = dbConf.get(sec,\"database\")\r\n conn = commonutil.getConn(host, port, user, password, database)\r\n if conn is None:\r\n time.sleep(5)\r\n conn = commonutil.getConn(host, port, user, password, database)\r\n if conn is None:\r\n commonutil.fatal(\"fail to get connection : sec=[{0}]\".format(sec))\r\n return conn\r\n \r\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(res.read().decode('utf-8'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Log in to 51job's OAuth endpoint (used by yingjiesheng.com) and print the
# raw response body.
# NOTE(review): certificate verification is deliberately disabled below --
# confirm this is acceptable before reuse.
context = ssl._create_unverified_context()
# Login URL; client id, redirect target and state token are baked into the
# query string.
url = (
    'https://oauth.51job.com/get_login.php?client_id=000001&redirect_uri=https%3A%2F%2Funion.yingjiesheng.com%2Fapi_login.php&from_domain=yjs_web&display=default&state=7c893ec1be7b355a91bdc3c474087add--368ba30db1d6217cc18f7dfe0bd27a79&partner='
    )
# Desktop Chrome user agent so the endpoint treats this as a browser login.
headers = {'User-Agent':
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
    }
# Credentials are pre-encrypted blobs; presumably produced by the site's own
# client-side encryption -- verify before changing.
data = {'loginname_encrypt': '/Pjp1Ki1S3j65+QC2J2pkg==', 'password_encrypt':
    'hiqxe1qVXCoVuCrSwYM+eg=='}
# urlencode the form fields and convert to bytes, as required for a POST body.
data = bytes(parse.urlencode(data), 'utf-8')
req = request.Request(url, data=data, headers=headers, method='POST')
res = request.urlopen(req, context=context)  # performs the HTTPS POST
print(res.read().decode('utf-8'))
<|reserved_special_token_1|>
import ssl
import urllib
from urllib import request, response, error, parse, robotparser
# Log in to 51job's OAuth endpoint and print the raw response body.
# NOTE(review): certificate verification is disabled here -- confirm this is
# intentional before reuse.
context = ssl._create_unverified_context()
# Login URL with client id, redirect target and state token in the query.
url = (
    'https://oauth.51job.com/get_login.php?client_id=000001&redirect_uri=https%3A%2F%2Funion.yingjiesheng.com%2Fapi_login.php&from_domain=yjs_web&display=default&state=7c893ec1be7b355a91bdc3c474087add--368ba30db1d6217cc18f7dfe0bd27a79&partner='
    )
# Desktop Chrome user agent to look like a browser login.
headers = {'User-Agent':
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
    }
# Pre-encrypted credential blobs; presumably generated by the site's own
# client-side encryption -- verify before changing.
data = {'loginname_encrypt': '/Pjp1Ki1S3j65+QC2J2pkg==', 'password_encrypt':
    'hiqxe1qVXCoVuCrSwYM+eg=='}
# Form-encode and convert to bytes for the POST body.
data = bytes(parse.urlencode(data), 'utf-8')
req = request.Request(url, data=data, headers=headers, method='POST')
res = request.urlopen(req, context=context)  # performs the HTTPS POST
print(res.read().decode('utf-8'))
<|reserved_special_token_1|>
import ssl
import urllib
from urllib import request, response, error, parse, robotparser
# Log in to 51job's OAuth endpoint and print the raw response body.
# NOTE(review): certificate verification is disabled here -- confirm this is
# intentional before reuse.
context = ssl._create_unverified_context()
# Login URL with client id, redirect target and state token in the query.
url = 'https://oauth.51job.com/get_login.php?client_id=000001&redirect_uri=https%3A%2F%2Funion.yingjiesheng.com%2Fapi_login.php&from_domain=yjs_web&display=default&state=7c893ec1be7b355a91bdc3c474087add--368ba30db1d6217cc18f7dfe0bd27a79&partner='
# Desktop Chrome user agent to look like a browser login.
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
# Pre-encrypted credential blobs; presumably generated by the site's own
# client-side encryption -- verify before changing.
data = {
    'loginname_encrypt': '/Pjp1Ki1S3j65+QC2J2pkg==',
    'password_encrypt': 'hiqxe1qVXCoVuCrSwYM+eg=='
}
# Form-encode and convert to bytes for the POST body.
data = bytes(parse.urlencode(data), 'utf-8')
req = request.Request(url, data=data, headers=headers, method='POST')
res = request.urlopen(req, context=context)  # performs the HTTPS POST
print(res.read().decode('utf-8'))
|
flexible
|
{
"blob_id": "2a37d02c7a0840e855a80adced4794fd757e353a",
"index": 2917,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(res.read().decode('utf-8'))\n",
"step-3": "<mask token>\ncontext = ssl._create_unverified_context()\nurl = (\n 'https://oauth.51job.com/get_login.php?client_id=000001&redirect_uri=https%3A%2F%2Funion.yingjiesheng.com%2Fapi_login.php&from_domain=yjs_web&display=default&state=7c893ec1be7b355a91bdc3c474087add--368ba30db1d6217cc18f7dfe0bd27a79&partner='\n )\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'\n }\ndata = {'loginname_encrypt': '/Pjp1Ki1S3j65+QC2J2pkg==', 'password_encrypt':\n 'hiqxe1qVXCoVuCrSwYM+eg=='}\ndata = bytes(parse.urlencode(data), 'utf-8')\nreq = request.Request(url, data=data, headers=headers, method='POST')\nres = request.urlopen(req, context=context)\nprint(res.read().decode('utf-8'))\n",
"step-4": "import ssl\nimport urllib\nfrom urllib import request, response, error, parse, robotparser\ncontext = ssl._create_unverified_context()\nurl = (\n 'https://oauth.51job.com/get_login.php?client_id=000001&redirect_uri=https%3A%2F%2Funion.yingjiesheng.com%2Fapi_login.php&from_domain=yjs_web&display=default&state=7c893ec1be7b355a91bdc3c474087add--368ba30db1d6217cc18f7dfe0bd27a79&partner='\n )\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'\n }\ndata = {'loginname_encrypt': '/Pjp1Ki1S3j65+QC2J2pkg==', 'password_encrypt':\n 'hiqxe1qVXCoVuCrSwYM+eg=='}\ndata = bytes(parse.urlencode(data), 'utf-8')\nreq = request.Request(url, data=data, headers=headers, method='POST')\nres = request.urlopen(req, context=context)\nprint(res.read().decode('utf-8'))\n",
"step-5": "import ssl\nimport urllib\nfrom urllib import request, response, error, parse, robotparser\ncontext = ssl._create_unverified_context()\nurl = 'https://oauth.51job.com/get_login.php?client_id=000001&redirect_uri=https%3A%2F%2Funion.yingjiesheng.com%2Fapi_login.php&from_domain=yjs_web&display=default&state=7c893ec1be7b355a91bdc3c474087add--368ba30db1d6217cc18f7dfe0bd27a79&partner='\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'\n}\ndata = {\n 'loginname_encrypt': '/Pjp1Ki1S3j65+QC2J2pkg==',\n 'password_encrypt': 'hiqxe1qVXCoVuCrSwYM+eg=='\n}\ndata = bytes(parse.urlencode(data), 'utf-8')\nreq = request.Request(url, data=data, headers=headers, method='POST')\nres = request.urlopen(req, context=context)\nprint(res.read().decode('utf-8'))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
'''
quarter = 0.25
dime = 0.10
nickel = 0.05
penny = 0.01
'''
#def poschg(dollar_amount,number):
|
flexible
|
{
"blob_id": "0deec9058c6f7b77ba4fa3bfc0269c8596ce9612",
"index": 1215,
"step-1": "<mask token>\n",
"step-2": "'''\nquarter = 0.25\ndime = 0.10\nnickel = 0.05\npenny = 0.01\n'''\n\n#def poschg(dollar_amount,number):\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
"""
Solution to Codeforces problem 50A
Copyright (c) GeneralMing. All rights reserved.
https://github.com/GeneralMing/codeforces
"""
# 50A: a 2x1 domino covers two cells, so an M x N board fits M*N // 2 dominoes.
m, n = (int(token) for token in input().split())
print(m * n // 2)
|
normal
|
{
"blob_id": "41a80feeb1fdc8ad783706ad261f5fc1124371d6",
"index": 8216,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(n[0] * n[1] // 2)\n",
"step-3": "<mask token>\nn = input().split()\nn[0] = int(n[0])\nn[1] = int(n[1])\nprint(n[0] * n[1] // 2)\n",
"step-4": "\"\"\"\n\tSolution to Codeforces problem 50A\n\tCopyright (c) GeneralMing. All rights reserved.\n\n\thttps://github.com/GeneralMing/codeforces\n\"\"\"\n\nn = input().split()\nn[0] = int(n[0])\nn[1] = int(n[1])\nprint((n[0]*n[1])//2)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Typed-array demo: arrays store unboxed C values, unlike Python lists.
from array import array

# 'i' selects signed C int storage.
numbers = array('i', (1, 2, 3))
numbers[0] = 0  # element assignment works just like a list
print([value for value in numbers])
|
normal
|
{
"blob_id": "ae5f87f1c383478ea5f370af1c85d63a472a7788",
"index": 455,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(list(numbers))\n",
"step-3": "<mask token>\nnumbers = array('i', [1, 2, 3])\nnumbers[0] = 0\nprint(list(numbers))\n",
"step-4": "from array import array\nnumbers = array('i', [1, 2, 3])\nnumbers[0] = 0\nprint(list(numbers))\n",
"step-5": "#Array In Python\n\nfrom array import array\n\nnumbers = array(\"i\",[1,2,3])\nnumbers[0] = 0\nprint(list(numbers))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import json


def _load(path):
    """Parse and return the JSON document stored at *path*."""
    with open(path, 'r') as handle:
        return json.load(handle)


subjects = _load("sub.json")
# Emit the document as compact JSON (no spaces after separators).
print(json.dumps(subjects, separators=(',', ':')))
|
normal
|
{
"blob_id": "98bd4eb25a76fb9184f9abfcb920a6fbe46b9394",
"index": 631,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('sub.json', 'r') as subject_file:\n subjects = json.load(subject_file)\nprint(json.dumps(subjects, separators=(',', ':')))\n",
"step-3": "<mask token>\nsubjects = []\nwith open('sub.json', 'r') as subject_file:\n subjects = json.load(subject_file)\nprint(json.dumps(subjects, separators=(',', ':')))\n",
"step-4": "import json\nsubjects = []\nwith open('sub.json', 'r') as subject_file:\n subjects = json.load(subject_file)\nprint(json.dumps(subjects, separators=(',', ':')))\n",
"step-5": "import json\n\nsubjects = []\n\nwith open(\"sub.json\", 'r') as subject_file:\n\tsubjects = json.load(subject_file)\n\nprint(json.dumps(subjects, separators=(',',':')))\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .kahfm_batch import KaHFMBatch
|
flexible
|
{
"blob_id": "8e317d4d8ae8dc3d692d237e7e0abfaf37aecbb6",
"index": 7017,
"step-1": "<mask token>\n",
"step-2": "from .kahfm_batch import KaHFMBatch\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def test_createColorVector():
    """Exercise createColorVector() on a fixed structure template.

    Covers three configurations: no normalization vector, a normalization
    vector with "no secondary peak" mode on, and mixed-case k-mers with
    secondary-peak mode off.  Expected heat-map values are recomputed here
    with an independent regex-based scan of the template.
    """
    k = 2
    no_sec_peak = 1
    # Structure template string; element letters presumably mean E=exterior,
    # S=stem, I=internal loop, B=bulge, H=hairpin -- TODO confirm.
    template = 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'
    kmer_counts = {'EE': 5, 'ES': 7, 'SS': 20, 'SI': 10, 'II': 15, 'IS': 11,
        'SB': 5, 'BB': 6, 'BS': 5, 'SH': 4, 'HH': 5, 'HS': 4, 'SE': 7}
    template_sTree = STree.STree(template)
    # Case 1: no normalization vector -> raw log2 of counts expected.
    normalization_vector1 = None
    color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k,
        template_sTree, kmer_counts, color_hm, no_sec_peak,
        normalization_vector1)
    assert len(color_hm) == len(new_color_hm1)
    for i in color_hm.keys():
        x = color_hm[i]
        if x > 0:
            assert new_color_hm1[i] == math.log(x, 2)
        else:
            assert new_color_hm1[i] == 0
    assert len(not_matched1) == 0
    assert color_domain_max1 == 4.954196310386876
    # Case 2: normalization vector applied; rebuild the expected heat map
    # independently by scanning the template with lookahead regexes.
    normalization_vector2 = {'EE': 0, 'ES': 0, 'SS': 0.7, 'SI': 0.1, 'II':
        0.2, 'IS': 0, 'SB': 0, 'BB': 0, 'BS': 0, 'SH': 0, 'HH': 0, 'HS': 0,
        'SE': 0}
    color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k,
        template_sTree, kmer_counts, color_hm, no_sec_peak,
        normalization_vector2)
    last_idx = -1
    last_kmer = ''
    test_color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    for kmer in normalization_vector2:
        # Overlapping match positions of this k-mer within the template.
        indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.
            escape(kmer)), template)]
        indices_list.sort()
        norm = normalization_vector2[kmer]
        if norm == 0:
            norm = 1  # a zero entry means "do not normalize"
        for idx in indices_list:
            for i in range(0, k):
                current_idx = str(idx + i + 1)
                # Skip positions already covered by the directly preceding,
                # overlapping occurrence of the same k-mer.
                if last_idx + 2 == int(current_idx) and last_kmer == kmer:
                    continue
                test_color_hm[current_idx] += kmer_counts[kmer] / norm
            last_idx = idx
            last_kmer = kmer
    test_color_hm = {x: (math.log(y, 2) if y > 0 else y) for x, y in
        test_color_hm.items()}
    test_color_domain_max = max(test_color_hm.values())
    assert new_color_hm1 is not new_color_hm2
    assert len(color_hm) == len(new_color_hm2)
    assert len(not_matched2) == 0
    assert color_domain_max2 == test_color_domain_max
    for i in new_color_hm2.keys():
        assert new_color_hm2[i] == test_color_hm[i]
    # Case 3: mixed-case k-mers with no_sec_peak == 0 -- only the uppercase
    # position within each k-mer receives weight.
    kmer_counts2 = {'Ee': 5, 'eS': 7, 'sS': 20, 'Si': 10, 'iI': 15, 'iS':
        11, 'Sb': 5, 'Bb': 6, 'bS': 5, 'sH': 4, 'Hh': 5, 'hS': 4, 'Se': 7}
    no_sec_peak2 = 0
    color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k,
        template_sTree, kmer_counts2, color_hm, no_sec_peak2,
        normalization_vector2)
    test_color_hm2 = {str(i): (0) for i in range(1, len(template) + 1)}
    for kmer in kmer_counts2.keys():
        indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.
            escape(kmer.upper())), template)]
        indices_list.sort()
        norm = normalization_vector2[kmer.upper()]
        if norm == 0:
            norm = 1
        for idx in indices_list:
            # Weight only the uppercase character's position in the match.
            idx = [(idx + i) for i in range(0, len(kmer)) if kmer[i].isupper()
                ][0]
            test_color_hm2[str(idx + 1)] += kmer_counts2[kmer] / norm
    test_color_hm2 = {x: (math.log(y, 2) if y > 0 else y) for x, y in
        test_color_hm2.items()}
    test_color_domain_max2 = max(test_color_hm2.values())
    assert len(not_matched3) == 0
    assert new_color_hm2 is not new_color_hm3
    assert len(color_hm) == len(new_color_hm3)
    for i in test_color_hm2:
        assert test_color_hm2[i] == new_color_hm3[i]
    assert test_color_domain_max2 == color_domain_max3
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_createColorVector():
    """Exercise createColorVector() on a fixed structure template.

    Covers three configurations: no normalization vector, a normalization
    vector with "no secondary peak" mode on, and mixed-case k-mers with
    secondary-peak mode off.  Expected heat-map values are recomputed here
    with an independent regex-based scan of the template.
    """
    k = 2
    no_sec_peak = 1
    # Structure template string; element letters presumably mean E=exterior,
    # S=stem, I=internal loop, B=bulge, H=hairpin -- TODO confirm.
    template = 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'
    kmer_counts = {'EE': 5, 'ES': 7, 'SS': 20, 'SI': 10, 'II': 15, 'IS': 11,
        'SB': 5, 'BB': 6, 'BS': 5, 'SH': 4, 'HH': 5, 'HS': 4, 'SE': 7}
    template_sTree = STree.STree(template)
    # Case 1: no normalization vector -> raw log2 of counts expected.
    normalization_vector1 = None
    color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k,
        template_sTree, kmer_counts, color_hm, no_sec_peak,
        normalization_vector1)
    assert len(color_hm) == len(new_color_hm1)
    for i in color_hm.keys():
        x = color_hm[i]
        if x > 0:
            assert new_color_hm1[i] == math.log(x, 2)
        else:
            assert new_color_hm1[i] == 0
    assert len(not_matched1) == 0
    assert color_domain_max1 == 4.954196310386876
    # Case 2: normalization vector applied; rebuild the expected heat map
    # independently by scanning the template with lookahead regexes.
    normalization_vector2 = {'EE': 0, 'ES': 0, 'SS': 0.7, 'SI': 0.1, 'II':
        0.2, 'IS': 0, 'SB': 0, 'BB': 0, 'BS': 0, 'SH': 0, 'HH': 0, 'HS': 0,
        'SE': 0}
    color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k,
        template_sTree, kmer_counts, color_hm, no_sec_peak,
        normalization_vector2)
    last_idx = -1
    last_kmer = ''
    test_color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    for kmer in normalization_vector2:
        # Overlapping match positions of this k-mer within the template.
        indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.
            escape(kmer)), template)]
        indices_list.sort()
        norm = normalization_vector2[kmer]
        if norm == 0:
            norm = 1  # a zero entry means "do not normalize"
        for idx in indices_list:
            for i in range(0, k):
                current_idx = str(idx + i + 1)
                # Skip positions already covered by the directly preceding,
                # overlapping occurrence of the same k-mer.
                if last_idx + 2 == int(current_idx) and last_kmer == kmer:
                    continue
                test_color_hm[current_idx] += kmer_counts[kmer] / norm
            last_idx = idx
            last_kmer = kmer
    test_color_hm = {x: (math.log(y, 2) if y > 0 else y) for x, y in
        test_color_hm.items()}
    test_color_domain_max = max(test_color_hm.values())
    assert new_color_hm1 is not new_color_hm2
    assert len(color_hm) == len(new_color_hm2)
    assert len(not_matched2) == 0
    assert color_domain_max2 == test_color_domain_max
    for i in new_color_hm2.keys():
        assert new_color_hm2[i] == test_color_hm[i]
    # Case 3: mixed-case k-mers with no_sec_peak == 0 -- only the uppercase
    # position within each k-mer receives weight.
    kmer_counts2 = {'Ee': 5, 'eS': 7, 'sS': 20, 'Si': 10, 'iI': 15, 'iS':
        11, 'Sb': 5, 'Bb': 6, 'bS': 5, 'sH': 4, 'Hh': 5, 'hS': 4, 'Se': 7}
    no_sec_peak2 = 0
    color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k,
        template_sTree, kmer_counts2, color_hm, no_sec_peak2,
        normalization_vector2)
    test_color_hm2 = {str(i): (0) for i in range(1, len(template) + 1)}
    for kmer in kmer_counts2.keys():
        indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.
            escape(kmer.upper())), template)]
        indices_list.sort()
        norm = normalization_vector2[kmer.upper()]
        if norm == 0:
            norm = 1
        for idx in indices_list:
            # Weight only the uppercase character's position in the match.
            idx = [(idx + i) for i in range(0, len(kmer)) if kmer[i].isupper()
                ][0]
            test_color_hm2[str(idx + 1)] += kmer_counts2[kmer] / norm
    test_color_hm2 = {x: (math.log(y, 2) if y > 0 else y) for x, y in
        test_color_hm2.items()}
    test_color_domain_max2 = max(test_color_hm2.values())
    assert len(not_matched3) == 0
    assert new_color_hm2 is not new_color_hm3
    assert len(color_hm) == len(new_color_hm3)
    for i in test_color_hm2:
        assert test_color_hm2[i] == new_color_hm3[i]
    assert test_color_domain_max2 == color_domain_max3
def test_helpAddIBloop():
    """Check helpAddIBloop() element insertion for every loop-type combo.

    Exercises forward and backward extension with internal loops and/or
    bulges enabled, using k=3 (three-character structure elements).
    """
    k = 3
    # Internal loop + bulge, forward direction.
    template1 = ['EEE']
    internalloop = True
    bulge = True
    forward = True
    new_template1 = helpAddIBloop(k, template1, internalloop, bulge, forward)
    # Internal loop + bulge, backward direction.
    template2 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH']
    internalloop = True
    bulge = True
    forward = False
    new_template2 = helpAddIBloop(k, template2, internalloop, bulge, forward)
    # Internal loop only, both directions.
    template3_f = ['EEE']
    template3_b = ['EEE', 'SSS', 'III', 'SSS', 'HHH']
    internalloop = True
    bulge = False
    forward = True
    new_template3_f = helpAddIBloop(k, template3_f, internalloop, bulge,
        forward)
    forward = False
    new_template3_b = helpAddIBloop(k, template3_b, internalloop, bulge,
        forward)
    # Bulge only, both directions.
    template4_f = ['EEE']
    template4_b = ['EEE', 'SSS', 'BBB', 'SSS', 'HHH']
    internalloop = False
    bulge = True
    forward = True
    new_template4_f = helpAddIBloop(k, template4_f, internalloop, bulge,
        forward)
    forward = False
    new_template4_b = helpAddIBloop(k, template4_b, internalloop, bulge,
        forward)
    assert new_template1 == ['EEE', 'SSS', 'III', 'SSS', 'BBB']
    assert new_template2 == ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS',
        'HHH', 'SSS', 'SSS', 'III']
    assert new_template3_f == ['EEE', 'SSS', 'III']
    assert new_template3_b == ['EEE', 'SSS', 'III', 'SSS', 'HHH', 'SSS', 'III']
    assert new_template4_f == ['EEE', 'SSS', 'BBB']
    assert new_template4_b == ['EEE', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS']
def test_element2dotbracket():
    """Verify element2dotbracket() renders element lists as dot-bracket.

    Each expected string pairs '(' with ')' around '.' loop regions; the
    final boolean argument presumably selects opening vs. closing bracket
    direction for the rendered slice -- TODO confirm.
    """
    k3 = 3
    k2 = 2
    k4 = 4
    elem_list1 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',
        'SSS', 'III', 'SSS', 'EEE']
    dotbracket_string1 = '...(((...(((...(((...))))))...)))...'
    elem_list2 = ['EE', 'SS', 'II', 'SS', 'HH', 'SS', 'II', 'SS', 'MM',
        'SS', 'BB', 'SS', 'HH', 'SS', 'SS', 'EE']
    dotbracket_string2 = '..((..((..))..))..((..((..))))..'
    elem_list3 = ['EEEE', 'SSSS', 'SSSS', 'EEEE']
    dotbracket_string3 = '....(((())))....'
    elem_list4 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',
        'SSS', 'III', 'SSS', 'MMM', 'SSS', 'HHH', 'SSS', 'EEE']
    dotbracket_string4 = '...(((...(((...(((...))))))...)))...(((...)))...'
    # Each structure is rendered in slices -- opening half (True) then
    # closing half (False) -- and concatenated to the full string.
    db1 = []
    db1.extend(element2dotbracket(elem_list1, k3, 0, 6, True))
    db1.extend(element2dotbracket(elem_list1, k3, 7, len(elem_list1) - 1,
        False))
    db1 = ''.join(db1)
    db2 = []
    db2.extend(element2dotbracket(elem_list2, k2, 0, 4, True))
    db2.extend(element2dotbracket(elem_list2, k2, 5, 8, False))
    db2.extend(element2dotbracket(elem_list2, k2, 9, 12, True))
    db2.extend(element2dotbracket(elem_list2, k2, 13, len(elem_list2) - 1,
        False))
    db2 = ''.join(db2)
    db3 = []
    db3.extend(element2dotbracket(elem_list3, k4, 0, 1, True))
    db3.extend(element2dotbracket(elem_list3, k4, 2, len(elem_list3) - 1,
        False))
    db3 = ''.join(db3)
    db4 = []
    db4.extend(element2dotbracket(elem_list4, k3, 0, 6, True))
    db4.extend(element2dotbracket(elem_list4, k3, 7, 11, False))
    db4.extend(element2dotbracket(elem_list4, k3, 12, 13, True))
    db4.extend(element2dotbracket(elem_list4, k3, 14, len(elem_list4) - 1,
        False))
    db4 = ''.join(db4)
    assert db1 == dotbracket_string1
    assert db2 == dotbracket_string2
    assert db3 == dotbracket_string3
    assert db4 == dotbracket_string4
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_processData():
    """End-to-end test of SecStructure.processData().

    Builds profiles from the bundled example FASTA/structure files, then
    checks alphabets, k-mer counts, the generated structure template and
    its dot-bracket rendering -- first with no_sec_peak on, then off, and
    finally for several manually forced alphabets.
    """
    data = ['example/example1.fa', 'example/example2.fa']
    struct_data = ['example/exampleStrucData/exampleStructuralData1.fa',
        'example/exampleStrucData/exampleStructuralData2.fa']
    k = 3
    top = 10
    peak = None
    feature = None
    cmd = False
    no_sec_peak = 1
    process = SecStructure(data, data, k, peak, top, feature, cmd,
        struct_data, no_sec_peak)
    alphabet1 = process.getStructProfile1().getAlphabet()
    alphabet2 = process.getStructProfile2().getAlphabet()
    kmer_counts1 = process.getStructProfile1().getProfile()
    kmer_counts2 = process.getStructProfile2().getProfile()
    results = SecStructure.processData(process)
    template1 = results[0][0]
    template2 = results[1][0]
    dotbracket_string1 = results[0][1]
    dotbracket_string2 = results[1][1]
    # Profile 1 should use the full structure alphabet; profile 2 only S/E.
    assert len(alphabet1) == 6
    for e in ['S', 'H', 'B', 'I', 'M', 'E']:
        assert e in alphabet1
    assert len(alphabet2) == 2
    assert 'S' in alphabet2
    assert 'E' in alphabet2
    # Expected counts presumably derived from a reference run on the
    # example fixtures -- TODO confirm.
    assert kmer_counts1 == {'EE': 4, 'ES': 1, 'SS': 11, 'SH': 1, 'HH': 3,
        'II': 4, 'IS': 1, 'SM': 1, 'MM': 1, 'BB': 4, 'BS': 1}
    assert kmer_counts2 == {'SS': 20, 'EE': 7, 'ES': 3, 'SE': 2}
    assert template1 == 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSMMMSSSHHHSSSEEE'
    assert dotbracket_string1 == '...(((...(((...(((...))))))...)))...(((...)))...'
    assert template2 == 'EEESSSSSSEEE'
    assert dotbracket_string2 == '...((()))...'
    # Same pipeline with no_sec_peak off: mixed-case alphabets appear.
    no_sec_peak = 0
    process2 = SecStructure(data, data, k, peak, top, feature, cmd,
        struct_data, no_sec_peak)
    alphabet1 = process2.getStructProfile1().getAlphabet()
    alphabet2 = process2.getStructProfile2().getAlphabet()
    kmer_counts1 = process2.getStructProfile1().getProfile()
    kmer_counts2 = process2.getStructProfile2().getProfile()
    results = SecStructure.processData(process2)
    template1 = results[0][0]
    template2 = results[1][0]
    dotbracket_string1 = results[0][1]
    dotbracket_string2 = results[1][1]
    assert len(alphabet1) == 10
    for e in ['s', 'h', 'b', 'i', 'm', 'E', 'S', 'B', 'I', 'E']:
        assert e in alphabet1
    assert len(alphabet2) == 4
    for e in ['s', 'S', 'e', 'E']:
        assert e in alphabet2
    assert kmer_counts1 == {'eE': 1, 'Es': 1, 'sS': 1, 'Sh': 1, 'iI': 1,
        'Is': 1, 'bB': 1, 'Bs': 1}
    assert kmer_counts2 == {'sS': 3, 'Ss': 2, 'sE': 1, 'Ee': 1, 'Se': 1}
    assert template1 == 'EEESSSIIISSSBBBSSSSSSSSSIIISSSEEE'
    assert dotbracket_string1 == '...(((...(((...((())))))...)))...'
    assert template2 == 'EEESSSSSSEEE'
    assert dotbracket_string2 == '...((()))...'
    # Force reduced alphabets on the cached profiles and re-run to check
    # template construction for each element subset.
    sProfile1 = process.getStructProfile1()
    sProfile2 = process.getStructProfile2()
    alphabet3 = ['S', 'B', 'E']
    alphabet4 = ['S', 'I', 'E']
    sProfile1.setAlphabet(alphabet3)
    sProfile2.setAlphabet(alphabet4)
    results = SecStructure.processData(process)
    template1 = results[0][0]
    template2 = results[1][0]
    dotbracket_string1 = results[0][1]
    dotbracket_string2 = results[1][1]
    assert template1 == 'EEESSSBBBSSSSSSSSSEEE'
    assert dotbracket_string1 == '...(((...((())))))...'
    assert template2 == 'EEESSSIIISSSSSSIIISSSEEE'
    assert dotbracket_string2 == '...(((...((()))...)))...'
    alphabet5 = ['S', 'H', 'E']
    alphabet6 = ['S', 'H', 'M', 'E']
    sProfile1.setAlphabet(alphabet5)
    sProfile2.setAlphabet(alphabet6)
    results = SecStructure.processData(process)
    template1 = results[0][0]
    template2 = results[1][0]
    dotbracket_string1 = results[0][1]
    dotbracket_string2 = results[1][1]
    assert template1 == 'EEESSSHHHSSSEEE'
    assert dotbracket_string1 == '...(((...)))...'
    assert template2 == 'EEESSSHHHSSSMMMSSSHHHSSSEEE'
    assert dotbracket_string2 == '...(((...)))...(((...)))...'
    alphabet7 = ['S', 'H', 'E', 'B', 'I']
    alphabet8 = ['S', 'M', 'E']
    sProfile1.setAlphabet(alphabet7)
    sProfile2.setAlphabet(alphabet8)
    results = SecStructure.processData(process)
    template1 = results[0][0]
    template2 = results[1][0]
    dotbracket_string1 = results[0][1]
    dotbracket_string2 = results[1][1]
    assert template1 == 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'
    assert dotbracket_string1 == '...(((...(((...(((...))))))...)))...'
    assert template2 == 'EEESSSSSSEEE'
    assert dotbracket_string2 == '...((()))...'
def test_createColorVector():
    """Exercise createColorVector() on a fixed structure template.

    Covers three configurations: no normalization vector, a normalization
    vector with "no secondary peak" mode on, and mixed-case k-mers with
    secondary-peak mode off.  Expected heat-map values are recomputed here
    with an independent regex-based scan of the template.
    """
    k = 2
    no_sec_peak = 1
    # Structure template string; element letters presumably mean E=exterior,
    # S=stem, I=internal loop, B=bulge, H=hairpin -- TODO confirm.
    template = 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'
    kmer_counts = {'EE': 5, 'ES': 7, 'SS': 20, 'SI': 10, 'II': 15, 'IS': 11,
        'SB': 5, 'BB': 6, 'BS': 5, 'SH': 4, 'HH': 5, 'HS': 4, 'SE': 7}
    template_sTree = STree.STree(template)
    # Case 1: no normalization vector -> raw log2 of counts expected.
    normalization_vector1 = None
    color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k,
        template_sTree, kmer_counts, color_hm, no_sec_peak,
        normalization_vector1)
    assert len(color_hm) == len(new_color_hm1)
    for i in color_hm.keys():
        x = color_hm[i]
        if x > 0:
            assert new_color_hm1[i] == math.log(x, 2)
        else:
            assert new_color_hm1[i] == 0
    assert len(not_matched1) == 0
    assert color_domain_max1 == 4.954196310386876
    # Case 2: normalization vector applied; rebuild the expected heat map
    # independently by scanning the template with lookahead regexes.
    normalization_vector2 = {'EE': 0, 'ES': 0, 'SS': 0.7, 'SI': 0.1, 'II':
        0.2, 'IS': 0, 'SB': 0, 'BB': 0, 'BS': 0, 'SH': 0, 'HH': 0, 'HS': 0,
        'SE': 0}
    color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k,
        template_sTree, kmer_counts, color_hm, no_sec_peak,
        normalization_vector2)
    last_idx = -1
    last_kmer = ''
    test_color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    for kmer in normalization_vector2:
        # Overlapping match positions of this k-mer within the template.
        indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.
            escape(kmer)), template)]
        indices_list.sort()
        norm = normalization_vector2[kmer]
        if norm == 0:
            norm = 1  # a zero entry means "do not normalize"
        for idx in indices_list:
            for i in range(0, k):
                current_idx = str(idx + i + 1)
                # Skip positions already covered by the directly preceding,
                # overlapping occurrence of the same k-mer.
                if last_idx + 2 == int(current_idx) and last_kmer == kmer:
                    continue
                test_color_hm[current_idx] += kmer_counts[kmer] / norm
            last_idx = idx
            last_kmer = kmer
    test_color_hm = {x: (math.log(y, 2) if y > 0 else y) for x, y in
        test_color_hm.items()}
    test_color_domain_max = max(test_color_hm.values())
    assert new_color_hm1 is not new_color_hm2
    assert len(color_hm) == len(new_color_hm2)
    assert len(not_matched2) == 0
    assert color_domain_max2 == test_color_domain_max
    for i in new_color_hm2.keys():
        assert new_color_hm2[i] == test_color_hm[i]
    # Case 3: mixed-case k-mers with no_sec_peak == 0 -- only the uppercase
    # position within each k-mer receives weight.
    kmer_counts2 = {'Ee': 5, 'eS': 7, 'sS': 20, 'Si': 10, 'iI': 15, 'iS':
        11, 'Sb': 5, 'Bb': 6, 'bS': 5, 'sH': 4, 'Hh': 5, 'hS': 4, 'Se': 7}
    no_sec_peak2 = 0
    color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k,
        template_sTree, kmer_counts2, color_hm, no_sec_peak2,
        normalization_vector2)
    test_color_hm2 = {str(i): (0) for i in range(1, len(template) + 1)}
    for kmer in kmer_counts2.keys():
        indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.
            escape(kmer.upper())), template)]
        indices_list.sort()
        norm = normalization_vector2[kmer.upper()]
        if norm == 0:
            norm = 1
        for idx in indices_list:
            # Weight only the uppercase character's position in the match.
            idx = [(idx + i) for i in range(0, len(kmer)) if kmer[i].isupper()
                ][0]
            test_color_hm2[str(idx + 1)] += kmer_counts2[kmer] / norm
    test_color_hm2 = {x: (math.log(y, 2) if y > 0 else y) for x, y in
        test_color_hm2.items()}
    test_color_domain_max2 = max(test_color_hm2.values())
    assert len(not_matched3) == 0
    assert new_color_hm2 is not new_color_hm3
    assert len(color_hm) == len(new_color_hm3)
    for i in test_color_hm2:
        assert test_color_hm2[i] == new_color_hm3[i]
    assert test_color_domain_max2 == color_domain_max3
def test_helpAddIBloop():
    """helpAddIBloop extends a template element list with internal-loop
    ('III') and/or bulge ('BBB') elements plus their stems; the last flag
    selects whether the opening (True) or closing (False) side is extended
    (compare the expected lists below)."""
    k = 3
    # Internal loops and bulges enabled, forward extension.
    result_fwd_both = helpAddIBloop(k, ['EEE'], True, True, True)
    # Internal loops and bulges enabled, backward extension.
    result_bwd_both = helpAddIBloop(
        k, ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH'], True, True, False)
    # Internal loops only, both directions.
    result_fwd_iloop = helpAddIBloop(k, ['EEE'], True, False, True)
    result_bwd_iloop = helpAddIBloop(
        k, ['EEE', 'SSS', 'III', 'SSS', 'HHH'], True, False, False)
    # Bulges only, both directions.
    result_fwd_bulge = helpAddIBloop(k, ['EEE'], False, True, True)
    result_bwd_bulge = helpAddIBloop(
        k, ['EEE', 'SSS', 'BBB', 'SSS', 'HHH'], False, True, False)
    assert result_fwd_both == ['EEE', 'SSS', 'III', 'SSS', 'BBB']
    assert result_bwd_both == ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS',
                               'HHH', 'SSS', 'SSS', 'III']
    assert result_fwd_iloop == ['EEE', 'SSS', 'III']
    assert result_bwd_iloop == ['EEE', 'SSS', 'III', 'SSS', 'HHH', 'SSS', 'III']
    assert result_fwd_bulge == ['EEE', 'SSS', 'BBB']
    assert result_bwd_bulge == ['EEE', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS']
def test_element2dotbracket():
    """element2dotbracket renders a slice of a structure-element list as
    dot-bracket characters; the boolean flag apparently selects opening
    vs. closing brackets for stems (inferred from the expected strings)."""
    # Template without a multiloop.
    segs1 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',
             'SSS', 'III', 'SSS', 'EEE']
    # Template with a multiloop.
    segs2 = ['EE', 'SS', 'II', 'SS', 'HH', 'SS', 'II', 'SS', 'MM',
             'SS', 'BB', 'SS', 'HH', 'SS', 'SS', 'EE']
    # Template without any loops.
    segs3 = ['EEEE', 'SSSS', 'SSSS', 'EEEE']
    # Template containing every element type.
    segs4 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',
             'SSS', 'III', 'SSS', 'MMM', 'SSS', 'HHH', 'SSS', 'EEE']
    # Concatenate the opening and closing halves of each template.
    result1 = (''.join(element2dotbracket(segs1, 3, 0, 6, True))
               + ''.join(element2dotbracket(segs1, 3, 7, len(segs1) - 1, False)))
    result2 = (''.join(element2dotbracket(segs2, 2, 0, 4, True))
               + ''.join(element2dotbracket(segs2, 2, 5, 8, False))
               + ''.join(element2dotbracket(segs2, 2, 9, 12, True))
               + ''.join(element2dotbracket(segs2, 2, 13, len(segs2) - 1, False)))
    result3 = (''.join(element2dotbracket(segs3, 4, 0, 1, True))
               + ''.join(element2dotbracket(segs3, 4, 2, len(segs3) - 1, False)))
    result4 = (''.join(element2dotbracket(segs4, 3, 0, 6, True))
               + ''.join(element2dotbracket(segs4, 3, 7, 11, False))
               + ''.join(element2dotbracket(segs4, 3, 12, 13, True))
               + ''.join(element2dotbracket(segs4, 3, 14, len(segs4) - 1, False)))
    assert result1 == '...(((...(((...(((...))))))...)))...'
    assert result2 == '..((..((..))..))..((..((..))))..'
    assert result3 == '....(((())))....'
    assert result4 == '...(((...(((...(((...))))))...)))...(((...)))...'
<|reserved_special_token_1|>
from src.secStructure import *
from suffix_trees import STree
import math
import re
def test_processData():
    """End-to-end check of SecStructure.processData.

    Builds SecStructure objects from the bundled example files and verifies
    the derived alphabets, k-mer profiles, consensus templates and
    dot-bracket strings for both peak modes, then re-runs processData after
    injecting hand-picked alphabets into the structure profiles.
    """
    seq_files = ['example/example1.fa', 'example/example2.fa']
    struct_files = ['example/exampleStrucData/exampleStructuralData1.fa',
                    'example/exampleStrucData/exampleStructuralData2.fa']
    k, top, peak, feature, cmd = 3, 10, None, None, False

    # Case 1: secondary-structure peak positions ignored (no_sec_peak = 1).
    proc = SecStructure(seq_files, seq_files, k, peak, top, feature, cmd,
                        struct_files, 1)
    abc1 = proc.getStructProfile1().getAlphabet()
    abc2 = proc.getStructProfile2().getAlphabet()
    counts1 = proc.getStructProfile1().getProfile()
    counts2 = proc.getStructProfile2().getProfile()
    outcome = SecStructure.processData(proc)
    tmpl1, tmpl2 = outcome[0][0], outcome[1][0]
    db_str1, db_str2 = outcome[0][1], outcome[1][1]
    assert len(abc1) == 6
    assert all(e in abc1 for e in 'SHBIME')
    assert len(abc2) == 2
    assert 'S' in abc2
    assert 'E' in abc2
    assert counts1 == {'EE': 4, 'ES': 1, 'SS': 11, 'SH': 1, 'HH': 3,
                       'II': 4, 'IS': 1, 'SM': 1, 'MM': 1, 'BB': 4, 'BS': 1}
    assert counts2 == {'SS': 20, 'EE': 7, 'ES': 3, 'SE': 2}
    assert tmpl1 == 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSMMMSSSHHHSSSEEE'
    assert db_str1 == '...(((...(((...(((...))))))...)))...(((...)))...'
    assert tmpl2 == 'EEESSSSSSEEE'
    assert db_str2 == '...((()))...'

    # Case 2: peak positions kept (no_sec_peak = 0); lowercase letters
    # appear in the alphabets and k-mers.
    proc2 = SecStructure(seq_files, seq_files, k, peak, top, feature, cmd,
                         struct_files, 0)
    abc1 = proc2.getStructProfile1().getAlphabet()
    abc2 = proc2.getStructProfile2().getAlphabet()
    counts1 = proc2.getStructProfile1().getProfile()
    counts2 = proc2.getStructProfile2().getProfile()
    outcome = SecStructure.processData(proc2)
    tmpl1, tmpl2 = outcome[0][0], outcome[1][0]
    db_str1, db_str2 = outcome[0][1], outcome[1][1]
    assert len(abc1) == 10
    assert all(e in abc1 for e in 'shbimESBIE')
    assert len(abc2) == 4
    assert all(e in abc2 for e in 'sSeE')
    assert counts1 == {'eE': 1, 'Es': 1, 'sS': 1, 'Sh': 1, 'iI': 1,
                       'Is': 1, 'bB': 1, 'Bs': 1}
    assert counts2 == {'sS': 3, 'Ss': 2, 'sE': 1, 'Ee': 1, 'Se': 1}
    assert tmpl1 == 'EEESSSIIISSSBBBSSSSSSSSSIIISSSEEE'
    assert db_str1 == '...(((...(((...((())))))...)))...'
    assert tmpl2 == 'EEESSSSSSEEE'
    assert db_str2 == '...((()))...'

    # Case 3: overwrite the alphabets on the first process and re-run
    # processData for several hand-picked alphabet pairs.
    profile1 = proc.getStructProfile1()
    profile2 = proc.getStructProfile2()
    cases = (
        # 3a: no multiloop — bulge-only vs. internal-loop-only
        (['S', 'B', 'E'], ['S', 'I', 'E'],
         'EEESSSBBBSSSSSSSSSEEE', '...(((...((())))))...',
         'EEESSSIIISSSSSSIIISSSEEE', '...(((...((()))...)))...'),
        # 3b: hairpin only vs. hairpin plus multiloop
        (['S', 'H', 'E'], ['S', 'H', 'M', 'E'],
         'EEESSSHHHSSSEEE', '...(((...)))...',
         'EEESSSHHHSSSMMMSSSHHHSSSEEE', '...(((...)))...(((...)))...'),
        # 3c: all loop types but no multiloop vs. multiloop without hairpin
        (['S', 'H', 'E', 'B', 'I'], ['S', 'M', 'E'],
         'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE',
         '...(((...(((...(((...))))))...)))...',
         'EEESSSSSSEEE', '...((()))...'),
    )
    for abc_a, abc_b, want_t1, want_db1, want_t2, want_db2 in cases:
        profile1.setAlphabet(abc_a)
        profile2.setAlphabet(abc_b)
        outcome = SecStructure.processData(proc)
        assert outcome[0][0] == want_t1
        assert outcome[0][1] == want_db1
        assert outcome[1][0] == want_t2
        assert outcome[1][1] == want_db2
def test_createColorVector():
    """Check createColorVector, which distributes k-mer counts over the
    positions of a structure template and log2-scales the result.

    Covered cases: no normalization vector, a normalization vector, and
    secondary-peak k-mers (mixed-case; the uppercase letter marks the
    position that receives the colour).
    """
    # Case 1: no normalization vector.
    k = 2
    no_sec_peak = 1
    template = 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'
    kmer_counts = {'EE': 5, 'ES': 7, 'SS': 20, 'SI': 10, 'II': 15, 'IS': 11,
        'SB': 5, 'BB': 6, 'BS': 5, 'SH': 4, 'HH': 5, 'HS': 4, 'SE': 7}
    template_sTree = STree.STree(template)
    normalization_vector1 = None
    # Heat map: one zero-initialised entry per 1-based template position.
    color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k,
        template_sTree, kmer_counts, color_hm, no_sec_peak,
        normalization_vector1)
    assert len(color_hm) == len(new_color_hm1)
    # Each returned value must equal the log2 of the corresponding input
    # entry; zero entries stay zero.
    for i in color_hm.keys():
        x = color_hm[i]
        if x > 0:
            assert new_color_hm1[i] == math.log(x, 2)
        else:
            assert new_color_hm1[i] == 0
    assert len(not_matched1) == 0
    assert color_domain_max1 == 4.954196310386876
    # Case 2: with a normalization vector (counts are divided by the k-mer's
    # weight; a weight of 0 is treated as "no normalization", i.e. divide by 1).
    normalization_vector2 = {'EE': 0, 'ES': 0, 'SS': 0.7, 'SI': 0.1, 'II':
        0.2, 'IS': 0, 'SB': 0, 'BB': 0, 'BS': 0, 'SH': 0, 'HH': 0, 'HS': 0,
        'SE': 0}
    color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k,
        template_sTree, kmer_counts, color_hm, no_sec_peak,
        normalization_vector2)
    # Recompute the expected heat map by hand: every match position of each
    # k-mer gets count/norm added, skipping the overlap position when the
    # same k-mer matched directly before (last_idx/last_kmer state).
    last_idx = -1
    last_kmer = ''
    test_color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    for kmer in normalization_vector2:
        # Lookahead regex so that overlapping occurrences are found too.
        indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.
            escape(kmer)), template)]
        indices_list.sort()
        norm = normalization_vector2[kmer]
        if norm == 0:
            norm = 1
        for idx in indices_list:
            for i in range(0, k):
                current_idx = str(idx + i + 1)
                if last_idx + 2 == int(current_idx) and last_kmer == kmer:
                    continue
                test_color_hm[current_idx] += kmer_counts[kmer] / norm
            last_idx = idx
            last_kmer = kmer
    # Log2-scale; zeros stay untouched.
    test_color_hm = {x: (math.log(y, 2) if y > 0 else y) for x, y in
        test_color_hm.items()}
    test_color_domain_max = max(test_color_hm.values())
    assert new_color_hm1 is not new_color_hm2
    assert len(color_hm) == len(new_color_hm2)
    assert len(not_matched2) == 0
    assert color_domain_max2 == test_color_domain_max
    for i in new_color_hm2.keys():
        assert new_color_hm2[i] == test_color_hm[i]
    # Case 3: normalization vector plus secondary peak positions; the
    # uppercase letter inside each 2-mer marks the coloured position.
    kmer_counts2 = {'Ee': 5, 'eS': 7, 'sS': 20, 'Si': 10, 'iI': 15, 'iS':
        11, 'Sb': 5, 'Bb': 6, 'bS': 5, 'sH': 4, 'Hh': 5, 'hS': 4, 'Se': 7}
    no_sec_peak2 = 0
    color_hm = {str(i): (0) for i in range(1, len(template) + 1)}
    new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k,
        template_sTree, kmer_counts2, color_hm, no_sec_peak2,
        normalization_vector2)
    test_color_hm2 = {str(i): (0) for i in range(1, len(template) + 1)}
    for kmer in kmer_counts2.keys():
        # Match the upper-cased k-mer against the (uppercase) template.
        indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.
            escape(kmer.upper())), template)]
        indices_list.sort()
        norm = normalization_vector2[kmer.upper()]
        if norm == 0:
            norm = 1
        for idx in indices_list:
            # Only the uppercase (peak) position inside the 2-mer is coloured.
            idx = [(idx + i) for i in range(0, len(kmer)) if kmer[i].isupper()
                ][0]
            test_color_hm2[str(idx + 1)] += kmer_counts2[kmer] / norm
    test_color_hm2 = {x: (math.log(y, 2) if y > 0 else y) for x, y in
        test_color_hm2.items()}
    test_color_domain_max2 = max(test_color_hm2.values())
    assert len(not_matched3) == 0
    assert new_color_hm2 is not new_color_hm3
    assert len(color_hm) == len(new_color_hm3)
    for i in test_color_hm2:
        assert test_color_hm2[i] == new_color_hm3[i]
    assert test_color_domain_max2 == color_domain_max3
def test_helpAddIBloop():
    """helpAddIBloop adds internal-loop ('III') and/or bulge ('BBB')
    elements plus stems to a template element list; the final flag picks
    which end of the template is extended (see the expected lists)."""
    k = 3
    # Both loop kinds, forward direction.
    got_fwd_all = helpAddIBloop(k, ['EEE'], True, True, True)
    # Both loop kinds, backward direction.
    got_bwd_all = helpAddIBloop(
        k, ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH'], True, True, False)
    # Internal loops only, forward then backward.
    got_fwd_il = helpAddIBloop(k, ['EEE'], True, False, True)
    got_bwd_il = helpAddIBloop(
        k, ['EEE', 'SSS', 'III', 'SSS', 'HHH'], True, False, False)
    # Bulges only, forward then backward.
    got_fwd_bg = helpAddIBloop(k, ['EEE'], False, True, True)
    got_bwd_bg = helpAddIBloop(
        k, ['EEE', 'SSS', 'BBB', 'SSS', 'HHH'], False, True, False)
    assert got_fwd_all == ['EEE', 'SSS', 'III', 'SSS', 'BBB']
    assert got_bwd_all == ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS',
                           'HHH', 'SSS', 'SSS', 'III']
    assert got_fwd_il == ['EEE', 'SSS', 'III']
    assert got_bwd_il == ['EEE', 'SSS', 'III', 'SSS', 'HHH', 'SSS', 'III']
    assert got_fwd_bg == ['EEE', 'SSS', 'BBB']
    assert got_bwd_bg == ['EEE', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS']
def test_element2dotbracket():
    """element2dotbracket turns the slice [start, end] of an element list
    into dot-bracket characters; the boolean flag apparently chooses
    opening vs. closing brackets for stems (inferred from expectations)."""
    # No multiloop.
    elems_a = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',
               'SSS', 'III', 'SSS', 'EEE']
    # With a multiloop.
    elems_b = ['EE', 'SS', 'II', 'SS', 'HH', 'SS', 'II', 'SS', 'MM',
               'SS', 'BB', 'SS', 'HH', 'SS', 'SS', 'EE']
    # No loops at all.
    elems_c = ['EEEE', 'SSSS', 'SSSS', 'EEEE']
    # Every element type present.
    elems_d = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',
               'SSS', 'III', 'SSS', 'MMM', 'SSS', 'HHH', 'SSS', 'EEE']
    # Assemble each dot-bracket string from its opening/closing halves.
    got_a = (''.join(element2dotbracket(elems_a, 3, 0, 6, True))
             + ''.join(element2dotbracket(elems_a, 3, 7, len(elems_a) - 1, False)))
    got_b = (''.join(element2dotbracket(elems_b, 2, 0, 4, True))
             + ''.join(element2dotbracket(elems_b, 2, 5, 8, False))
             + ''.join(element2dotbracket(elems_b, 2, 9, 12, True))
             + ''.join(element2dotbracket(elems_b, 2, 13, len(elems_b) - 1, False)))
    got_c = (''.join(element2dotbracket(elems_c, 4, 0, 1, True))
             + ''.join(element2dotbracket(elems_c, 4, 2, len(elems_c) - 1, False)))
    got_d = (''.join(element2dotbracket(elems_d, 3, 0, 6, True))
             + ''.join(element2dotbracket(elems_d, 3, 7, 11, False))
             + ''.join(element2dotbracket(elems_d, 3, 12, 13, True))
             + ''.join(element2dotbracket(elems_d, 3, 14, len(elems_d) - 1, False)))
    assert got_a == '...(((...(((...(((...))))))...)))...'
    assert got_b == '..((..((..))..))..((..((..))))..'
    assert got_c == '....(((())))....'
    assert got_d == '...(((...(((...(((...))))))...)))...(((...)))...'
<|reserved_special_token_1|>
from src.secStructure import *
from suffix_trees import STree
import math
import re
def test_processData():
    """End-to-end check of SecStructure.processData.

    Builds SecStructure objects from the bundled example sequence and
    structure files and verifies the derived alphabets, k-mer profiles,
    consensus templates and dot-bracket strings — first for both peak
    modes, then after injecting hand-picked alphabets into the profiles.
    """
    # Test1: ignoring peak position (no_sec_peak == 1 drops the peak info)
    data = ['example/example1.fa', 'example/example2.fa']
    struct_data = ['example/exampleStrucData/exampleStructuralData1.fa',
                   'example/exampleStrucData/exampleStructuralData2.fa']
    k = 3
    top = 10
    peak = None
    feature = None
    cmd = False
    no_sec_peak = 1  # flag-like int: 1 == ignore secondary-structure peaks
    # Executing
    process = SecStructure(data, data, k, peak, top, feature, cmd, struct_data, no_sec_peak)
    alphabet1 = process.getStructProfile1().getAlphabet()
    alphabet2 = process.getStructProfile2().getAlphabet()
    kmer_counts1 = process.getStructProfile1().getProfile()
    kmer_counts2 = process.getStructProfile2().getProfile()
    results = SecStructure.processData(process)
    template1 = results[0][0]
    template2 = results[1][0]
    dotbracket_string1 = results[0][1]
    dotbracket_string2 = results[1][1]
    # Testing
    assert len(alphabet1) == 6
    for e in ["S", "H", "B", "I", "M", "E"]:
        assert e in alphabet1
    assert len(alphabet2) == 2
    assert "S" in alphabet2
    assert "E" in alphabet2
    assert kmer_counts1 == {'EE': 4, 'ES': 1, 'SS': 11, 'SH': 1, 'HH': 3, 'II': 4, 'IS': 1, 'SM': 1, 'MM': 1, 'BB': 4,
                            'BS': 1}
    assert kmer_counts2 == {'SS': 20, 'EE': 7, 'ES': 3, 'SE': 2}
    assert template1 == "EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSMMMSSSHHHSSSEEE"
    assert dotbracket_string1 == "...(((...(((...(((...))))))...)))...(((...)))..."
    assert template2 == "EEESSSSSSEEE"
    assert dotbracket_string2 == "...((()))..."
    # Test2: with peak position (lowercase letters appear in the alphabets)
    no_sec_peak = 0  # 0 == keep secondary-structure peak positions
    # Executing
    process2 = SecStructure(data, data, k, peak, top, feature, cmd, struct_data, no_sec_peak)
    alphabet1 = process2.getStructProfile1().getAlphabet()
    alphabet2 = process2.getStructProfile2().getAlphabet()
    kmer_counts1 = process2.getStructProfile1().getProfile()
    kmer_counts2 = process2.getStructProfile2().getProfile()
    results = SecStructure.processData(process2)
    template1 = results[0][0]
    template2 = results[1][0]
    dotbracket_string1 = results[0][1]
    dotbracket_string2 = results[1][1]
    # Testing
    assert len(alphabet1) == 10
    for e in ["s", "h", "b", "i", "m", "E", "S", "B", "I", "E"]:
        assert e in alphabet1
    assert len(alphabet2) == 4
    for e in ["s", "S", "e", "E"]:
        assert e in alphabet2
    assert kmer_counts1 == {'eE': 1, 'Es': 1, 'sS': 1, 'Sh': 1, 'iI': 1, 'Is': 1, 'bB': 1, 'Bs': 1}
    assert kmer_counts2 == {'sS': 3, 'Ss': 2, 'sE': 1, 'Ee': 1, 'Se': 1}
    assert template1 == "EEESSSIIISSSBBBSSSSSSSSSIIISSSEEE"
    assert dotbracket_string1 == "...(((...(((...((())))))...)))..."
    assert template2 == "EEESSSSSSEEE"
    assert dotbracket_string2 == "...((()))..."
    # Test3: re-run processData after overwriting the profiles' alphabets
    sProfile1 = process.getStructProfile1()
    sProfile2 = process.getStructProfile2()
    # Test3a: alphabets with no multiloop (bulge-only vs. internal-loop-only)
    alphabet3 = ["S", "B", "E"]
    alphabet4 = ["S", "I", "E"]
    sProfile1.setAlphabet(alphabet3)
    sProfile2.setAlphabet(alphabet4)
    results = SecStructure.processData(process)
    template1 = results[0][0]
    template2 = results[1][0]
    dotbracket_string1 = results[0][1]
    dotbracket_string2 = results[1][1]
    assert template1 == "EEESSSBBBSSSSSSSSSEEE"
    assert dotbracket_string1 == "...(((...((())))))..."
    assert template2 == "EEESSSIIISSSSSSIIISSSEEE"
    assert dotbracket_string2 == "...(((...((()))...)))..."
    # Test3b: alphabets with only hairpin, or hairpin plus multiloop
    alphabet5 = ["S", "H", "E"]
    alphabet6 = ["S", "H", "M", "E"]
    sProfile1.setAlphabet(alphabet5)
    sProfile2.setAlphabet(alphabet6)
    results = SecStructure.processData(process)
    template1 = results[0][0]
    template2 = results[1][0]
    dotbracket_string1 = results[0][1]
    dotbracket_string2 = results[1][1]
    assert template1 == "EEESSSHHHSSSEEE"
    assert dotbracket_string1 == "...(((...)))..."
    assert template2 == "EEESSSHHHSSSMMMSSSHHHSSSEEE"
    assert dotbracket_string2 == "...(((...)))...(((...)))..."
    # Test3c: ('flawed') alphabets — full set minus multiloop, and a
    # multiloop without hairpin
    alphabet7 = ["S", "H", "E", "B", "I"]
    alphabet8 = ["S", "M", "E"]  # should behave like ["S", "E"]
    sProfile1.setAlphabet(alphabet7)
    sProfile2.setAlphabet(alphabet8)
    results = SecStructure.processData(process)
    template1 = results[0][0]
    template2 = results[1][0]
    dotbracket_string1 = results[0][1]
    dotbracket_string2 = results[1][1]
    assert template1 == "EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE"
    assert dotbracket_string1 == "...(((...(((...(((...))))))...)))..."
    assert template2 == "EEESSSSSSEEE"
    assert dotbracket_string2 == "...((()))..."
def test_createColorVector():
    """Check createColorVector, which maps k-mer counts onto per-position
    colour values of a structure template and log2-scales the result.

    Covered cases: no normalization vector, a normalization vector, and
    secondary-peak k-mers (mixed-case; the uppercase letter marks the
    position that receives the colour).
    """
    # Test1: no normalization vector wanted
    k = 2
    no_sec_peak = 1
    template = "EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE"
    kmer_counts = {"EE": 5, "ES": 7, "SS": 20, "SI": 10, "II": 15, "IS": 11, "SB": 5, "BB": 6, "BS": 5, "SH": 4,
                   "HH": 5, "HS": 4, "SE": 7}
    template_sTree = STree.STree(template)
    normalization_vector1 = None
    # One zero-initialised heat-map entry per 1-based template position.
    color_hm = {str(i): 0 for i in range(1, len(template) + 1)}
    # Executing
    new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k, template_sTree, kmer_counts, color_hm,
                                                                       no_sec_peak, normalization_vector1)
    assert len(color_hm) == len(new_color_hm1)
    # Each returned value must equal log2 of the corresponding input entry;
    # zeros stay zero.
    for i in color_hm.keys():
        x = color_hm[i]
        if x > 0:
            assert new_color_hm1[i] == math.log(x, 2)
        else:
            assert new_color_hm1[i] == 0
    assert len(not_matched1) == 0
    assert color_domain_max1 == 4.954196310386876
    # Test2: with normalization vector (a weight of 0 means "divide by 1")
    normalization_vector2 = {"EE": 0, "ES": 0, "SS": 0.7, "SI": 0.1, "II": 0.2, "IS": 0, "SB": 0, "BB": 0, "BS": 0,
                             "SH": 0, "HH": 0, "HS": 0, "SE": 0}
    # Execution
    color_hm = {str(i): 0 for i in range(1, len(template) + 1)}
    new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k, template_sTree, kmer_counts, color_hm,
                                                                       no_sec_peak, normalization_vector2)
    # Recompute the expected heat map by hand: each match position of a
    # k-mer gets count/norm added, skipping the overlap position when the
    # same k-mer matched directly before (tracked via last_idx/last_kmer).
    last_idx = -1
    last_kmer = ""
    test_color_hm = {str(i): 0 for i in range(1, len(template) + 1)}
    for kmer in normalization_vector2:
        # Lookahead regex so overlapping occurrences are found as well.
        indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.escape(kmer)), template)]
        indices_list.sort()
        norm = normalization_vector2[kmer]
        if norm == 0:
            norm = 1
        for idx in indices_list:
            for i in range(0, k):
                current_idx = str(idx + i + 1)
                if last_idx + 2 == int(current_idx) and last_kmer == kmer:
                    continue
                test_color_hm[current_idx] += (kmer_counts[kmer] / norm)
            last_idx = idx
            last_kmer = kmer
    # Log2-scale; zeros stay untouched.
    test_color_hm = {x: math.log(y, 2) if y > 0 else y for x, y in test_color_hm.items()}
    test_color_domain_max = max(test_color_hm.values())
    # Testing
    assert new_color_hm1 is not new_color_hm2
    assert len(color_hm) == len(new_color_hm2)
    assert len(not_matched2) == 0
    assert color_domain_max2 == test_color_domain_max
    for i in new_color_hm2.keys():
        assert new_color_hm2[i] == test_color_hm[i]
    # Test3: normalization vector and secondary peak position
    kmer_counts2 = {"Ee": 5, "eS": 7, "sS": 20, "Si": 10, "iI": 15, "iS": 11, "Sb": 5, "Bb": 6, "bS": 5, "sH": 4,
                    "Hh": 5, "hS": 4, "Se": 7}
    no_sec_peak2 = 0
    # Execution
    color_hm = {str(i): 0 for i in range(1, len(template) + 1)}
    new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k, template_sTree, kmer_counts2, color_hm,
                                                                       no_sec_peak2, normalization_vector2)
    test_color_hm2 = {str(i): 0 for i in range(1, len(template) + 1)}
    for kmer in kmer_counts2.keys():
        # Match the upper-cased k-mer against the (uppercase) template.
        indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.escape(kmer.upper())), template)]
        indices_list.sort()
        norm = normalization_vector2[kmer.upper()]
        if norm == 0:
            norm = 1
        for idx in indices_list:
            # use only peak-position (the uppercase letter) in the 2-mer
            # for visualization
            idx = [idx + i for i in range(0, len(kmer)) if kmer[i].isupper()][0]
            test_color_hm2[str(idx + 1)] += (kmer_counts2[kmer] / norm)
    test_color_hm2 = {x: math.log(y, 2) if y > 0 else y for x, y in test_color_hm2.items()}
    test_color_domain_max2 = max(test_color_hm2.values())
    # Testing
    assert len(not_matched3) == 0
    assert new_color_hm2 is not new_color_hm3
    assert len(color_hm) == len(new_color_hm3)
    for i in test_color_hm2:
        assert test_color_hm2[i] == new_color_hm3[i]
    assert test_color_domain_max2 == color_domain_max3
def test_helpAddIBloop():
    """helpAddIBloop extends a template element list with internal-loop
    ('III') and/or bulge ('BBB') elements plus stems; the forward flag
    selects which end of the template is extended (compare the expected
    lists in the assertions below)."""
    k = 3
    # Test 1: forward and all true
    template1 = ["EEE"]
    internalloop = True
    bulge = True
    forward = True
    # Execution
    new_template1 = helpAddIBloop(k, template1, internalloop, bulge, forward)
    # Test 2: backward and all true
    template2 = ["EEE", "SSS", "III", "SSS", "BBB", "SSS", "HHH"]
    internalloop = True
    bulge = True
    forward = False
    # Execution
    new_template2 = helpAddIBloop(k, template2, internalloop, bulge, forward)
    # Test 3: only internal loops, forward and backward
    template3_f = ["EEE"]
    template3_b = ["EEE", "SSS", "III", "SSS", "HHH"]
    internalloop = True
    bulge = False
    forward = True
    # Execution
    new_template3_f = helpAddIBloop(k, template3_f, internalloop, bulge, forward)
    forward = False
    new_template3_b = helpAddIBloop(k, template3_b, internalloop, bulge, forward)
    # Test 4: only bulges, forward and backward
    template4_f = ["EEE"]
    template4_b = ["EEE", "SSS", "BBB", "SSS", "HHH"]
    internalloop = False
    bulge = True
    forward = True
    # Execution
    new_template4_f = helpAddIBloop(k, template4_f, internalloop, bulge, forward)
    forward = False
    new_template4_b = helpAddIBloop(k, template4_b, internalloop, bulge, forward)
    # Testing
    assert new_template1 == ["EEE", "SSS", "III", "SSS", "BBB"]
    assert new_template2 == ["EEE", "SSS", "III", "SSS", "BBB", "SSS", "HHH", "SSS", "SSS", "III"]
    assert new_template3_f == ["EEE", "SSS", "III"]
    assert new_template3_b == ["EEE", "SSS", "III", "SSS", "HHH", "SSS", "III"]
    assert new_template4_f == ["EEE", "SSS", "BBB"]
    assert new_template4_b == ["EEE", "SSS", "BBB", "SSS", "HHH", "SSS"]
def test_element2dotbracket():
    """element2dotbracket converts the slice [start, end] of a structure
    element list into dot-bracket characters; the boolean flag apparently
    selects opening vs. closing brackets for stems (inferred from the
    expected strings below)."""
    k3 = 3
    k2 = 2
    k4 = 4
    # Test1 without multiloop
    elem_list1 = ["EEE", "SSS", "III", "SSS", "BBB", "SSS", "HHH", "SSS", "SSS", "III", "SSS", "EEE"]
    dotbracket_string1 = "...(((...(((...(((...))))))...)))..."
    # Test2 with multiloop
    elem_list2 = ["EE", "SS", "II", "SS", "HH", "SS", "II", "SS", "MM", "SS", "BB", "SS", "HH", "SS", "SS", "EE"]
    dotbracket_string2 = "..((..((..))..))..((..((..)))).."
    # Test3 without loops
    elem_list3 = ["EEEE", "SSSS", "SSSS", "EEEE"]
    dotbracket_string3 = "....(((())))...."
    # Test4 with every element type
    elem_list4 = ["EEE", "SSS", "III", "SSS", "BBB", "SSS", "HHH", "SSS", "SSS", "III", "SSS", "MMM", "SSS", "HHH",
                  "SSS", "EEE"]
    dotbracket_string4 = "...(((...(((...(((...))))))...)))...(((...)))..."
    # Execution: each string is assembled from its opening/closing halves.
    db1 = []
    db1.extend(element2dotbracket(elem_list1, k3, 0, 6, True))
    db1.extend(element2dotbracket(elem_list1, k3, 7, len(elem_list1) - 1, False))
    db1 = ''.join(db1)
    db2 = []
    db2.extend(element2dotbracket(elem_list2, k2, 0, 4, True))
    db2.extend(element2dotbracket(elem_list2, k2, 5, 8, False))
    db2.extend(element2dotbracket(elem_list2, k2, 9, 12, True))
    db2.extend(element2dotbracket(elem_list2, k2, 13, len(elem_list2) - 1, False))
    db2 = ''.join(db2)
    db3 = []
    db3.extend(element2dotbracket(elem_list3, k4, 0, 1, True))
    db3.extend(element2dotbracket(elem_list3, k4, 2, len(elem_list3) - 1, False))
    db3 = ''.join(db3)
    db4 = []
    db4.extend(element2dotbracket(elem_list4, k3, 0, 6, True))
    db4.extend(element2dotbracket(elem_list4, k3, 7, 11, False))
    db4.extend(element2dotbracket(elem_list4, k3, 12, 13, True))
    db4.extend(element2dotbracket(elem_list4, k3, 14, len(elem_list4) - 1, False))
    db4 = ''.join(db4)
    # testing
    assert db1 == dotbracket_string1
    assert db2 == dotbracket_string2
    assert db3 == dotbracket_string3
    assert db4 == dotbracket_string4
|
flexible
|
{
"blob_id": "60b1a77d2de4a52ae9597f88917c4a3996c99923",
"index": 5626,
"step-1": "<mask token>\n\n\ndef test_createColorVector():\n k = 2\n no_sec_peak = 1\n template = 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'\n kmer_counts = {'EE': 5, 'ES': 7, 'SS': 20, 'SI': 10, 'II': 15, 'IS': 11,\n 'SB': 5, 'BB': 6, 'BS': 5, 'SH': 4, 'HH': 5, 'HS': 4, 'SE': 7}\n template_sTree = STree.STree(template)\n normalization_vector1 = None\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector1)\n assert len(color_hm) == len(new_color_hm1)\n for i in color_hm.keys():\n x = color_hm[i]\n if x > 0:\n assert new_color_hm1[i] == math.log(x, 2)\n else:\n assert new_color_hm1[i] == 0\n assert len(not_matched1) == 0\n assert color_domain_max1 == 4.954196310386876\n normalization_vector2 = {'EE': 0, 'ES': 0, 'SS': 0.7, 'SI': 0.1, 'II': \n 0.2, 'IS': 0, 'SB': 0, 'BB': 0, 'BS': 0, 'SH': 0, 'HH': 0, 'HS': 0,\n 'SE': 0}\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector2)\n last_idx = -1\n last_kmer = ''\n test_color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in normalization_vector2:\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer)), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n for i in range(0, k):\n current_idx = str(idx + i + 1)\n if last_idx + 2 == int(current_idx) and last_kmer == kmer:\n continue\n test_color_hm[current_idx] += kmer_counts[kmer] / norm\n last_idx = idx\n last_kmer = kmer\n test_color_hm = {x: (math.log(y, 2) if y > 0 else y) for x, y in\n test_color_hm.items()}\n test_color_domain_max = max(test_color_hm.values())\n assert new_color_hm1 is not new_color_hm2\n assert len(color_hm) == 
len(new_color_hm2)\n assert len(not_matched2) == 0\n assert color_domain_max2 == test_color_domain_max\n for i in new_color_hm2.keys():\n assert new_color_hm2[i] == test_color_hm[i]\n kmer_counts2 = {'Ee': 5, 'eS': 7, 'sS': 20, 'Si': 10, 'iI': 15, 'iS': \n 11, 'Sb': 5, 'Bb': 6, 'bS': 5, 'sH': 4, 'Hh': 5, 'hS': 4, 'Se': 7}\n no_sec_peak2 = 0\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k,\n template_sTree, kmer_counts2, color_hm, no_sec_peak2,\n normalization_vector2)\n test_color_hm2 = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in kmer_counts2.keys():\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer.upper())), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer.upper()]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n idx = [(idx + i) for i in range(0, len(kmer)) if kmer[i].isupper()\n ][0]\n test_color_hm2[str(idx + 1)] += kmer_counts2[kmer] / norm\n test_color_hm2 = {x: (math.log(y, 2) if y > 0 else y) for x, y in\n test_color_hm2.items()}\n test_color_domain_max2 = max(test_color_hm2.values())\n assert len(not_matched3) == 0\n assert new_color_hm2 is not new_color_hm3\n assert len(color_hm) == len(new_color_hm3)\n for i in test_color_hm2:\n assert test_color_hm2[i] == new_color_hm3[i]\n assert test_color_domain_max2 == color_domain_max3\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_createColorVector():\n k = 2\n no_sec_peak = 1\n template = 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'\n kmer_counts = {'EE': 5, 'ES': 7, 'SS': 20, 'SI': 10, 'II': 15, 'IS': 11,\n 'SB': 5, 'BB': 6, 'BS': 5, 'SH': 4, 'HH': 5, 'HS': 4, 'SE': 7}\n template_sTree = STree.STree(template)\n normalization_vector1 = None\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector1)\n assert len(color_hm) == len(new_color_hm1)\n for i in color_hm.keys():\n x = color_hm[i]\n if x > 0:\n assert new_color_hm1[i] == math.log(x, 2)\n else:\n assert new_color_hm1[i] == 0\n assert len(not_matched1) == 0\n assert color_domain_max1 == 4.954196310386876\n normalization_vector2 = {'EE': 0, 'ES': 0, 'SS': 0.7, 'SI': 0.1, 'II': \n 0.2, 'IS': 0, 'SB': 0, 'BB': 0, 'BS': 0, 'SH': 0, 'HH': 0, 'HS': 0,\n 'SE': 0}\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector2)\n last_idx = -1\n last_kmer = ''\n test_color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in normalization_vector2:\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer)), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n for i in range(0, k):\n current_idx = str(idx + i + 1)\n if last_idx + 2 == int(current_idx) and last_kmer == kmer:\n continue\n test_color_hm[current_idx] += kmer_counts[kmer] / norm\n last_idx = idx\n last_kmer = kmer\n test_color_hm = {x: (math.log(y, 2) if y > 0 else y) for x, y in\n test_color_hm.items()}\n test_color_domain_max = max(test_color_hm.values())\n assert new_color_hm1 is not new_color_hm2\n assert len(color_hm) == 
len(new_color_hm2)\n assert len(not_matched2) == 0\n assert color_domain_max2 == test_color_domain_max\n for i in new_color_hm2.keys():\n assert new_color_hm2[i] == test_color_hm[i]\n kmer_counts2 = {'Ee': 5, 'eS': 7, 'sS': 20, 'Si': 10, 'iI': 15, 'iS': \n 11, 'Sb': 5, 'Bb': 6, 'bS': 5, 'sH': 4, 'Hh': 5, 'hS': 4, 'Se': 7}\n no_sec_peak2 = 0\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k,\n template_sTree, kmer_counts2, color_hm, no_sec_peak2,\n normalization_vector2)\n test_color_hm2 = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in kmer_counts2.keys():\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer.upper())), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer.upper()]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n idx = [(idx + i) for i in range(0, len(kmer)) if kmer[i].isupper()\n ][0]\n test_color_hm2[str(idx + 1)] += kmer_counts2[kmer] / norm\n test_color_hm2 = {x: (math.log(y, 2) if y > 0 else y) for x, y in\n test_color_hm2.items()}\n test_color_domain_max2 = max(test_color_hm2.values())\n assert len(not_matched3) == 0\n assert new_color_hm2 is not new_color_hm3\n assert len(color_hm) == len(new_color_hm3)\n for i in test_color_hm2:\n assert test_color_hm2[i] == new_color_hm3[i]\n assert test_color_domain_max2 == color_domain_max3\n\n\ndef test_helpAddIBloop():\n k = 3\n template1 = ['EEE']\n internalloop = True\n bulge = True\n forward = True\n new_template1 = helpAddIBloop(k, template1, internalloop, bulge, forward)\n template2 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH']\n internalloop = True\n bulge = True\n forward = False\n new_template2 = helpAddIBloop(k, template2, internalloop, bulge, forward)\n template3_f = ['EEE']\n template3_b = ['EEE', 'SSS', 'III', 'SSS', 'HHH']\n internalloop = True\n bulge = False\n forward = True\n new_template3_f = helpAddIBloop(k, template3_f, 
internalloop, bulge,\n forward)\n forward = False\n new_template3_b = helpAddIBloop(k, template3_b, internalloop, bulge,\n forward)\n template4_f = ['EEE']\n template4_b = ['EEE', 'SSS', 'BBB', 'SSS', 'HHH']\n internalloop = False\n bulge = True\n forward = True\n new_template4_f = helpAddIBloop(k, template4_f, internalloop, bulge,\n forward)\n forward = False\n new_template4_b = helpAddIBloop(k, template4_b, internalloop, bulge,\n forward)\n assert new_template1 == ['EEE', 'SSS', 'III', 'SSS', 'BBB']\n assert new_template2 == ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS',\n 'HHH', 'SSS', 'SSS', 'III']\n assert new_template3_f == ['EEE', 'SSS', 'III']\n assert new_template3_b == ['EEE', 'SSS', 'III', 'SSS', 'HHH', 'SSS', 'III']\n assert new_template4_f == ['EEE', 'SSS', 'BBB']\n assert new_template4_b == ['EEE', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS']\n\n\ndef test_element2dotbracket():\n k3 = 3\n k2 = 2\n k4 = 4\n elem_list1 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',\n 'SSS', 'III', 'SSS', 'EEE']\n dotbracket_string1 = '...(((...(((...(((...))))))...)))...'\n elem_list2 = ['EE', 'SS', 'II', 'SS', 'HH', 'SS', 'II', 'SS', 'MM',\n 'SS', 'BB', 'SS', 'HH', 'SS', 'SS', 'EE']\n dotbracket_string2 = '..((..((..))..))..((..((..))))..'\n elem_list3 = ['EEEE', 'SSSS', 'SSSS', 'EEEE']\n dotbracket_string3 = '....(((())))....'\n elem_list4 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',\n 'SSS', 'III', 'SSS', 'MMM', 'SSS', 'HHH', 'SSS', 'EEE']\n dotbracket_string4 = '...(((...(((...(((...))))))...)))...(((...)))...'\n db1 = []\n db1.extend(element2dotbracket(elem_list1, k3, 0, 6, True))\n db1.extend(element2dotbracket(elem_list1, k3, 7, len(elem_list1) - 1, \n False))\n db1 = ''.join(db1)\n db2 = []\n db2.extend(element2dotbracket(elem_list2, k2, 0, 4, True))\n db2.extend(element2dotbracket(elem_list2, k2, 5, 8, False))\n db2.extend(element2dotbracket(elem_list2, k2, 9, 12, True))\n db2.extend(element2dotbracket(elem_list2, k2, 13, len(elem_list2) - 1, \n 
False))\n db2 = ''.join(db2)\n db3 = []\n db3.extend(element2dotbracket(elem_list3, k4, 0, 1, True))\n db3.extend(element2dotbracket(elem_list3, k4, 2, len(elem_list3) - 1, \n False))\n db3 = ''.join(db3)\n db4 = []\n db4.extend(element2dotbracket(elem_list4, k3, 0, 6, True))\n db4.extend(element2dotbracket(elem_list4, k3, 7, 11, False))\n db4.extend(element2dotbracket(elem_list4, k3, 12, 13, True))\n db4.extend(element2dotbracket(elem_list4, k3, 14, len(elem_list4) - 1, \n False))\n db4 = ''.join(db4)\n assert db1 == dotbracket_string1\n assert db2 == dotbracket_string2\n assert db3 == dotbracket_string3\n assert db4 == dotbracket_string4\n",
"step-3": "<mask token>\n\n\ndef test_processData():\n data = ['example/example1.fa', 'example/example2.fa']\n struct_data = ['example/exampleStrucData/exampleStructuralData1.fa',\n 'example/exampleStrucData/exampleStructuralData2.fa']\n k = 3\n top = 10\n peak = None\n feature = None\n cmd = False\n no_sec_peak = 1\n process = SecStructure(data, data, k, peak, top, feature, cmd,\n struct_data, no_sec_peak)\n alphabet1 = process.getStructProfile1().getAlphabet()\n alphabet2 = process.getStructProfile2().getAlphabet()\n kmer_counts1 = process.getStructProfile1().getProfile()\n kmer_counts2 = process.getStructProfile2().getProfile()\n results = SecStructure.processData(process)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert len(alphabet1) == 6\n for e in ['S', 'H', 'B', 'I', 'M', 'E']:\n assert e in alphabet1\n assert len(alphabet2) == 2\n assert 'S' in alphabet2\n assert 'E' in alphabet2\n assert kmer_counts1 == {'EE': 4, 'ES': 1, 'SS': 11, 'SH': 1, 'HH': 3,\n 'II': 4, 'IS': 1, 'SM': 1, 'MM': 1, 'BB': 4, 'BS': 1}\n assert kmer_counts2 == {'SS': 20, 'EE': 7, 'ES': 3, 'SE': 2}\n assert template1 == 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSMMMSSSHHHSSSEEE'\n assert dotbracket_string1 == '...(((...(((...(((...))))))...)))...(((...)))...'\n assert template2 == 'EEESSSSSSEEE'\n assert dotbracket_string2 == '...((()))...'\n no_sec_peak = 0\n process2 = SecStructure(data, data, k, peak, top, feature, cmd,\n struct_data, no_sec_peak)\n alphabet1 = process2.getStructProfile1().getAlphabet()\n alphabet2 = process2.getStructProfile2().getAlphabet()\n kmer_counts1 = process2.getStructProfile1().getProfile()\n kmer_counts2 = process2.getStructProfile2().getProfile()\n results = SecStructure.processData(process2)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert len(alphabet1) == 10\n for e in ['s', 'h', 'b', 
'i', 'm', 'E', 'S', 'B', 'I', 'E']:\n assert e in alphabet1\n assert len(alphabet2) == 4\n for e in ['s', 'S', 'e', 'E']:\n assert e in alphabet2\n assert kmer_counts1 == {'eE': 1, 'Es': 1, 'sS': 1, 'Sh': 1, 'iI': 1,\n 'Is': 1, 'bB': 1, 'Bs': 1}\n assert kmer_counts2 == {'sS': 3, 'Ss': 2, 'sE': 1, 'Ee': 1, 'Se': 1}\n assert template1 == 'EEESSSIIISSSBBBSSSSSSSSSIIISSSEEE'\n assert dotbracket_string1 == '...(((...(((...((())))))...)))...'\n assert template2 == 'EEESSSSSSEEE'\n assert dotbracket_string2 == '...((()))...'\n sProfile1 = process.getStructProfile1()\n sProfile2 = process.getStructProfile2()\n alphabet3 = ['S', 'B', 'E']\n alphabet4 = ['S', 'I', 'E']\n sProfile1.setAlphabet(alphabet3)\n sProfile2.setAlphabet(alphabet4)\n results = SecStructure.processData(process)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert template1 == 'EEESSSBBBSSSSSSSSSEEE'\n assert dotbracket_string1 == '...(((...((())))))...'\n assert template2 == 'EEESSSIIISSSSSSIIISSSEEE'\n assert dotbracket_string2 == '...(((...((()))...)))...'\n alphabet5 = ['S', 'H', 'E']\n alphabet6 = ['S', 'H', 'M', 'E']\n sProfile1.setAlphabet(alphabet5)\n sProfile2.setAlphabet(alphabet6)\n results = SecStructure.processData(process)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert template1 == 'EEESSSHHHSSSEEE'\n assert dotbracket_string1 == '...(((...)))...'\n assert template2 == 'EEESSSHHHSSSMMMSSSHHHSSSEEE'\n assert dotbracket_string2 == '...(((...)))...(((...)))...'\n alphabet7 = ['S', 'H', 'E', 'B', 'I']\n alphabet8 = ['S', 'M', 'E']\n sProfile1.setAlphabet(alphabet7)\n sProfile2.setAlphabet(alphabet8)\n results = SecStructure.processData(process)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert template1 == 
'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'\n assert dotbracket_string1 == '...(((...(((...(((...))))))...)))...'\n assert template2 == 'EEESSSSSSEEE'\n assert dotbracket_string2 == '...((()))...'\n\n\ndef test_createColorVector():\n k = 2\n no_sec_peak = 1\n template = 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'\n kmer_counts = {'EE': 5, 'ES': 7, 'SS': 20, 'SI': 10, 'II': 15, 'IS': 11,\n 'SB': 5, 'BB': 6, 'BS': 5, 'SH': 4, 'HH': 5, 'HS': 4, 'SE': 7}\n template_sTree = STree.STree(template)\n normalization_vector1 = None\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector1)\n assert len(color_hm) == len(new_color_hm1)\n for i in color_hm.keys():\n x = color_hm[i]\n if x > 0:\n assert new_color_hm1[i] == math.log(x, 2)\n else:\n assert new_color_hm1[i] == 0\n assert len(not_matched1) == 0\n assert color_domain_max1 == 4.954196310386876\n normalization_vector2 = {'EE': 0, 'ES': 0, 'SS': 0.7, 'SI': 0.1, 'II': \n 0.2, 'IS': 0, 'SB': 0, 'BB': 0, 'BS': 0, 'SH': 0, 'HH': 0, 'HS': 0,\n 'SE': 0}\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector2)\n last_idx = -1\n last_kmer = ''\n test_color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in normalization_vector2:\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer)), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n for i in range(0, k):\n current_idx = str(idx + i + 1)\n if last_idx + 2 == int(current_idx) and last_kmer == kmer:\n continue\n test_color_hm[current_idx] += kmer_counts[kmer] / norm\n last_idx = idx\n last_kmer = kmer\n test_color_hm = {x: (math.log(y, 2) if y > 0 else y) for 
x, y in\n test_color_hm.items()}\n test_color_domain_max = max(test_color_hm.values())\n assert new_color_hm1 is not new_color_hm2\n assert len(color_hm) == len(new_color_hm2)\n assert len(not_matched2) == 0\n assert color_domain_max2 == test_color_domain_max\n for i in new_color_hm2.keys():\n assert new_color_hm2[i] == test_color_hm[i]\n kmer_counts2 = {'Ee': 5, 'eS': 7, 'sS': 20, 'Si': 10, 'iI': 15, 'iS': \n 11, 'Sb': 5, 'Bb': 6, 'bS': 5, 'sH': 4, 'Hh': 5, 'hS': 4, 'Se': 7}\n no_sec_peak2 = 0\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k,\n template_sTree, kmer_counts2, color_hm, no_sec_peak2,\n normalization_vector2)\n test_color_hm2 = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in kmer_counts2.keys():\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer.upper())), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer.upper()]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n idx = [(idx + i) for i in range(0, len(kmer)) if kmer[i].isupper()\n ][0]\n test_color_hm2[str(idx + 1)] += kmer_counts2[kmer] / norm\n test_color_hm2 = {x: (math.log(y, 2) if y > 0 else y) for x, y in\n test_color_hm2.items()}\n test_color_domain_max2 = max(test_color_hm2.values())\n assert len(not_matched3) == 0\n assert new_color_hm2 is not new_color_hm3\n assert len(color_hm) == len(new_color_hm3)\n for i in test_color_hm2:\n assert test_color_hm2[i] == new_color_hm3[i]\n assert test_color_domain_max2 == color_domain_max3\n\n\ndef test_helpAddIBloop():\n k = 3\n template1 = ['EEE']\n internalloop = True\n bulge = True\n forward = True\n new_template1 = helpAddIBloop(k, template1, internalloop, bulge, forward)\n template2 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH']\n internalloop = True\n bulge = True\n forward = False\n new_template2 = helpAddIBloop(k, template2, internalloop, bulge, forward)\n template3_f = ['EEE']\n 
template3_b = ['EEE', 'SSS', 'III', 'SSS', 'HHH']\n internalloop = True\n bulge = False\n forward = True\n new_template3_f = helpAddIBloop(k, template3_f, internalloop, bulge,\n forward)\n forward = False\n new_template3_b = helpAddIBloop(k, template3_b, internalloop, bulge,\n forward)\n template4_f = ['EEE']\n template4_b = ['EEE', 'SSS', 'BBB', 'SSS', 'HHH']\n internalloop = False\n bulge = True\n forward = True\n new_template4_f = helpAddIBloop(k, template4_f, internalloop, bulge,\n forward)\n forward = False\n new_template4_b = helpAddIBloop(k, template4_b, internalloop, bulge,\n forward)\n assert new_template1 == ['EEE', 'SSS', 'III', 'SSS', 'BBB']\n assert new_template2 == ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS',\n 'HHH', 'SSS', 'SSS', 'III']\n assert new_template3_f == ['EEE', 'SSS', 'III']\n assert new_template3_b == ['EEE', 'SSS', 'III', 'SSS', 'HHH', 'SSS', 'III']\n assert new_template4_f == ['EEE', 'SSS', 'BBB']\n assert new_template4_b == ['EEE', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS']\n\n\ndef test_element2dotbracket():\n k3 = 3\n k2 = 2\n k4 = 4\n elem_list1 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',\n 'SSS', 'III', 'SSS', 'EEE']\n dotbracket_string1 = '...(((...(((...(((...))))))...)))...'\n elem_list2 = ['EE', 'SS', 'II', 'SS', 'HH', 'SS', 'II', 'SS', 'MM',\n 'SS', 'BB', 'SS', 'HH', 'SS', 'SS', 'EE']\n dotbracket_string2 = '..((..((..))..))..((..((..))))..'\n elem_list3 = ['EEEE', 'SSSS', 'SSSS', 'EEEE']\n dotbracket_string3 = '....(((())))....'\n elem_list4 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',\n 'SSS', 'III', 'SSS', 'MMM', 'SSS', 'HHH', 'SSS', 'EEE']\n dotbracket_string4 = '...(((...(((...(((...))))))...)))...(((...)))...'\n db1 = []\n db1.extend(element2dotbracket(elem_list1, k3, 0, 6, True))\n db1.extend(element2dotbracket(elem_list1, k3, 7, len(elem_list1) - 1, \n False))\n db1 = ''.join(db1)\n db2 = []\n db2.extend(element2dotbracket(elem_list2, k2, 0, 4, True))\n db2.extend(element2dotbracket(elem_list2, k2, 
5, 8, False))\n db2.extend(element2dotbracket(elem_list2, k2, 9, 12, True))\n db2.extend(element2dotbracket(elem_list2, k2, 13, len(elem_list2) - 1, \n False))\n db2 = ''.join(db2)\n db3 = []\n db3.extend(element2dotbracket(elem_list3, k4, 0, 1, True))\n db3.extend(element2dotbracket(elem_list3, k4, 2, len(elem_list3) - 1, \n False))\n db3 = ''.join(db3)\n db4 = []\n db4.extend(element2dotbracket(elem_list4, k3, 0, 6, True))\n db4.extend(element2dotbracket(elem_list4, k3, 7, 11, False))\n db4.extend(element2dotbracket(elem_list4, k3, 12, 13, True))\n db4.extend(element2dotbracket(elem_list4, k3, 14, len(elem_list4) - 1, \n False))\n db4 = ''.join(db4)\n assert db1 == dotbracket_string1\n assert db2 == dotbracket_string2\n assert db3 == dotbracket_string3\n assert db4 == dotbracket_string4\n",
"step-4": "from src.secStructure import *\nfrom suffix_trees import STree\nimport math\nimport re\n\n\ndef test_processData():\n data = ['example/example1.fa', 'example/example2.fa']\n struct_data = ['example/exampleStrucData/exampleStructuralData1.fa',\n 'example/exampleStrucData/exampleStructuralData2.fa']\n k = 3\n top = 10\n peak = None\n feature = None\n cmd = False\n no_sec_peak = 1\n process = SecStructure(data, data, k, peak, top, feature, cmd,\n struct_data, no_sec_peak)\n alphabet1 = process.getStructProfile1().getAlphabet()\n alphabet2 = process.getStructProfile2().getAlphabet()\n kmer_counts1 = process.getStructProfile1().getProfile()\n kmer_counts2 = process.getStructProfile2().getProfile()\n results = SecStructure.processData(process)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert len(alphabet1) == 6\n for e in ['S', 'H', 'B', 'I', 'M', 'E']:\n assert e in alphabet1\n assert len(alphabet2) == 2\n assert 'S' in alphabet2\n assert 'E' in alphabet2\n assert kmer_counts1 == {'EE': 4, 'ES': 1, 'SS': 11, 'SH': 1, 'HH': 3,\n 'II': 4, 'IS': 1, 'SM': 1, 'MM': 1, 'BB': 4, 'BS': 1}\n assert kmer_counts2 == {'SS': 20, 'EE': 7, 'ES': 3, 'SE': 2}\n assert template1 == 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSMMMSSSHHHSSSEEE'\n assert dotbracket_string1 == '...(((...(((...(((...))))))...)))...(((...)))...'\n assert template2 == 'EEESSSSSSEEE'\n assert dotbracket_string2 == '...((()))...'\n no_sec_peak = 0\n process2 = SecStructure(data, data, k, peak, top, feature, cmd,\n struct_data, no_sec_peak)\n alphabet1 = process2.getStructProfile1().getAlphabet()\n alphabet2 = process2.getStructProfile2().getAlphabet()\n kmer_counts1 = process2.getStructProfile1().getProfile()\n kmer_counts2 = process2.getStructProfile2().getProfile()\n results = SecStructure.processData(process2)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 
= results[1][1]\n assert len(alphabet1) == 10\n for e in ['s', 'h', 'b', 'i', 'm', 'E', 'S', 'B', 'I', 'E']:\n assert e in alphabet1\n assert len(alphabet2) == 4\n for e in ['s', 'S', 'e', 'E']:\n assert e in alphabet2\n assert kmer_counts1 == {'eE': 1, 'Es': 1, 'sS': 1, 'Sh': 1, 'iI': 1,\n 'Is': 1, 'bB': 1, 'Bs': 1}\n assert kmer_counts2 == {'sS': 3, 'Ss': 2, 'sE': 1, 'Ee': 1, 'Se': 1}\n assert template1 == 'EEESSSIIISSSBBBSSSSSSSSSIIISSSEEE'\n assert dotbracket_string1 == '...(((...(((...((())))))...)))...'\n assert template2 == 'EEESSSSSSEEE'\n assert dotbracket_string2 == '...((()))...'\n sProfile1 = process.getStructProfile1()\n sProfile2 = process.getStructProfile2()\n alphabet3 = ['S', 'B', 'E']\n alphabet4 = ['S', 'I', 'E']\n sProfile1.setAlphabet(alphabet3)\n sProfile2.setAlphabet(alphabet4)\n results = SecStructure.processData(process)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert template1 == 'EEESSSBBBSSSSSSSSSEEE'\n assert dotbracket_string1 == '...(((...((())))))...'\n assert template2 == 'EEESSSIIISSSSSSIIISSSEEE'\n assert dotbracket_string2 == '...(((...((()))...)))...'\n alphabet5 = ['S', 'H', 'E']\n alphabet6 = ['S', 'H', 'M', 'E']\n sProfile1.setAlphabet(alphabet5)\n sProfile2.setAlphabet(alphabet6)\n results = SecStructure.processData(process)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert template1 == 'EEESSSHHHSSSEEE'\n assert dotbracket_string1 == '...(((...)))...'\n assert template2 == 'EEESSSHHHSSSMMMSSSHHHSSSEEE'\n assert dotbracket_string2 == '...(((...)))...(((...)))...'\n alphabet7 = ['S', 'H', 'E', 'B', 'I']\n alphabet8 = ['S', 'M', 'E']\n sProfile1.setAlphabet(alphabet7)\n sProfile2.setAlphabet(alphabet8)\n results = SecStructure.processData(process)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n 
dotbracket_string2 = results[1][1]\n assert template1 == 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'\n assert dotbracket_string1 == '...(((...(((...(((...))))))...)))...'\n assert template2 == 'EEESSSSSSEEE'\n assert dotbracket_string2 == '...((()))...'\n\n\ndef test_createColorVector():\n k = 2\n no_sec_peak = 1\n template = 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'\n kmer_counts = {'EE': 5, 'ES': 7, 'SS': 20, 'SI': 10, 'II': 15, 'IS': 11,\n 'SB': 5, 'BB': 6, 'BS': 5, 'SH': 4, 'HH': 5, 'HS': 4, 'SE': 7}\n template_sTree = STree.STree(template)\n normalization_vector1 = None\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector1)\n assert len(color_hm) == len(new_color_hm1)\n for i in color_hm.keys():\n x = color_hm[i]\n if x > 0:\n assert new_color_hm1[i] == math.log(x, 2)\n else:\n assert new_color_hm1[i] == 0\n assert len(not_matched1) == 0\n assert color_domain_max1 == 4.954196310386876\n normalization_vector2 = {'EE': 0, 'ES': 0, 'SS': 0.7, 'SI': 0.1, 'II': \n 0.2, 'IS': 0, 'SB': 0, 'BB': 0, 'BS': 0, 'SH': 0, 'HH': 0, 'HS': 0,\n 'SE': 0}\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector2)\n last_idx = -1\n last_kmer = ''\n test_color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in normalization_vector2:\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer)), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n for i in range(0, k):\n current_idx = str(idx + i + 1)\n if last_idx + 2 == int(current_idx) and last_kmer == kmer:\n continue\n test_color_hm[current_idx] += kmer_counts[kmer] / norm\n last_idx = idx\n last_kmer = kmer\n 
test_color_hm = {x: (math.log(y, 2) if y > 0 else y) for x, y in\n test_color_hm.items()}\n test_color_domain_max = max(test_color_hm.values())\n assert new_color_hm1 is not new_color_hm2\n assert len(color_hm) == len(new_color_hm2)\n assert len(not_matched2) == 0\n assert color_domain_max2 == test_color_domain_max\n for i in new_color_hm2.keys():\n assert new_color_hm2[i] == test_color_hm[i]\n kmer_counts2 = {'Ee': 5, 'eS': 7, 'sS': 20, 'Si': 10, 'iI': 15, 'iS': \n 11, 'Sb': 5, 'Bb': 6, 'bS': 5, 'sH': 4, 'Hh': 5, 'hS': 4, 'Se': 7}\n no_sec_peak2 = 0\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k,\n template_sTree, kmer_counts2, color_hm, no_sec_peak2,\n normalization_vector2)\n test_color_hm2 = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in kmer_counts2.keys():\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer.upper())), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer.upper()]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n idx = [(idx + i) for i in range(0, len(kmer)) if kmer[i].isupper()\n ][0]\n test_color_hm2[str(idx + 1)] += kmer_counts2[kmer] / norm\n test_color_hm2 = {x: (math.log(y, 2) if y > 0 else y) for x, y in\n test_color_hm2.items()}\n test_color_domain_max2 = max(test_color_hm2.values())\n assert len(not_matched3) == 0\n assert new_color_hm2 is not new_color_hm3\n assert len(color_hm) == len(new_color_hm3)\n for i in test_color_hm2:\n assert test_color_hm2[i] == new_color_hm3[i]\n assert test_color_domain_max2 == color_domain_max3\n\n\ndef test_helpAddIBloop():\n k = 3\n template1 = ['EEE']\n internalloop = True\n bulge = True\n forward = True\n new_template1 = helpAddIBloop(k, template1, internalloop, bulge, forward)\n template2 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH']\n internalloop = True\n bulge = True\n forward = False\n new_template2 = helpAddIBloop(k, template2, 
internalloop, bulge, forward)\n template3_f = ['EEE']\n template3_b = ['EEE', 'SSS', 'III', 'SSS', 'HHH']\n internalloop = True\n bulge = False\n forward = True\n new_template3_f = helpAddIBloop(k, template3_f, internalloop, bulge,\n forward)\n forward = False\n new_template3_b = helpAddIBloop(k, template3_b, internalloop, bulge,\n forward)\n template4_f = ['EEE']\n template4_b = ['EEE', 'SSS', 'BBB', 'SSS', 'HHH']\n internalloop = False\n bulge = True\n forward = True\n new_template4_f = helpAddIBloop(k, template4_f, internalloop, bulge,\n forward)\n forward = False\n new_template4_b = helpAddIBloop(k, template4_b, internalloop, bulge,\n forward)\n assert new_template1 == ['EEE', 'SSS', 'III', 'SSS', 'BBB']\n assert new_template2 == ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS',\n 'HHH', 'SSS', 'SSS', 'III']\n assert new_template3_f == ['EEE', 'SSS', 'III']\n assert new_template3_b == ['EEE', 'SSS', 'III', 'SSS', 'HHH', 'SSS', 'III']\n assert new_template4_f == ['EEE', 'SSS', 'BBB']\n assert new_template4_b == ['EEE', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS']\n\n\ndef test_element2dotbracket():\n k3 = 3\n k2 = 2\n k4 = 4\n elem_list1 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',\n 'SSS', 'III', 'SSS', 'EEE']\n dotbracket_string1 = '...(((...(((...(((...))))))...)))...'\n elem_list2 = ['EE', 'SS', 'II', 'SS', 'HH', 'SS', 'II', 'SS', 'MM',\n 'SS', 'BB', 'SS', 'HH', 'SS', 'SS', 'EE']\n dotbracket_string2 = '..((..((..))..))..((..((..))))..'\n elem_list3 = ['EEEE', 'SSSS', 'SSSS', 'EEEE']\n dotbracket_string3 = '....(((())))....'\n elem_list4 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',\n 'SSS', 'III', 'SSS', 'MMM', 'SSS', 'HHH', 'SSS', 'EEE']\n dotbracket_string4 = '...(((...(((...(((...))))))...)))...(((...)))...'\n db1 = []\n db1.extend(element2dotbracket(elem_list1, k3, 0, 6, True))\n db1.extend(element2dotbracket(elem_list1, k3, 7, len(elem_list1) - 1, \n False))\n db1 = ''.join(db1)\n db2 = []\n db2.extend(element2dotbracket(elem_list2, k2, 0, 4, 
True))\n db2.extend(element2dotbracket(elem_list2, k2, 5, 8, False))\n db2.extend(element2dotbracket(elem_list2, k2, 9, 12, True))\n db2.extend(element2dotbracket(elem_list2, k2, 13, len(elem_list2) - 1, \n False))\n db2 = ''.join(db2)\n db3 = []\n db3.extend(element2dotbracket(elem_list3, k4, 0, 1, True))\n db3.extend(element2dotbracket(elem_list3, k4, 2, len(elem_list3) - 1, \n False))\n db3 = ''.join(db3)\n db4 = []\n db4.extend(element2dotbracket(elem_list4, k3, 0, 6, True))\n db4.extend(element2dotbracket(elem_list4, k3, 7, 11, False))\n db4.extend(element2dotbracket(elem_list4, k3, 12, 13, True))\n db4.extend(element2dotbracket(elem_list4, k3, 14, len(elem_list4) - 1, \n False))\n db4 = ''.join(db4)\n assert db1 == dotbracket_string1\n assert db2 == dotbracket_string2\n assert db3 == dotbracket_string3\n assert db4 == dotbracket_string4\n",
"step-5": "from src.secStructure import *\nfrom suffix_trees import STree\nimport math\nimport re\n\n\ndef test_processData():\n # Test1: ignoring peak position\n data = ['example/example1.fa', 'example/example2.fa']\n struct_data = ['example/exampleStrucData/exampleStructuralData1.fa',\n 'example/exampleStrucData/exampleStructuralData2.fa']\n k = 3\n top = 10\n peak = None\n feature = None\n cmd = False\n no_sec_peak = 1 # True\n\n # Executing\n\n process = SecStructure(data, data, k, peak, top, feature, cmd, struct_data, no_sec_peak)\n\n alphabet1 = process.getStructProfile1().getAlphabet()\n alphabet2 = process.getStructProfile2().getAlphabet()\n\n kmer_counts1 = process.getStructProfile1().getProfile()\n kmer_counts2 = process.getStructProfile2().getProfile()\n\n results = SecStructure.processData(process)\n\n template1 = results[0][0]\n template2 = results[1][0]\n\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n\n # Testing\n\n assert len(alphabet1) == 6\n for e in [\"S\", \"H\", \"B\", \"I\", \"M\", \"E\"]:\n assert e in alphabet1\n\n assert len(alphabet2) == 2\n assert \"S\" in alphabet2\n assert \"E\" in alphabet2\n\n assert kmer_counts1 == {'EE': 4, 'ES': 1, 'SS': 11, 'SH': 1, 'HH': 3, 'II': 4, 'IS': 1, 'SM': 1, 'MM': 1, 'BB': 4,\n 'BS': 1}\n assert kmer_counts2 == {'SS': 20, 'EE': 7, 'ES': 3, 'SE': 2}\n\n assert template1 == \"EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSMMMSSSHHHSSSEEE\"\n assert dotbracket_string1 == \"...(((...(((...(((...))))))...)))...(((...)))...\"\n\n assert template2 == \"EEESSSSSSEEE\"\n assert dotbracket_string2 == \"...((()))...\"\n\n # Test2: with peak position\n no_sec_peak = 0 # True\n\n # Executing\n\n process2 = SecStructure(data, data, k, peak, top, feature, cmd, struct_data, no_sec_peak)\n\n alphabet1 = process2.getStructProfile1().getAlphabet()\n alphabet2 = process2.getStructProfile2().getAlphabet()\n\n kmer_counts1 = process2.getStructProfile1().getProfile()\n kmer_counts2 = 
process2.getStructProfile2().getProfile()\n\n results = SecStructure.processData(process2)\n\n template1 = results[0][0]\n template2 = results[1][0]\n\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n\n # Testing\n\n assert len(alphabet1) == 10\n for e in [\"s\", \"h\", \"b\", \"i\", \"m\", \"E\", \"S\", \"B\", \"I\", \"E\"]:\n assert e in alphabet1\n\n assert len(alphabet2) == 4\n for e in [\"s\", \"S\", \"e\", \"E\"]:\n assert e in alphabet2\n\n assert kmer_counts1 == {'eE': 1, 'Es': 1, 'sS': 1, 'Sh': 1, 'iI': 1, 'Is': 1, 'bB': 1, 'Bs': 1}\n assert kmer_counts2 == {'sS': 3, 'Ss': 2, 'sE': 1, 'Ee': 1, 'Se': 1}\n\n assert template1 == \"EEESSSIIISSSBBBSSSSSSSSSIIISSSEEE\"\n assert dotbracket_string1 == \"...(((...(((...((())))))...)))...\"\n\n assert template2 == \"EEESSSSSSEEE\"\n assert dotbracket_string2 == \"...((()))...\"\n\n # Test3: different alphabets\n sProfile1 = process.getStructProfile1()\n sProfile2 = process.getStructProfile2()\n\n # Test3a: alphabets with no multiloop\n\n alphabet3 = [\"S\", \"B\", \"E\"]\n alphabet4 = [\"S\", \"I\", \"E\"]\n\n sProfile1.setAlphabet(alphabet3)\n sProfile2.setAlphabet(alphabet4)\n\n results = SecStructure.processData(process)\n\n template1 = results[0][0]\n template2 = results[1][0]\n\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n\n assert template1 == \"EEESSSBBBSSSSSSSSSEEE\"\n assert dotbracket_string1 == \"...(((...((())))))...\"\n\n assert template2 == \"EEESSSIIISSSSSSIIISSSEEE\"\n assert dotbracket_string2 == \"...(((...((()))...)))...\"\n\n # Test3b: alphabets with only hairpin or hairpin and multiloop\n alphabet5 = [\"S\", \"H\", \"E\"]\n alphabet6 = [\"S\", \"H\", \"M\", \"E\"]\n\n sProfile1.setAlphabet(alphabet5)\n sProfile2.setAlphabet(alphabet6)\n\n results = SecStructure.processData(process)\n\n template1 = results[0][0]\n template2 = results[1][0]\n\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n\n assert template1 == 
\"EEESSSHHHSSSEEE\"\n assert dotbracket_string1 == \"...(((...)))...\"\n\n assert template2 == \"EEESSSHHHSSSMMMSSSHHHSSSEEE\"\n assert dotbracket_string2 == \"...(((...)))...(((...)))...\"\n\n # Test3c: ('flawed') alphabets with no multiloops\n\n alphabet7 = [\"S\", \"H\", \"E\", \"B\", \"I\"]\n alphabet8 = [\"S\", \"M\", \"E\"] # should be equal to [\"S\",\"E\"]\n\n sProfile1.setAlphabet(alphabet7)\n sProfile2.setAlphabet(alphabet8)\n\n results = SecStructure.processData(process)\n\n template1 = results[0][0]\n template2 = results[1][0]\n\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n\n assert template1 == \"EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE\"\n assert dotbracket_string1 == \"...(((...(((...(((...))))))...)))...\"\n\n assert template2 == \"EEESSSSSSEEE\"\n assert dotbracket_string2 == \"...((()))...\"\n\n\ndef test_createColorVector():\n # Test1: no normalization vector wanted\n k = 2\n no_sec_peak = 1\n template = \"EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE\"\n kmer_counts = {\"EE\": 5, \"ES\": 7, \"SS\": 20, \"SI\": 10, \"II\": 15, \"IS\": 11, \"SB\": 5, \"BB\": 6, \"BS\": 5, \"SH\": 4,\n \"HH\": 5, \"HS\": 4, \"SE\": 7}\n template_sTree = STree.STree(template)\n normalization_vector1 = None\n\n color_hm = {str(i): 0 for i in range(1, len(template) + 1)}\n\n # Executing\n new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k, template_sTree, kmer_counts, color_hm,\n no_sec_peak, normalization_vector1)\n\n assert len(color_hm) == len(new_color_hm1)\n for i in color_hm.keys():\n x = color_hm[i]\n if x > 0:\n assert new_color_hm1[i] == math.log(x, 2)\n else:\n assert new_color_hm1[i] == 0\n assert len(not_matched1) == 0\n assert color_domain_max1 == 4.954196310386876\n\n # Test2: with normalization vector\n\n normalization_vector2 = {\"EE\": 0, \"ES\": 0, \"SS\": 0.7, \"SI\": 0.1, \"II\": 0.2, \"IS\": 0, \"SB\": 0, \"BB\": 0, \"BS\": 0,\n \"SH\": 0, \"HH\": 0, \"HS\": 0, \"SE\": 0}\n\n # Execution\n\n color_hm = {str(i): 0 
for i in range(1, len(template) + 1)}\n new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k, template_sTree, kmer_counts, color_hm,\n no_sec_peak, normalization_vector2)\n\n last_idx = -1\n last_kmer = \"\"\n\n test_color_hm = {str(i): 0 for i in range(1, len(template) + 1)}\n for kmer in normalization_vector2:\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.escape(kmer)), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n for i in range(0, k):\n current_idx = str(idx + i + 1)\n if last_idx + 2 == int(current_idx) and last_kmer == kmer:\n continue\n test_color_hm[current_idx] += (kmer_counts[kmer] / norm)\n last_idx = idx\n last_kmer = kmer\n\n test_color_hm = {x: math.log(y, 2) if y > 0 else y for x, y in test_color_hm.items()}\n test_color_domain_max = max(test_color_hm.values())\n\n # Testing\n\n assert new_color_hm1 is not new_color_hm2\n assert len(color_hm) == len(new_color_hm2)\n assert len(not_matched2) == 0\n assert color_domain_max2 == test_color_domain_max\n for i in new_color_hm2.keys():\n assert new_color_hm2[i] == test_color_hm[i]\n\n # Test3: normalization vector and secondary peak position\n\n kmer_counts2 = {\"Ee\": 5, \"eS\": 7, \"sS\": 20, \"Si\": 10, \"iI\": 15, \"iS\": 11, \"Sb\": 5, \"Bb\": 6, \"bS\": 5, \"sH\": 4,\n \"Hh\": 5, \"hS\": 4, \"Se\": 7}\n no_sec_peak2 = 0\n\n # Execution\n\n color_hm = {str(i): 0 for i in range(1, len(template) + 1)}\n new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k, template_sTree, kmer_counts2, color_hm,\n no_sec_peak2, normalization_vector2)\n\n test_color_hm2 = {str(i): 0 for i in range(1, len(template) + 1)}\n for kmer in kmer_counts2.keys():\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.escape(kmer.upper())), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer.upper()]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n # use only 
peak-position in 2-mer for visualization\n idx = [idx + i for i in range(0, len(kmer)) if kmer[i].isupper()][0]\n test_color_hm2[str(idx + 1)] += (kmer_counts2[kmer] / norm)\n\n test_color_hm2 = {x: math.log(y, 2) if y > 0 else y for x, y in test_color_hm2.items()}\n test_color_domain_max2 = max(test_color_hm2.values())\n\n # Testing\n\n assert len(not_matched3) == 0\n assert new_color_hm2 is not new_color_hm3\n assert len(color_hm) == len(new_color_hm3)\n for i in test_color_hm2:\n assert test_color_hm2[i] == new_color_hm3[i]\n assert test_color_domain_max2 == color_domain_max3\n\n\ndef test_helpAddIBloop():\n k = 3\n\n # Test 1: forward and all true\n template1 = [\"EEE\"]\n internalloop = True\n bulge = True\n forward = True\n\n # Execution\n new_template1 = helpAddIBloop(k, template1, internalloop, bulge, forward)\n\n # Test 2: backward and all true\n template2 = [\"EEE\", \"SSS\", \"III\", \"SSS\", \"BBB\", \"SSS\", \"HHH\"]\n internalloop = True\n bulge = True\n forward = False\n\n # Execution\n new_template2 = helpAddIBloop(k, template2, internalloop, bulge, forward)\n\n # Test 3: only internal loops, forward and backward\n template3_f = [\"EEE\"]\n template3_b = [\"EEE\", \"SSS\", \"III\", \"SSS\", \"HHH\"]\n internalloop = True\n bulge = False\n forward = True\n\n # Execution\n new_template3_f = helpAddIBloop(k, template3_f, internalloop, bulge, forward)\n\n forward = False\n new_template3_b = helpAddIBloop(k, template3_b, internalloop, bulge, forward)\n\n # Test 4: only bulges, forward and backward\n template4_f = [\"EEE\"]\n template4_b = [\"EEE\", \"SSS\", \"BBB\", \"SSS\", \"HHH\"]\n internalloop = False\n bulge = True\n forward = True\n\n # Execution\n new_template4_f = helpAddIBloop(k, template4_f, internalloop, bulge, forward)\n\n forward = False\n new_template4_b = helpAddIBloop(k, template4_b, internalloop, bulge, forward)\n\n # Testing\n assert new_template1 == [\"EEE\", \"SSS\", \"III\", \"SSS\", \"BBB\"]\n assert new_template2 == [\"EEE\", 
\"SSS\", \"III\", \"SSS\", \"BBB\", \"SSS\", \"HHH\", \"SSS\", \"SSS\", \"III\"]\n assert new_template3_f == [\"EEE\", \"SSS\", \"III\"]\n assert new_template3_b == [\"EEE\", \"SSS\", \"III\", \"SSS\", \"HHH\", \"SSS\", \"III\"]\n assert new_template4_f == [\"EEE\", \"SSS\", \"BBB\"]\n assert new_template4_b == [\"EEE\", \"SSS\", \"BBB\", \"SSS\", \"HHH\", \"SSS\"]\n\n\ndef test_element2dotbracket():\n k3 = 3\n k2 = 2\n k4 = 4\n\n # Test1 without multiloop\n elem_list1 = [\"EEE\", \"SSS\", \"III\", \"SSS\", \"BBB\", \"SSS\", \"HHH\", \"SSS\", \"SSS\", \"III\", \"SSS\", \"EEE\"]\n dotbracket_string1 = \"...(((...(((...(((...))))))...)))...\"\n\n # Test2 with multiloop\n elem_list2 = [\"EE\", \"SS\", \"II\", \"SS\", \"HH\", \"SS\", \"II\", \"SS\", \"MM\", \"SS\", \"BB\", \"SS\", \"HH\", \"SS\", \"SS\", \"EE\"]\n dotbracket_string2 = \"..((..((..))..))..((..((..))))..\"\n\n # Test 3 without loops\n elem_list3 = [\"EEEE\", \"SSSS\", \"SSSS\", \"EEEE\"]\n dotbracket_string3 = \"....(((())))....\"\n\n # Test 5 with everything\n elem_list4 = [\"EEE\", \"SSS\", \"III\", \"SSS\", \"BBB\", \"SSS\", \"HHH\", \"SSS\", \"SSS\", \"III\", \"SSS\", \"MMM\", \"SSS\", \"HHH\",\n \"SSS\", \"EEE\"]\n dotbracket_string4 = \"...(((...(((...(((...))))))...)))...(((...)))...\"\n\n # Execution\n db1 = []\n db1.extend(element2dotbracket(elem_list1, k3, 0, 6, True))\n db1.extend(element2dotbracket(elem_list1, k3, 7, len(elem_list1) - 1, False))\n db1 = ''.join(db1)\n\n db2 = []\n db2.extend(element2dotbracket(elem_list2, k2, 0, 4, True))\n db2.extend(element2dotbracket(elem_list2, k2, 5, 8, False))\n db2.extend(element2dotbracket(elem_list2, k2, 9, 12, True))\n db2.extend(element2dotbracket(elem_list2, k2, 13, len(elem_list2) - 1, False))\n db2 = ''.join(db2)\n\n db3 = []\n db3.extend(element2dotbracket(elem_list3, k4, 0, 1, True))\n db3.extend(element2dotbracket(elem_list3, k4, 2, len(elem_list3) - 1, False))\n db3 = ''.join(db3)\n\n db4 = []\n db4.extend(element2dotbracket(elem_list4, k3, 
0, 6, True))\n db4.extend(element2dotbracket(elem_list4, k3, 7, 11, False))\n db4.extend(element2dotbracket(elem_list4, k3, 12, 13, True))\n db4.extend(element2dotbracket(elem_list4, k3, 14, len(elem_list4) - 1, False))\n db4 = ''.join(db4)\n\n # testing\n assert db1 == dotbracket_string1\n assert db2 == dotbracket_string2\n assert db3 == dotbracket_string3\n assert db4 == dotbracket_string4\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# Generated by Django 2.2.6 on 2020-05-21 09:44
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``Patient_c`` and ``Receipt_c`` tables.

    Auto-generated by ``makemigrations`` (Django 2.2.6). The ``_c`` suffix
    presumably marks a counter/cash variant of the existing patient and
    receipt models -- TODO confirm against the app's ``models.py``.
    """

    dependencies = [
        ('DHOPD', '0015_auto_20200515_0126'),
    ]

    operations = [
        migrations.CreateModel(
            name='Patient_c',
            fields=[
                # NOTE(review): ``max_length`` is ignored on AutoField and
                # trips Django system check fields.W122; kept byte-identical
                # here so the migration state matches what was generated.
                ('patient_id', models.AutoField(max_length=200, primary_key=True, serialize=False)),
                ('patient_fname', models.CharField(max_length=200)),
                ('patient_mname', models.CharField(max_length=200)),
                ('patient_lname', models.CharField(max_length=200)),
                ('patient_title', models.CharField(max_length=20)),
                ('patient_address', models.CharField(max_length=500)),
                ('patient_town', models.CharField(max_length=200)),
                ('patient_phone', models.CharField(max_length=15)),
                ('patient_services', models.CharField(max_length=500)),
                ('patient_status', models.CharField(max_length=2)),
                # Cost is stored as text, not a numeric field -- presumably a
                # free-form amount string; verify before doing arithmetic on it.
                ('patient_cost', models.CharField(max_length=100)),
                # Date defaults to the day the row is created; the time field
                # is stamped once on insert (auto_now_add) and never updated.
                ('patient_date', models.DateField(default=datetime.date.today)),
                ('patient_time', models.TimeField(auto_now_add=True)),
                ('patient_comment', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Receipt_c',
            fields=[
                # NOTE(review): same ignored ``max_length`` on AutoField as above.
                ('receipt_id', models.AutoField(max_length=200, primary_key=True, serialize=False)),
                ('receipt_patient', models.CharField(max_length=200)),
                ('receipt_cost', models.CharField(max_length=200)),
                # auto_now=True: re-stamped on every save, not only on insert.
                ('receipt_time', models.TimeField(auto_now=True)),
                # Status kept as a string with default '-1'; its meaning is not
                # visible from this migration -- TODO confirm in the app code.
                ('receipt_status', models.CharField(default='-1', max_length=10)),
            ],
        ),
    ]
|
normal
|
{
"blob_id": "52da8608e43b2d8dfe00f0956a1187fcf2e7b1ff",
"index": 41,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('DHOPD', '0015_auto_20200515_0126')]\n operations = [migrations.CreateModel(name='Patient_c', fields=[(\n 'patient_id', models.AutoField(max_length=200, primary_key=True,\n serialize=False)), ('patient_fname', models.CharField(max_length=\n 200)), ('patient_mname', models.CharField(max_length=200)), (\n 'patient_lname', models.CharField(max_length=200)), (\n 'patient_title', models.CharField(max_length=20)), (\n 'patient_address', models.CharField(max_length=500)), (\n 'patient_town', models.CharField(max_length=200)), ('patient_phone',\n models.CharField(max_length=15)), ('patient_services', models.\n CharField(max_length=500)), ('patient_status', models.CharField(\n max_length=2)), ('patient_cost', models.CharField(max_length=100)),\n ('patient_date', models.DateField(default=datetime.date.today)), (\n 'patient_time', models.TimeField(auto_now_add=True)), (\n 'patient_comment', models.CharField(max_length=200))]), migrations.\n CreateModel(name='Receipt_c', fields=[('receipt_id', models.\n AutoField(max_length=200, primary_key=True, serialize=False)), (\n 'receipt_patient', models.CharField(max_length=200)), (\n 'receipt_cost', models.CharField(max_length=200)), ('receipt_time',\n models.TimeField(auto_now=True)), ('receipt_status', models.\n CharField(default='-1', max_length=10))])]\n",
"step-4": "import datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('DHOPD', '0015_auto_20200515_0126')]\n operations = [migrations.CreateModel(name='Patient_c', fields=[(\n 'patient_id', models.AutoField(max_length=200, primary_key=True,\n serialize=False)), ('patient_fname', models.CharField(max_length=\n 200)), ('patient_mname', models.CharField(max_length=200)), (\n 'patient_lname', models.CharField(max_length=200)), (\n 'patient_title', models.CharField(max_length=20)), (\n 'patient_address', models.CharField(max_length=500)), (\n 'patient_town', models.CharField(max_length=200)), ('patient_phone',\n models.CharField(max_length=15)), ('patient_services', models.\n CharField(max_length=500)), ('patient_status', models.CharField(\n max_length=2)), ('patient_cost', models.CharField(max_length=100)),\n ('patient_date', models.DateField(default=datetime.date.today)), (\n 'patient_time', models.TimeField(auto_now_add=True)), (\n 'patient_comment', models.CharField(max_length=200))]), migrations.\n CreateModel(name='Receipt_c', fields=[('receipt_id', models.\n AutoField(max_length=200, primary_key=True, serialize=False)), (\n 'receipt_patient', models.CharField(max_length=200)), (\n 'receipt_cost', models.CharField(max_length=200)), ('receipt_time',\n models.TimeField(auto_now=True)), ('receipt_status', models.\n CharField(default='-1', max_length=10))])]\n",
"step-5": "# Generated by Django 2.2.6 on 2020-05-21 09:44\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('DHOPD', '0015_auto_20200515_0126'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Patient_c',\n fields=[\n ('patient_id', models.AutoField(max_length=200, primary_key=True, serialize=False)),\n ('patient_fname', models.CharField(max_length=200)),\n ('patient_mname', models.CharField(max_length=200)),\n ('patient_lname', models.CharField(max_length=200)),\n ('patient_title', models.CharField(max_length=20)),\n ('patient_address', models.CharField(max_length=500)),\n ('patient_town', models.CharField(max_length=200)),\n ('patient_phone', models.CharField(max_length=15)),\n ('patient_services', models.CharField(max_length=500)),\n ('patient_status', models.CharField(max_length=2)),\n ('patient_cost', models.CharField(max_length=100)),\n ('patient_date', models.DateField(default=datetime.date.today)),\n ('patient_time', models.TimeField(auto_now_add=True)),\n ('patient_comment', models.CharField(max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='Receipt_c',\n fields=[\n ('receipt_id', models.AutoField(max_length=200, primary_key=True, serialize=False)),\n ('receipt_patient', models.CharField(max_length=200)),\n ('receipt_cost', models.CharField(max_length=200)),\n ('receipt_time', models.TimeField(auto_now=True)),\n ('receipt_status', models.CharField(default='-1', max_length=10)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class DepositForm(base.DepositForm):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def is_automatic(cls, instance):
return True
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def clean(self):
from platforms.converter import convert_currency
amount = self.cleaned_data['amount']
currency = self.cleaned_data['currency']
return super(DepositForm, self).clean()
<|reserved_special_token_0|>
@classmethod
def _calculate_commission(cls, request, full_commission=False):
commission = request.amount * cls.commission_rate
min_comm = Decimal('1')
commission = max(min_comm, commission)
return CommissionCalculationResult(amount=request.amount,
commission=commission, currency=request.currency)
class DetailsForm(base.DetailsForm):
def __init__(self, *args, **kwargs):
super(DetailsForm, self).__init__(*args, **kwargs)
self.fields['purse'].label = _('Net account')
self.fields['purse'].help_text = _(
"Your Neteller's 12-digit Account ID or email address that is associated with their NETELLER account"
)
class WithdrawForm(base.WithdrawForm):
MIN_AMOUNT = 10, 'USD'
commission_rate = Decimal('0.025')
@classmethod
def _calculate_commission(cls, request, full_commission=False):
commission = request.amount * cls.commission_rate
min_comm = Decimal('1')
max_comm = Decimal('30')
commission = min(max_comm, max(min_comm, commission))
return CommissionCalculationResult(amount=request.amount,
commission=commission, currency=request.currency)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DepositForm(base.DepositForm):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def is_automatic(cls, instance):
return True
<|reserved_special_token_0|>
def make_request(self):
import json
currency = {'RUR': 'RUB'}.get(self.instance.currency, self.instance
.currency)
amount = int(decimal_round(self.instance.amount) * 100)
token_tuple = self.get_neteller_token()
if not token_tuple:
return "Can't get the token."
data = {'paymentMethod': {'type': 'neteller', 'value': self.
instance.purse}, 'transaction': {'merchantRefId': unicode(self.
instance.pk), 'amount': amount, 'currency': currency},
'verificationCode': unicode(self.instance.params['secure_id'])}
headers = {'Content-Type': 'application/json', 'Authorization':
token_tuple[1] + ' ' + token_tuple[0]}
request = requests.post(self.bill_address, data=json.dumps(data),
headers=headers)
request = request.json()
if request.get('transaction') and request.get('transaction').get(
'status') == 'accepted':
self.instance.refresh_state()
self.instance.is_payed = True
self.instance.params['transaction'] = request.get('transaction'
).get('id')
self.instance.save()
return None
else:
error_message = request.get('error').get('message') if request.get(
'error') else 'Automatic payment failed.'
self.instance.is_committed = False
self.instance.is_payed = False
self.instance.public_comment = error_message
self.instance.save()
return error_message
<|reserved_special_token_0|>
def clean(self):
from platforms.converter import convert_currency
amount = self.cleaned_data['amount']
currency = self.cleaned_data['currency']
return super(DepositForm, self).clean()
<|reserved_special_token_0|>
@classmethod
def _calculate_commission(cls, request, full_commission=False):
commission = request.amount * cls.commission_rate
min_comm = Decimal('1')
commission = max(min_comm, commission)
return CommissionCalculationResult(amount=request.amount,
commission=commission, currency=request.currency)
class DetailsForm(base.DetailsForm):
def __init__(self, *args, **kwargs):
super(DetailsForm, self).__init__(*args, **kwargs)
self.fields['purse'].label = _('Net account')
self.fields['purse'].help_text = _(
"Your Neteller's 12-digit Account ID or email address that is associated with their NETELLER account"
)
class WithdrawForm(base.WithdrawForm):
MIN_AMOUNT = 10, 'USD'
commission_rate = Decimal('0.025')
@classmethod
def _calculate_commission(cls, request, full_commission=False):
commission = request.amount * cls.commission_rate
min_comm = Decimal('1')
max_comm = Decimal('30')
commission = min(max_comm, max(min_comm, commission))
return CommissionCalculationResult(amount=request.amount,
commission=commission, currency=request.currency)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DepositForm(base.DepositForm):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def is_automatic(cls, instance):
return True
<|reserved_special_token_0|>
def make_request(self):
import json
currency = {'RUR': 'RUB'}.get(self.instance.currency, self.instance
.currency)
amount = int(decimal_round(self.instance.amount) * 100)
token_tuple = self.get_neteller_token()
if not token_tuple:
return "Can't get the token."
data = {'paymentMethod': {'type': 'neteller', 'value': self.
instance.purse}, 'transaction': {'merchantRefId': unicode(self.
instance.pk), 'amount': amount, 'currency': currency},
'verificationCode': unicode(self.instance.params['secure_id'])}
headers = {'Content-Type': 'application/json', 'Authorization':
token_tuple[1] + ' ' + token_tuple[0]}
request = requests.post(self.bill_address, data=json.dumps(data),
headers=headers)
request = request.json()
if request.get('transaction') and request.get('transaction').get(
'status') == 'accepted':
self.instance.refresh_state()
self.instance.is_payed = True
self.instance.params['transaction'] = request.get('transaction'
).get('id')
self.instance.save()
return None
else:
error_message = request.get('error').get('message') if request.get(
'error') else 'Automatic payment failed.'
self.instance.is_committed = False
self.instance.is_payed = False
self.instance.public_comment = error_message
self.instance.save()
return error_message
@classmethod
def generate_mt4_comment(cls, payment_request):
return '{NETELLER}[%s]' % payment_request.pk
def clean(self):
from platforms.converter import convert_currency
amount = self.cleaned_data['amount']
currency = self.cleaned_data['currency']
return super(DepositForm, self).clean()
<|reserved_special_token_0|>
@classmethod
def _calculate_commission(cls, request, full_commission=False):
commission = request.amount * cls.commission_rate
min_comm = Decimal('1')
commission = max(min_comm, commission)
return CommissionCalculationResult(amount=request.amount,
commission=commission, currency=request.currency)
class DetailsForm(base.DetailsForm):
def __init__(self, *args, **kwargs):
super(DetailsForm, self).__init__(*args, **kwargs)
self.fields['purse'].label = _('Net account')
self.fields['purse'].help_text = _(
"Your Neteller's 12-digit Account ID or email address that is associated with their NETELLER account"
)
class WithdrawForm(base.WithdrawForm):
MIN_AMOUNT = 10, 'USD'
commission_rate = Decimal('0.025')
@classmethod
def _calculate_commission(cls, request, full_commission=False):
commission = request.amount * cls.commission_rate
min_comm = Decimal('1')
max_comm = Decimal('30')
commission = min(max_comm, max(min_comm, commission))
return CommissionCalculationResult(amount=request.amount,
commission=commission, currency=request.currency)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DepositForm(base.DepositForm):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def is_automatic(cls, instance):
return True
def get_neteller_token(self):
"""
:return: tuple. ('accessToken', 'Auth method'). Example: ("0.AQAAAU3in", "Bearer")
or None if can't get token.
"""
headers = {'Content-Type': 'application/json', 'Cache-Control':
'no-cache', 'Authorization': 'Basic ' + base64.b64encode(
settings.NETELLER_MERCHANT_ID + ':' + settings.NETELLER_SECRET_KEY)
}
result = requests.post(self.get_token_url, headers=headers)
if result.status_code == 200:
result = result.json()
else:
return None
if result.get('accessToken'):
return result.get('accessToken'), result.get('tokenType')
else:
return None
def make_request(self):
import json
currency = {'RUR': 'RUB'}.get(self.instance.currency, self.instance
.currency)
amount = int(decimal_round(self.instance.amount) * 100)
token_tuple = self.get_neteller_token()
if not token_tuple:
return "Can't get the token."
data = {'paymentMethod': {'type': 'neteller', 'value': self.
instance.purse}, 'transaction': {'merchantRefId': unicode(self.
instance.pk), 'amount': amount, 'currency': currency},
'verificationCode': unicode(self.instance.params['secure_id'])}
headers = {'Content-Type': 'application/json', 'Authorization':
token_tuple[1] + ' ' + token_tuple[0]}
request = requests.post(self.bill_address, data=json.dumps(data),
headers=headers)
request = request.json()
if request.get('transaction') and request.get('transaction').get(
'status') == 'accepted':
self.instance.refresh_state()
self.instance.is_payed = True
self.instance.params['transaction'] = request.get('transaction'
).get('id')
self.instance.save()
return None
else:
error_message = request.get('error').get('message') if request.get(
'error') else 'Automatic payment failed.'
self.instance.is_committed = False
self.instance.is_payed = False
self.instance.public_comment = error_message
self.instance.save()
return error_message
@classmethod
def generate_mt4_comment(cls, payment_request):
return '{NETELLER}[%s]' % payment_request.pk
def clean(self):
from platforms.converter import convert_currency
amount = self.cleaned_data['amount']
currency = self.cleaned_data['currency']
return super(DepositForm, self).clean()
def confirmed_response_data(self, request):
error = self.make_request()
if error:
return {'detail': 'Error: %s' % error}, 400
else:
return {'success': True}, None
@classmethod
def _calculate_commission(cls, request, full_commission=False):
commission = request.amount * cls.commission_rate
min_comm = Decimal('1')
commission = max(min_comm, commission)
return CommissionCalculationResult(amount=request.amount,
commission=commission, currency=request.currency)
class DetailsForm(base.DetailsForm):
def __init__(self, *args, **kwargs):
super(DetailsForm, self).__init__(*args, **kwargs)
self.fields['purse'].label = _('Net account')
self.fields['purse'].help_text = _(
"Your Neteller's 12-digit Account ID or email address that is associated with their NETELLER account"
)
class WithdrawForm(base.WithdrawForm):
MIN_AMOUNT = 10, 'USD'
commission_rate = Decimal('0.025')
@classmethod
def _calculate_commission(cls, request, full_commission=False):
commission = request.amount * cls.commission_rate
min_comm = Decimal('1')
max_comm = Decimal('30')
commission = min(max_comm, max(min_comm, commission))
return CommissionCalculationResult(amount=request.amount,
commission=commission, currency=request.currency)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import base64
import logging
from decimal import Decimal
import requests
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from currencies.currencies import decimal_round
from payments.systems import base
from payments.systems.bankusd import display_amount_usd
from payments.systems.base import CommissionCalculationResult
name = _("Neteller")
logo = "neteller.png"
slug = __name__.rsplit(".", 1)[-1]
currencies = ["USD"]
mt4_payment_slug = "NETELLER"
transfer_details = {
"deposit": {
"fee": "3.5% min $1",
"time": _("Within day"),
"min_amount": display_amount_usd(10),
},
"withdraw": {
"fee": _("2.5% min $1 max $30"),
"time": _("Within day"),
"min_amount": display_amount_usd(10),
}
}
templates = {
"deposit": "payments/forms/deposit/neteller.html",
"withdraw": "payments/forms/withdraw/electronic.html",
}
log = logging.getLogger(__name__)
class DepositForm(base.DepositForm):
purse = forms.CharField(max_length=100, label=_("Net account"),
help_text=_("Your Neteller's 12-digit Account ID or email address that is "
"associated with their NETELLER account"))
secure_id = forms.IntegerField(label=_("Secure ID"), help_text=_("Your Neteller's 6-digit Secure ID"))
bill_address = "https://api.neteller.com/v1/transferIn"
get_token_url = "https://api.neteller.com/v1/oauth2/token?grant_type=client_credentials"
commission_rate = Decimal("0.035")
MIN_AMOUNT = (10, 'USD')
@classmethod
def is_automatic(cls, instance):
return True
def get_neteller_token(self):
"""
:return: tuple. ('accessToken', 'Auth method'). Example: ("0.AQAAAU3in", "Bearer")
or None if can't get token.
"""
headers = {'Content-Type': 'application/json',
'Cache-Control': 'no-cache',
'Authorization': 'Basic ' + base64.b64encode(
settings.NETELLER_MERCHANT_ID + ':' + settings.NETELLER_SECRET_KEY)}
result = requests.post(self.get_token_url, headers = headers)
if result.status_code == 200:
result = result.json()
else:
return None
if result.get("accessToken"):
return result.get("accessToken"), result.get("tokenType")
else:
return None
def make_request(self):
import json
currency = {
"RUR": "RUB"
}.get(self.instance.currency, self.instance.currency)
amount = int(decimal_round(self.instance.amount) * 100)
token_tuple = self.get_neteller_token()
if not token_tuple:
return "Can't get the token."
data = {
"paymentMethod": {
"type": "neteller",
"value": self.instance.purse
},
"transaction": {
"merchantRefId": unicode(self.instance.pk),
"amount": amount,
"currency": currency
},
"verificationCode": unicode(self.instance.params["secure_id"]),
}
headers = {'Content-Type': 'application/json', 'Authorization': token_tuple[1] + " " + token_tuple[0]}
request = requests.post(self.bill_address, data=json.dumps(data), headers=headers)
request = request.json()
if request.get("transaction") and request.get("transaction").get("status") == "accepted":
self.instance.refresh_state()
self.instance.is_payed = True
self.instance.params["transaction"] = request.get("transaction").get("id")
self.instance.save()
return None
else:
error_message = request.get("error").get("message") if request.get("error") else \
"Automatic payment failed."
self.instance.is_committed = False
self.instance.is_payed = False
self.instance.public_comment = error_message
self.instance.save()
return error_message
@classmethod
def generate_mt4_comment(cls, payment_request):
return "{NETELLER}[%s]" % payment_request.pk
def clean(self):
from platforms.converter import convert_currency
amount = self.cleaned_data["amount"]
currency = self.cleaned_data["currency"]
return super(DepositForm, self).clean()
def confirmed_response_data(self, request):
error = self.make_request()
if error:
return {'detail': "Error: %s" % error}, 400
else:
return {"success": True}, None
@classmethod
def _calculate_commission(cls, request, full_commission=False):
commission = request.amount * cls.commission_rate
min_comm = Decimal("1")
commission = max(min_comm, commission)
return CommissionCalculationResult(
amount=request.amount,
commission=commission,
currency=request.currency
)
class DetailsForm(base.DetailsForm):
def __init__(self, *args, **kwargs):
super(DetailsForm, self).__init__(*args, **kwargs)
self.fields["purse"].label = _("Net account")
self.fields["purse"].help_text = _("Your Neteller's 12-digit Account ID or email address that is "
"associated with their NETELLER account")
class WithdrawForm(base.WithdrawForm):
    """Neteller withdrawal form: 2.5% commission, clamped to the [$1, $30] band."""

    MIN_AMOUNT = (10, 'USD')
    commission_rate = Decimal("0.025")

    @classmethod
    def _calculate_commission(cls, request, full_commission=False):
        """Rate-based commission with a $1 floor and a $30 ceiling."""
        fee = request.amount * cls.commission_rate
        lower = Decimal("1")
        upper = Decimal("30")
        if fee < lower:
            fee = lower
        elif fee > upper:
            fee = upper
        return CommissionCalculationResult(
            amount=request.amount,
            commission=fee,
            currency=request.currency,
        )
|
flexible
|
{
"blob_id": "15c1db535beb115c45aeba433a946255f70fa86e",
"index": 7845,
"step-1": "<mask token>\n\n\nclass DepositForm(base.DepositForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def is_automatic(cls, instance):\n return True\n <mask token>\n <mask token>\n <mask token>\n\n def clean(self):\n from platforms.converter import convert_currency\n amount = self.cleaned_data['amount']\n currency = self.cleaned_data['currency']\n return super(DepositForm, self).clean()\n <mask token>\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal('1')\n commission = max(min_comm, commission)\n return CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n\n\nclass DetailsForm(base.DetailsForm):\n\n def __init__(self, *args, **kwargs):\n super(DetailsForm, self).__init__(*args, **kwargs)\n self.fields['purse'].label = _('Net account')\n self.fields['purse'].help_text = _(\n \"Your Neteller's 12-digit Account ID or email address that is associated with their NETELLER account\"\n )\n\n\nclass WithdrawForm(base.WithdrawForm):\n MIN_AMOUNT = 10, 'USD'\n commission_rate = Decimal('0.025')\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal('1')\n max_comm = Decimal('30')\n commission = min(max_comm, max(min_comm, commission))\n return CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n",
"step-2": "<mask token>\n\n\nclass DepositForm(base.DepositForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def is_automatic(cls, instance):\n return True\n <mask token>\n\n def make_request(self):\n import json\n currency = {'RUR': 'RUB'}.get(self.instance.currency, self.instance\n .currency)\n amount = int(decimal_round(self.instance.amount) * 100)\n token_tuple = self.get_neteller_token()\n if not token_tuple:\n return \"Can't get the token.\"\n data = {'paymentMethod': {'type': 'neteller', 'value': self.\n instance.purse}, 'transaction': {'merchantRefId': unicode(self.\n instance.pk), 'amount': amount, 'currency': currency},\n 'verificationCode': unicode(self.instance.params['secure_id'])}\n headers = {'Content-Type': 'application/json', 'Authorization': \n token_tuple[1] + ' ' + token_tuple[0]}\n request = requests.post(self.bill_address, data=json.dumps(data),\n headers=headers)\n request = request.json()\n if request.get('transaction') and request.get('transaction').get(\n 'status') == 'accepted':\n self.instance.refresh_state()\n self.instance.is_payed = True\n self.instance.params['transaction'] = request.get('transaction'\n ).get('id')\n self.instance.save()\n return None\n else:\n error_message = request.get('error').get('message') if request.get(\n 'error') else 'Automatic payment failed.'\n self.instance.is_committed = False\n self.instance.is_payed = False\n self.instance.public_comment = error_message\n self.instance.save()\n return error_message\n <mask token>\n\n def clean(self):\n from platforms.converter import convert_currency\n amount = self.cleaned_data['amount']\n currency = self.cleaned_data['currency']\n return super(DepositForm, self).clean()\n <mask token>\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal('1')\n commission = max(min_comm, commission)\n return 
CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n\n\nclass DetailsForm(base.DetailsForm):\n\n def __init__(self, *args, **kwargs):\n super(DetailsForm, self).__init__(*args, **kwargs)\n self.fields['purse'].label = _('Net account')\n self.fields['purse'].help_text = _(\n \"Your Neteller's 12-digit Account ID or email address that is associated with their NETELLER account\"\n )\n\n\nclass WithdrawForm(base.WithdrawForm):\n MIN_AMOUNT = 10, 'USD'\n commission_rate = Decimal('0.025')\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal('1')\n max_comm = Decimal('30')\n commission = min(max_comm, max(min_comm, commission))\n return CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n",
"step-3": "<mask token>\n\n\nclass DepositForm(base.DepositForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def is_automatic(cls, instance):\n return True\n <mask token>\n\n def make_request(self):\n import json\n currency = {'RUR': 'RUB'}.get(self.instance.currency, self.instance\n .currency)\n amount = int(decimal_round(self.instance.amount) * 100)\n token_tuple = self.get_neteller_token()\n if not token_tuple:\n return \"Can't get the token.\"\n data = {'paymentMethod': {'type': 'neteller', 'value': self.\n instance.purse}, 'transaction': {'merchantRefId': unicode(self.\n instance.pk), 'amount': amount, 'currency': currency},\n 'verificationCode': unicode(self.instance.params['secure_id'])}\n headers = {'Content-Type': 'application/json', 'Authorization': \n token_tuple[1] + ' ' + token_tuple[0]}\n request = requests.post(self.bill_address, data=json.dumps(data),\n headers=headers)\n request = request.json()\n if request.get('transaction') and request.get('transaction').get(\n 'status') == 'accepted':\n self.instance.refresh_state()\n self.instance.is_payed = True\n self.instance.params['transaction'] = request.get('transaction'\n ).get('id')\n self.instance.save()\n return None\n else:\n error_message = request.get('error').get('message') if request.get(\n 'error') else 'Automatic payment failed.'\n self.instance.is_committed = False\n self.instance.is_payed = False\n self.instance.public_comment = error_message\n self.instance.save()\n return error_message\n\n @classmethod\n def generate_mt4_comment(cls, payment_request):\n return '{NETELLER}[%s]' % payment_request.pk\n\n def clean(self):\n from platforms.converter import convert_currency\n amount = self.cleaned_data['amount']\n currency = self.cleaned_data['currency']\n return super(DepositForm, self).clean()\n <mask token>\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * 
cls.commission_rate\n min_comm = Decimal('1')\n commission = max(min_comm, commission)\n return CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n\n\nclass DetailsForm(base.DetailsForm):\n\n def __init__(self, *args, **kwargs):\n super(DetailsForm, self).__init__(*args, **kwargs)\n self.fields['purse'].label = _('Net account')\n self.fields['purse'].help_text = _(\n \"Your Neteller's 12-digit Account ID or email address that is associated with their NETELLER account\"\n )\n\n\nclass WithdrawForm(base.WithdrawForm):\n MIN_AMOUNT = 10, 'USD'\n commission_rate = Decimal('0.025')\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal('1')\n max_comm = Decimal('30')\n commission = min(max_comm, max(min_comm, commission))\n return CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n",
"step-4": "<mask token>\n\n\nclass DepositForm(base.DepositForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def is_automatic(cls, instance):\n return True\n\n def get_neteller_token(self):\n \"\"\"\n :return: tuple. ('accessToken', 'Auth method'). Example: (\"0.AQAAAU3in\", \"Bearer\")\n or None if can't get token.\n \"\"\"\n headers = {'Content-Type': 'application/json', 'Cache-Control':\n 'no-cache', 'Authorization': 'Basic ' + base64.b64encode(\n settings.NETELLER_MERCHANT_ID + ':' + settings.NETELLER_SECRET_KEY)\n }\n result = requests.post(self.get_token_url, headers=headers)\n if result.status_code == 200:\n result = result.json()\n else:\n return None\n if result.get('accessToken'):\n return result.get('accessToken'), result.get('tokenType')\n else:\n return None\n\n def make_request(self):\n import json\n currency = {'RUR': 'RUB'}.get(self.instance.currency, self.instance\n .currency)\n amount = int(decimal_round(self.instance.amount) * 100)\n token_tuple = self.get_neteller_token()\n if not token_tuple:\n return \"Can't get the token.\"\n data = {'paymentMethod': {'type': 'neteller', 'value': self.\n instance.purse}, 'transaction': {'merchantRefId': unicode(self.\n instance.pk), 'amount': amount, 'currency': currency},\n 'verificationCode': unicode(self.instance.params['secure_id'])}\n headers = {'Content-Type': 'application/json', 'Authorization': \n token_tuple[1] + ' ' + token_tuple[0]}\n request = requests.post(self.bill_address, data=json.dumps(data),\n headers=headers)\n request = request.json()\n if request.get('transaction') and request.get('transaction').get(\n 'status') == 'accepted':\n self.instance.refresh_state()\n self.instance.is_payed = True\n self.instance.params['transaction'] = request.get('transaction'\n ).get('id')\n self.instance.save()\n return None\n else:\n error_message = request.get('error').get('message') if request.get(\n 'error') else 'Automatic payment 
failed.'\n self.instance.is_committed = False\n self.instance.is_payed = False\n self.instance.public_comment = error_message\n self.instance.save()\n return error_message\n\n @classmethod\n def generate_mt4_comment(cls, payment_request):\n return '{NETELLER}[%s]' % payment_request.pk\n\n def clean(self):\n from platforms.converter import convert_currency\n amount = self.cleaned_data['amount']\n currency = self.cleaned_data['currency']\n return super(DepositForm, self).clean()\n\n def confirmed_response_data(self, request):\n error = self.make_request()\n if error:\n return {'detail': 'Error: %s' % error}, 400\n else:\n return {'success': True}, None\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal('1')\n commission = max(min_comm, commission)\n return CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n\n\nclass DetailsForm(base.DetailsForm):\n\n def __init__(self, *args, **kwargs):\n super(DetailsForm, self).__init__(*args, **kwargs)\n self.fields['purse'].label = _('Net account')\n self.fields['purse'].help_text = _(\n \"Your Neteller's 12-digit Account ID or email address that is associated with their NETELLER account\"\n )\n\n\nclass WithdrawForm(base.WithdrawForm):\n MIN_AMOUNT = 10, 'USD'\n commission_rate = Decimal('0.025')\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal('1')\n max_comm = Decimal('30')\n commission = min(max_comm, max(min_comm, commission))\n return CommissionCalculationResult(amount=request.amount,\n commission=commission, currency=request.currency)\n",
"step-5": "# -*- coding: utf-8 -*-\nimport base64\nimport logging\nfrom decimal import Decimal\n\nimport requests\nfrom django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom currencies.currencies import decimal_round\nfrom payments.systems import base\nfrom payments.systems.bankusd import display_amount_usd\nfrom payments.systems.base import CommissionCalculationResult\n\nname = _(\"Neteller\")\nlogo = \"neteller.png\"\nslug = __name__.rsplit(\".\", 1)[-1]\ncurrencies = [\"USD\"]\nmt4_payment_slug = \"NETELLER\"\n\ntransfer_details = {\n \"deposit\": {\n \"fee\": \"3.5% min $1\",\n \"time\": _(\"Within day\"),\n \"min_amount\": display_amount_usd(10),\n },\n \"withdraw\": {\n \"fee\": _(\"2.5% min $1 max $30\"),\n \"time\": _(\"Within day\"),\n \"min_amount\": display_amount_usd(10),\n }\n}\n\ntemplates = {\n \"deposit\": \"payments/forms/deposit/neteller.html\",\n \"withdraw\": \"payments/forms/withdraw/electronic.html\",\n}\n\nlog = logging.getLogger(__name__)\n\n\nclass DepositForm(base.DepositForm):\n\n purse = forms.CharField(max_length=100, label=_(\"Net account\"),\n help_text=_(\"Your Neteller's 12-digit Account ID or email address that is \"\n \"associated with their NETELLER account\"))\n secure_id = forms.IntegerField(label=_(\"Secure ID\"), help_text=_(\"Your Neteller's 6-digit Secure ID\"))\n\n bill_address = \"https://api.neteller.com/v1/transferIn\"\n get_token_url = \"https://api.neteller.com/v1/oauth2/token?grant_type=client_credentials\"\n commission_rate = Decimal(\"0.035\")\n MIN_AMOUNT = (10, 'USD')\n\n @classmethod\n def is_automatic(cls, instance):\n return True\n\n def get_neteller_token(self):\n \"\"\"\n :return: tuple. ('accessToken', 'Auth method'). 
Example: (\"0.AQAAAU3in\", \"Bearer\")\n or None if can't get token.\n \"\"\"\n\n headers = {'Content-Type': 'application/json',\n 'Cache-Control': 'no-cache',\n 'Authorization': 'Basic ' + base64.b64encode(\n settings.NETELLER_MERCHANT_ID + ':' + settings.NETELLER_SECRET_KEY)}\n\n\n result = requests.post(self.get_token_url, headers = headers)\n\n if result.status_code == 200:\n result = result.json()\n else:\n return None\n\n if result.get(\"accessToken\"):\n return result.get(\"accessToken\"), result.get(\"tokenType\")\n else:\n return None\n\n def make_request(self):\n import json\n\n currency = {\n \"RUR\": \"RUB\"\n }.get(self.instance.currency, self.instance.currency)\n amount = int(decimal_round(self.instance.amount) * 100)\n token_tuple = self.get_neteller_token()\n\n if not token_tuple:\n return \"Can't get the token.\"\n\n data = {\n \"paymentMethod\": {\n \"type\": \"neteller\",\n \"value\": self.instance.purse\n },\n \"transaction\": {\n \"merchantRefId\": unicode(self.instance.pk),\n \"amount\": amount,\n \"currency\": currency\n },\n \"verificationCode\": unicode(self.instance.params[\"secure_id\"]),\n }\n\n headers = {'Content-Type': 'application/json', 'Authorization': token_tuple[1] + \" \" + token_tuple[0]}\n\n request = requests.post(self.bill_address, data=json.dumps(data), headers=headers)\n\n request = request.json()\n\n if request.get(\"transaction\") and request.get(\"transaction\").get(\"status\") == \"accepted\":\n self.instance.refresh_state()\n self.instance.is_payed = True\n self.instance.params[\"transaction\"] = request.get(\"transaction\").get(\"id\")\n self.instance.save()\n return None\n else:\n error_message = request.get(\"error\").get(\"message\") if request.get(\"error\") else \\\n \"Automatic payment failed.\"\n self.instance.is_committed = False\n self.instance.is_payed = False\n self.instance.public_comment = error_message\n self.instance.save()\n return error_message\n\n @classmethod\n def generate_mt4_comment(cls, 
payment_request):\n return \"{NETELLER}[%s]\" % payment_request.pk\n\n def clean(self):\n from platforms.converter import convert_currency\n amount = self.cleaned_data[\"amount\"]\n currency = self.cleaned_data[\"currency\"]\n return super(DepositForm, self).clean()\n\n def confirmed_response_data(self, request):\n error = self.make_request()\n if error:\n return {'detail': \"Error: %s\" % error}, 400\n else:\n return {\"success\": True}, None\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal(\"1\")\n commission = max(min_comm, commission)\n return CommissionCalculationResult(\n amount=request.amount,\n commission=commission,\n currency=request.currency\n )\n\nclass DetailsForm(base.DetailsForm):\n\n def __init__(self, *args, **kwargs):\n super(DetailsForm, self).__init__(*args, **kwargs)\n self.fields[\"purse\"].label = _(\"Net account\")\n self.fields[\"purse\"].help_text = _(\"Your Neteller's 12-digit Account ID or email address that is \"\n \"associated with their NETELLER account\")\n\n\nclass WithdrawForm(base.WithdrawForm):\n MIN_AMOUNT = (10, 'USD')\n commission_rate = Decimal(\"0.025\")\n\n @classmethod\n def _calculate_commission(cls, request, full_commission=False):\n commission = request.amount * cls.commission_rate\n min_comm = Decimal(\"1\")\n max_comm = Decimal(\"30\")\n commission = min(max_comm, max(min_comm, commission))\n return CommissionCalculationResult(\n amount=request.amount,\n commission=commission,\n currency=request.currency\n )\n",
"step-ids": [
9,
10,
11,
13,
17
]
}
|
[
9,
10,
11,
13,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with urlopen('http://api.nbp.pl/api/exchangerates/tables/A/') as site:
data = loads(site.read().decode('utf-8'))
rates = data[0]['rates']
exchange = input('Jaką wartość chcesz wymienić na złotówki? ')
value, code = exchange.split(' ')
value = float(value)
rate = list(filter(lambda x: x['code'] == code, rates))
print(f"Otrzymujesz {value * rate[0]['mid']} PLN")
<|reserved_special_token_1|>
from urllib.request import urlopen
from json import loads
with urlopen('http://api.nbp.pl/api/exchangerates/tables/A/') as site:
data = loads(site.read().decode('utf-8'))
rates = data[0]['rates']
exchange = input('Jaką wartość chcesz wymienić na złotówki? ')
value, code = exchange.split(' ')
value = float(value)
rate = list(filter(lambda x: x['code'] == code, rates))
print(f"Otrzymujesz {value * rate[0]['mid']} PLN")
<|reserved_special_token_1|>
from urllib.request import urlopen
from json import loads
with urlopen('http://api.nbp.pl/api/exchangerates/tables/A/') as site:
data = loads(site.read().decode('utf-8'))
rates = data[0]['rates']
exchange = input('Jaką wartość chcesz wymienić na złotówki? ')
value, code = exchange.split(' ')
value = float(value)
rate = list(filter(lambda x: x['code'] == code, rates))
print(f'Otrzymujesz {value * rate[0]["mid"]} PLN')
|
flexible
|
{
"blob_id": "3f3d7cdf7732b2a1568cd97574e1443225667327",
"index": 9622,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith urlopen('http://api.nbp.pl/api/exchangerates/tables/A/') as site:\n data = loads(site.read().decode('utf-8'))\n rates = data[0]['rates']\n exchange = input('Jaką wartość chcesz wymienić na złotówki? ')\n value, code = exchange.split(' ')\n value = float(value)\n rate = list(filter(lambda x: x['code'] == code, rates))\n print(f\"Otrzymujesz {value * rate[0]['mid']} PLN\")\n",
"step-3": "from urllib.request import urlopen\nfrom json import loads\nwith urlopen('http://api.nbp.pl/api/exchangerates/tables/A/') as site:\n data = loads(site.read().decode('utf-8'))\n rates = data[0]['rates']\n exchange = input('Jaką wartość chcesz wymienić na złotówki? ')\n value, code = exchange.split(' ')\n value = float(value)\n rate = list(filter(lambda x: x['code'] == code, rates))\n print(f\"Otrzymujesz {value * rate[0]['mid']} PLN\")\n",
"step-4": "from urllib.request import urlopen\nfrom json import loads\n\n\nwith urlopen('http://api.nbp.pl/api/exchangerates/tables/A/') as site:\n data = loads(site.read().decode('utf-8'))\n rates = data[0]['rates']\n\n exchange = input('Jaką wartość chcesz wymienić na złotówki? ')\n value, code = exchange.split(' ')\n value = float(value)\n\n rate = list(filter(lambda x: x['code'] == code, rates))\n print(f'Otrzymujesz {value * rate[0][\"mid\"]} PLN')\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.contrib import admin
from .models import Hash
# Register Hash with the default ModelAdmin so it appears in the admin site.
admin.site.register(Hash)
|
normal
|
{
"blob_id": "e2e4adaa8f7f62662e0c2915faff1bed72986351",
"index": 1084,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Hash)\n",
"step-3": "from django.contrib import admin\nfrom .models import Hash\nadmin.site.register(Hash)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import heapq as heap
import networkx as nx
import copy
import random
def remove_jumps(moves):
    """Expand multi-step 'jump' moves into sequences of unit moves.

    Moves whose length field (index 2) is 1 pass through untouched; longer
    jumps have their stored path reversed and are replaced by per-edge moves.

    NOTE: reverses each jump's path list in place, as the original did.
    """
    flattened = []
    for move in moves:
        if move[2] <= 1:
            flattened.append(move)
        else:
            move[3].reverse()
            flattened.extend(make_moves_from_path(move[3]))
    return flattened
def make_moves_from_path(path):
    """Turn a node path into unit moves of the form (to, from, 1, [to, from])."""
    steps = list(path)
    return [(steps[i + 1], steps[i], 1, [steps[i + 1], steps[i]])
            for i in range(len(steps) - 1)]
def find_nearest_hole(o, r, graph, start):
    """BFS from `start` through obstacle nodes to every reachable free node.

    Args:
        o: list of obstacle node ids.
        r: robot node id — never counted as a hole.
        graph: object exposing an ``adj`` mapping node -> iterable of neighbours.
        start: node to search from.

    Returns:
        A list of (start, hole, path_length, path) candidate moves, one per
        free node found on the frontier of the obstacle region, in BFS order.
    """
    # Set membership is O(1); the original scanned a list (O(n) per check).
    visited = set()
    queue = [(start, [start])]
    results = []
    while queue:
        node, search_path = queue.pop(0)
        if node in visited:
            continue
        visited.add(node)
        for neighbor in graph.adj[node]:
            if neighbor in o:
                if neighbor not in visited:
                    queue.append((neighbor, search_path + [neighbor]))
            elif neighbor != r:
                results.append(search_path + [neighbor])
    return [(res[0], res[-1], len(res) - 1, res) for res in results]
def move_robot(o, r, graph, node_from, node_to):
    """Move the robot to ``node_to``; returns a fresh (obstacles, robot) pair.

    Raises RuntimeError when ``node_from`` is not the robot or ``node_to``
    is occupied by an obstacle.
    """
    if node_from != r:
        raise RuntimeError('node_from is not robot ' + node_from)
    if node_to in o:
        raise RuntimeError('node_to is obstacle ' + node_to)
    return (o[:], node_to)
def move_obstacle(o, r, graph, node_from, node_to):
    """Relocate one obstacle; returns a fresh (obstacles, robot) pair.

    Raises RuntimeError when ``node_from`` is not an obstacle, or when
    ``node_to`` is occupied by an obstacle or by the robot.
    """
    if node_from not in o:
        raise RuntimeError('node_from is not obstacle ' + node_from)
    if node_to in o:
        raise RuntimeError('node_to is obstacle ' + node_to)
    if node_to == r:
        raise RuntimeError('node_to is robot' + node_to)
    updated = o[:]
    updated.append(node_to)
    updated.remove(node_from)
    return (updated, r)
def make_move(o, r, graph, node_from, node_to):
    """Dispatch one move to the robot- or obstacle-moving routine.

    A ``None`` source is a no-op and returns the state unchanged.
    Raises RuntimeError when ``node_from`` is neither robot nor obstacle.
    """
    if node_from is None:
        return (o, r)
    if node_from == r:
        return move_robot(o, r, graph, node_from, node_to)
    if node_from in o:
        return move_obstacle(o, r, graph, node_from, node_to)
    raise RuntimeError('Cant move from ' + node_from)
def make_moves(o, r, graph, moves):
    """Apply a sequence of moves in order; returns the final (obstacles, robot)."""
    obstacles, robot = o[:], r
    for move in moves:
        obstacles, robot = make_move(obstacles, robot, graph, move[0], move[1])
    return (obstacles, robot)
def is_hole(o, r, node):
    """A node is a hole when no obstacle occupies it (the robot is ignored)."""
    return node not in o
def possible_robot_moves(o, r, graph):
    """Enumerate one-step robot moves into adjacent free nodes."""
    return [(r, neighbor, 1, [r, neighbor])
            for neighbor in graph.adj[r]
            if is_hole(o, r, neighbor)]
def possible_obstacle_moves(o, r, graph, obstacle):
    """Enumerate moves for one obstacle: direct slides plus multi-step jumps.

    A neighbouring free node (other than the robot) yields a unit move; a
    neighbouring obstacle is searched via BFS (``find_nearest_hole``) for
    reachable free nodes, producing jump moves that start at the neighbour.
    """
    moves = []
    for neighbor in graph.adj[obstacle]:
        if is_hole(o, r, neighbor) and neighbor != r:
            moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))
        elif neighbor != r:
            # Reuse the BFS result: the original called find_nearest_hole a
            # second time with identical arguments just to extend the list.
            moves.extend(find_nearest_hole(o, r, graph, neighbor))
    return moves
def possible_obstacles_moves(o, r, graph):
    """Collect candidate moves for every obstacle on the board."""
    moves = []
    for obstacle in o:
        moves += possible_obstacle_moves(o, r, graph, obstacle)
    return moves
def possible_moves(o, r, graph):
    """All legal moves for a state: robot steps first, then obstacle moves."""
    return possible_robot_moves(o, r, graph) + possible_obstacles_moves(o, r, graph)
def color(o, r, graph, node, target, start):
    """Map a node to its display colour code.

    'c' = obstacle sitting on the target, 'r' = obstacle, 'b' = robot,
    'y' = start, 'g' = target, 'w' = anything else.  Obstacle status wins
    over robot/start/target, matching the original precedence.
    """
    if node in o:
        return 'c' if node == target else 'r'
    if node == r:
        return 'b'
    if node == start:
        return 'y'
    if node == target:
        return 'g'
    return 'w'
def create_state(o, r):
    """Build a canonical, hashable key for an (obstacles, robot) state.

    NOTE: sorts the obstacle list in place, exactly as the original did.
    """
    o.sort()
    return '{} ___ R = {}'.format('-'.join(o), r)
#__________________________________________________________________________________
def fitness_fun_heap(graph, obstacles, robot, target, num_of_moves):
    """Min-heap priority for a search state (lower is better).

    Cost = length of the robot->target shortest path, plus moves spent so
    far, plus one per obstacle sitting on that shortest path.
    """
    shortest = nx.shortest_path(graph, robot, target)
    blocked = sum(1 for obstacle in obstacles if obstacle in shortest)
    return len(shortest) + num_of_moves + blocked
def solve_heap(o,r,graph,t):
    """Best-first search for a move sequence that puts the robot on `t`.

    States are (priority, moves, obstacles, robot) tuples on a min-heap,
    prioritised by fitness_fun_heap (lower is better).  Returns the move
    list reaching the target, or None if the heap is exhausted.

    NOTE(review): children are pushed with their *parent's* fitness score,
    and ties on score fall back to comparing the move lists themselves —
    presumably intentional, but worth confirming.
    """
    round = 0
    visited = set([])
    # Seed with an artificially low priority so the initial state pops first.
    queue= [(-1000,[],o,r)]
    while queue:
        score,moves,obstacles,robot = heap.heappop(queue)
        # Canonical key: sorted obstacle ids joined with '#', plus robot node.
        obstacles.sort()
        st = ('#'.join(obstacles),robot)
        if ( st not in visited ):
            visited.add(st)
            # Re-evaluate this state's fitness; used as the children's priority.
            score = fitness_fun_heap(graph,obstacles,robot,t,len(moves))
            pm = possible_moves(obstacles,robot,graph)
            for move in pm:
                new_moves = moves[:]
                new_moves.append(move)
                newobstacles,newrobot = make_moves(obstacles,robot,graph,[move])
                if t == newrobot:
                    print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
                    return new_moves
                round = round+1
                if (round % 100000 == 0):
                    # Progress indicator only, printed every 100k expansions.
                    print ("Visited = " + str(len(visited)))
                heap.heappush(queue,(score,new_moves,newobstacles,newrobot))
def solve_brute_force(o, r, graph, t):
    """Exhaustive BFS over all reachable states.

    Enumerates every distinct solution (a move sequence ending with the
    robot on t) and returns the shortest one.  Returns None when no
    solution exists (the original raised ValueError from ``min`` on an
    empty list).  Slower than solve_heap but optimal in move count.
    """
    all_solutions = []
    step = 0               # expansion counter, drives progress output only
    visited = set([])      # canonical (obstacles, robot) states already expanded
    queue = [([], o, r)]
    while queue:
        moves, obstacles, robot = queue.pop(0)
        obstacles.sort()   # canonical order so equal states compare equal
        st = ('#'.join(obstacles), robot)
        if st not in visited:
            visited.add(st)
            for move in possible_moves(obstacles, robot, graph):
                new_moves = moves[:]
                new_moves.append(move)
                newobstacles, newrobot = make_moves(obstacles, robot, graph, [move])
                if t == newrobot:
                    all_solutions.append(new_moves)
                # Solution states are still re-queued, matching the original
                # behaviour of enumerating continuations past the goal.
                step = step + 1
                if step % 100000 == 0:
                    print("Visited = " + str(len(visited)))
                queue.append((new_moves, newobstacles, newrobot))
    print('Number of solutions: ' + str(len(all_solutions)))
    if not all_solutions:
        return None        # no path to the target exists
    return min(all_solutions, key=lambda x: len(x))
|
normal
|
{
"blob_id": "800edfc61635564abf8297c4f33c59d48cc99960",
"index": 4058,
"step-1": "<mask token>\n\n\ndef make_moves_from_path(path):\n moves = []\n p = path[:]\n for i in range(len(p) - 1):\n moves.append((p[i + 1], p[i], 1, [p[i + 1], p[i]]))\n return moves\n\n\ndef find_nearest_hole(o, r, graph, start):\n visited, queue = [], [(start, [start])]\n results = []\n while queue:\n node, search_path = queue.pop(0)\n if node not in visited:\n visited.append(node)\n adjacent = graph.adj[node]\n for neighbor in adjacent:\n if neighbor in o:\n if neighbor not in visited:\n queue.append((neighbor, search_path + [neighbor]))\n elif neighbor != r:\n results.append(search_path + [neighbor])\n moves = []\n for res in results:\n moves.append((res[0], res[-1], len(res) - 1, res))\n return moves\n\n\ndef move_robot(o, r, graph, node_from, node_to):\n obstacles = o[:]\n robot = r\n if not node_from == r:\n raise RuntimeError('node_from is not robot ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n robot = node_to\n return obstacles, robot\n\n\n<mask token>\n\n\ndef possible_robot_moves(o, r, graph):\n moves = []\n robot_node = r\n robot_neighbors = graph.adj[r]\n for neighbor in robot_neighbors:\n if is_hole(o, r, neighbor):\n moves.append((robot_node, neighbor, 1, [robot_node, neighbor]))\n return moves\n\n\ndef possible_obstacle_moves(o, r, graph, obstacle):\n obstacle_neighbors = graph.adj[obstacle]\n moves = []\n for neighbor in obstacle_neighbors:\n if is_hole(o, r, neighbor) and neighbor != r:\n moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))\n elif neighbor != r:\n nh = find_nearest_hole(o, r, graph, neighbor)\n if len(nh) > 0:\n moves.extend(find_nearest_hole(o, r, graph, neighbor))\n return moves\n\n\n<mask token>\n\n\ndef possible_moves(o, r, graph):\n moves = []\n moves.extend(possible_robot_moves(o, r, graph))\n moves.extend(possible_obstacles_moves(o, r, graph))\n return moves\n\n\n<mask token>\n\n\ndef solve_heap(o, r, graph, t):\n round = 0\n visited = set([])\n queue = 
[(-1000, [], o, r)]\n while queue:\n score, moves, obstacles, robot = heap.heappop(queue)\n obstacles.sort()\n st = '#'.join(obstacles), robot\n if st not in visited:\n visited.add(st)\n score = fitness_fun_heap(graph, obstacles, robot, t, len(moves))\n pm = possible_moves(obstacles, robot, graph)\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles, newrobot = make_moves(obstacles, robot, graph,\n [move])\n if t == newrobot:\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n return new_moves\n round = round + 1\n if round % 100000 == 0:\n print('Visited = ' + str(len(visited)))\n heap.heappush(queue, (score, new_moves, newobstacles, newrobot)\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef remove_jumps(moves):\n res = []\n for move in moves:\n if move[2] > 1:\n move[3].reverse()\n res.extend(make_moves_from_path(move[3]))\n else:\n res.append(move)\n return res\n\n\ndef make_moves_from_path(path):\n moves = []\n p = path[:]\n for i in range(len(p) - 1):\n moves.append((p[i + 1], p[i], 1, [p[i + 1], p[i]]))\n return moves\n\n\ndef find_nearest_hole(o, r, graph, start):\n visited, queue = [], [(start, [start])]\n results = []\n while queue:\n node, search_path = queue.pop(0)\n if node not in visited:\n visited.append(node)\n adjacent = graph.adj[node]\n for neighbor in adjacent:\n if neighbor in o:\n if neighbor not in visited:\n queue.append((neighbor, search_path + [neighbor]))\n elif neighbor != r:\n results.append(search_path + [neighbor])\n moves = []\n for res in results:\n moves.append((res[0], res[-1], len(res) - 1, res))\n return moves\n\n\ndef move_robot(o, r, graph, node_from, node_to):\n obstacles = o[:]\n robot = r\n if not node_from == r:\n raise RuntimeError('node_from is not robot ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n robot = node_to\n return obstacles, robot\n\n\ndef move_obstacle(o, r, graph, node_from, node_to):\n obstacles = o[:]\n robot = r\n if node_from not in obstacles:\n raise RuntimeError('node_from is not obstacle ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n if node_to == robot:\n raise RuntimeError('node_to is robot' + node_to)\n obstacles.append(node_to)\n obstacles.remove(node_from)\n return obstacles, robot\n\n\n<mask token>\n\n\ndef possible_robot_moves(o, r, graph):\n moves = []\n robot_node = r\n robot_neighbors = graph.adj[r]\n for neighbor in robot_neighbors:\n if is_hole(o, r, neighbor):\n moves.append((robot_node, neighbor, 1, [robot_node, neighbor]))\n return moves\n\n\ndef possible_obstacle_moves(o, r, graph, obstacle):\n obstacle_neighbors = graph.adj[obstacle]\n 
moves = []\n for neighbor in obstacle_neighbors:\n if is_hole(o, r, neighbor) and neighbor != r:\n moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))\n elif neighbor != r:\n nh = find_nearest_hole(o, r, graph, neighbor)\n if len(nh) > 0:\n moves.extend(find_nearest_hole(o, r, graph, neighbor))\n return moves\n\n\n<mask token>\n\n\ndef possible_moves(o, r, graph):\n moves = []\n moves.extend(possible_robot_moves(o, r, graph))\n moves.extend(possible_obstacles_moves(o, r, graph))\n return moves\n\n\ndef color(o, r, graph, node, target, start):\n if node in o and node == target:\n return 'c'\n if node in o:\n return 'r'\n if node == r:\n return 'b'\n if node == start:\n return 'y'\n if node == target:\n return 'g'\n return 'w'\n\n\n<mask token>\n\n\ndef solve_heap(o, r, graph, t):\n round = 0\n visited = set([])\n queue = [(-1000, [], o, r)]\n while queue:\n score, moves, obstacles, robot = heap.heappop(queue)\n obstacles.sort()\n st = '#'.join(obstacles), robot\n if st not in visited:\n visited.add(st)\n score = fitness_fun_heap(graph, obstacles, robot, t, len(moves))\n pm = possible_moves(obstacles, robot, graph)\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles, newrobot = make_moves(obstacles, robot, graph,\n [move])\n if t == newrobot:\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n return new_moves\n round = round + 1\n if round % 100000 == 0:\n print('Visited = ' + str(len(visited)))\n heap.heappush(queue, (score, new_moves, newobstacles, newrobot)\n )\n\n\ndef solve_brute_force(o, r, graph, t):\n num_of_solutions = 0\n all_solutions = []\n round = 0\n visited = set([])\n queue = [([], o, r)]\n while queue:\n moves, obstacles, robot = queue.pop(0)\n obstacles.sort()\n st = '#'.join(obstacles), robot\n if st not in visited:\n visited.add(st)\n pm = possible_moves(obstacles, robot, graph)\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles, newrobot = make_moves(obstacles, robot, graph,\n [move])\n if 
t == newrobot:\n all_solutions.append(new_moves)\n round = round + 1\n if round % 100000 == 0:\n print('Visited = ' + str(len(visited)))\n queue.append((new_moves, newobstacles, newrobot))\n print('Number of solutions: ' + str(len(all_solutions)))\n best = min(all_solutions, key=lambda x: len(x))\n return best\n",
"step-3": "<mask token>\n\n\ndef remove_jumps(moves):\n res = []\n for move in moves:\n if move[2] > 1:\n move[3].reverse()\n res.extend(make_moves_from_path(move[3]))\n else:\n res.append(move)\n return res\n\n\ndef make_moves_from_path(path):\n moves = []\n p = path[:]\n for i in range(len(p) - 1):\n moves.append((p[i + 1], p[i], 1, [p[i + 1], p[i]]))\n return moves\n\n\ndef find_nearest_hole(o, r, graph, start):\n visited, queue = [], [(start, [start])]\n results = []\n while queue:\n node, search_path = queue.pop(0)\n if node not in visited:\n visited.append(node)\n adjacent = graph.adj[node]\n for neighbor in adjacent:\n if neighbor in o:\n if neighbor not in visited:\n queue.append((neighbor, search_path + [neighbor]))\n elif neighbor != r:\n results.append(search_path + [neighbor])\n moves = []\n for res in results:\n moves.append((res[0], res[-1], len(res) - 1, res))\n return moves\n\n\ndef move_robot(o, r, graph, node_from, node_to):\n obstacles = o[:]\n robot = r\n if not node_from == r:\n raise RuntimeError('node_from is not robot ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n robot = node_to\n return obstacles, robot\n\n\ndef move_obstacle(o, r, graph, node_from, node_to):\n obstacles = o[:]\n robot = r\n if node_from not in obstacles:\n raise RuntimeError('node_from is not obstacle ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n if node_to == robot:\n raise RuntimeError('node_to is robot' + node_to)\n obstacles.append(node_to)\n obstacles.remove(node_from)\n return obstacles, robot\n\n\n<mask token>\n\n\ndef possible_robot_moves(o, r, graph):\n moves = []\n robot_node = r\n robot_neighbors = graph.adj[r]\n for neighbor in robot_neighbors:\n if is_hole(o, r, neighbor):\n moves.append((robot_node, neighbor, 1, [robot_node, neighbor]))\n return moves\n\n\ndef possible_obstacle_moves(o, r, graph, obstacle):\n obstacle_neighbors = graph.adj[obstacle]\n 
moves = []\n for neighbor in obstacle_neighbors:\n if is_hole(o, r, neighbor) and neighbor != r:\n moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))\n elif neighbor != r:\n nh = find_nearest_hole(o, r, graph, neighbor)\n if len(nh) > 0:\n moves.extend(find_nearest_hole(o, r, graph, neighbor))\n return moves\n\n\n<mask token>\n\n\ndef possible_moves(o, r, graph):\n moves = []\n moves.extend(possible_robot_moves(o, r, graph))\n moves.extend(possible_obstacles_moves(o, r, graph))\n return moves\n\n\ndef color(o, r, graph, node, target, start):\n if node in o and node == target:\n return 'c'\n if node in o:\n return 'r'\n if node == r:\n return 'b'\n if node == start:\n return 'y'\n if node == target:\n return 'g'\n return 'w'\n\n\n<mask token>\n\n\ndef fitness_fun_heap(graph, obstacles, robot, target, num_of_moves):\n shortest = nx.shortest_path(graph, robot, target)\n score = -len(shortest) - num_of_moves\n for obstacle in obstacles:\n if obstacle in shortest:\n score = score - 1\n return -score\n\n\ndef solve_heap(o, r, graph, t):\n round = 0\n visited = set([])\n queue = [(-1000, [], o, r)]\n while queue:\n score, moves, obstacles, robot = heap.heappop(queue)\n obstacles.sort()\n st = '#'.join(obstacles), robot\n if st not in visited:\n visited.add(st)\n score = fitness_fun_heap(graph, obstacles, robot, t, len(moves))\n pm = possible_moves(obstacles, robot, graph)\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles, newrobot = make_moves(obstacles, robot, graph,\n [move])\n if t == newrobot:\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n return new_moves\n round = round + 1\n if round % 100000 == 0:\n print('Visited = ' + str(len(visited)))\n heap.heappush(queue, (score, new_moves, newobstacles, newrobot)\n )\n\n\ndef solve_brute_force(o, r, graph, t):\n num_of_solutions = 0\n all_solutions = []\n round = 0\n visited = set([])\n queue = [([], o, r)]\n while queue:\n moves, obstacles, robot = queue.pop(0)\n obstacles.sort()\n st = 
'#'.join(obstacles), robot\n if st not in visited:\n visited.add(st)\n pm = possible_moves(obstacles, robot, graph)\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles, newrobot = make_moves(obstacles, robot, graph,\n [move])\n if t == newrobot:\n all_solutions.append(new_moves)\n round = round + 1\n if round % 100000 == 0:\n print('Visited = ' + str(len(visited)))\n queue.append((new_moves, newobstacles, newrobot))\n print('Number of solutions: ' + str(len(all_solutions)))\n best = min(all_solutions, key=lambda x: len(x))\n return best\n",
"step-4": "<mask token>\n\n\ndef remove_jumps(moves):\n res = []\n for move in moves:\n if move[2] > 1:\n move[3].reverse()\n res.extend(make_moves_from_path(move[3]))\n else:\n res.append(move)\n return res\n\n\ndef make_moves_from_path(path):\n moves = []\n p = path[:]\n for i in range(len(p) - 1):\n moves.append((p[i + 1], p[i], 1, [p[i + 1], p[i]]))\n return moves\n\n\ndef find_nearest_hole(o, r, graph, start):\n visited, queue = [], [(start, [start])]\n results = []\n while queue:\n node, search_path = queue.pop(0)\n if node not in visited:\n visited.append(node)\n adjacent = graph.adj[node]\n for neighbor in adjacent:\n if neighbor in o:\n if neighbor not in visited:\n queue.append((neighbor, search_path + [neighbor]))\n elif neighbor != r:\n results.append(search_path + [neighbor])\n moves = []\n for res in results:\n moves.append((res[0], res[-1], len(res) - 1, res))\n return moves\n\n\ndef move_robot(o, r, graph, node_from, node_to):\n obstacles = o[:]\n robot = r\n if not node_from == r:\n raise RuntimeError('node_from is not robot ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n robot = node_to\n return obstacles, robot\n\n\ndef move_obstacle(o, r, graph, node_from, node_to):\n obstacles = o[:]\n robot = r\n if node_from not in obstacles:\n raise RuntimeError('node_from is not obstacle ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n if node_to == robot:\n raise RuntimeError('node_to is robot' + node_to)\n obstacles.append(node_to)\n obstacles.remove(node_from)\n return obstacles, robot\n\n\ndef make_move(o, r, graph, node_from, node_to):\n if node_from == None:\n return o, r\n if r == node_from:\n return move_robot(o, r, graph, node_from, node_to)\n if node_from in o:\n return move_obstacle(o, r, graph, node_from, node_to)\n raise RuntimeError('Cant move from ' + node_from)\n\n\n<mask token>\n\n\ndef is_hole(o, r, node):\n if node not in o:\n return 
True\n return False\n\n\ndef possible_robot_moves(o, r, graph):\n moves = []\n robot_node = r\n robot_neighbors = graph.adj[r]\n for neighbor in robot_neighbors:\n if is_hole(o, r, neighbor):\n moves.append((robot_node, neighbor, 1, [robot_node, neighbor]))\n return moves\n\n\ndef possible_obstacle_moves(o, r, graph, obstacle):\n obstacle_neighbors = graph.adj[obstacle]\n moves = []\n for neighbor in obstacle_neighbors:\n if is_hole(o, r, neighbor) and neighbor != r:\n moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))\n elif neighbor != r:\n nh = find_nearest_hole(o, r, graph, neighbor)\n if len(nh) > 0:\n moves.extend(find_nearest_hole(o, r, graph, neighbor))\n return moves\n\n\ndef possible_obstacles_moves(o, r, graph):\n moves = []\n for obstacle in o:\n moves.extend(possible_obstacle_moves(o, r, graph, obstacle))\n return moves\n\n\ndef possible_moves(o, r, graph):\n moves = []\n moves.extend(possible_robot_moves(o, r, graph))\n moves.extend(possible_obstacles_moves(o, r, graph))\n return moves\n\n\ndef color(o, r, graph, node, target, start):\n if node in o and node == target:\n return 'c'\n if node in o:\n return 'r'\n if node == r:\n return 'b'\n if node == start:\n return 'y'\n if node == target:\n return 'g'\n return 'w'\n\n\ndef create_state(o, r):\n o.sort()\n return '-'.join(o) + ' ___ R = ' + r\n\n\ndef fitness_fun_heap(graph, obstacles, robot, target, num_of_moves):\n shortest = nx.shortest_path(graph, robot, target)\n score = -len(shortest) - num_of_moves\n for obstacle in obstacles:\n if obstacle in shortest:\n score = score - 1\n return -score\n\n\ndef solve_heap(o, r, graph, t):\n round = 0\n visited = set([])\n queue = [(-1000, [], o, r)]\n while queue:\n score, moves, obstacles, robot = heap.heappop(queue)\n obstacles.sort()\n st = '#'.join(obstacles), robot\n if st not in visited:\n visited.add(st)\n score = fitness_fun_heap(graph, obstacles, robot, t, len(moves))\n pm = possible_moves(obstacles, robot, graph)\n for move in pm:\n 
new_moves = moves[:]\n new_moves.append(move)\n newobstacles, newrobot = make_moves(obstacles, robot, graph,\n [move])\n if t == newrobot:\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n return new_moves\n round = round + 1\n if round % 100000 == 0:\n print('Visited = ' + str(len(visited)))\n heap.heappush(queue, (score, new_moves, newobstacles, newrobot)\n )\n\n\ndef solve_brute_force(o, r, graph, t):\n num_of_solutions = 0\n all_solutions = []\n round = 0\n visited = set([])\n queue = [([], o, r)]\n while queue:\n moves, obstacles, robot = queue.pop(0)\n obstacles.sort()\n st = '#'.join(obstacles), robot\n if st not in visited:\n visited.add(st)\n pm = possible_moves(obstacles, robot, graph)\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles, newrobot = make_moves(obstacles, robot, graph,\n [move])\n if t == newrobot:\n all_solutions.append(new_moves)\n round = round + 1\n if round % 100000 == 0:\n print('Visited = ' + str(len(visited)))\n queue.append((new_moves, newobstacles, newrobot))\n print('Number of solutions: ' + str(len(all_solutions)))\n best = min(all_solutions, key=lambda x: len(x))\n return best\n",
"step-5": "import heapq as heap\nimport networkx as nx\nimport copy\nimport random\ndef remove_jumps(moves):\n\n res = []\n\n for move in moves:\n if move[2] > 1:\n move[3].reverse()\n res.extend(make_moves_from_path(move[3]))\n else:\n res.append(move)\n\n return res\n\n\ndef make_moves_from_path(path):\n\n moves = []\n p = path[:]\n\n for i in range(len(p)-1):\n moves.append((p[i+1], p[i], 1, [p[i+1], p[i]]))\n return moves\n\n\ndef find_nearest_hole(o,r,graph, start):\n visited, queue = [], [(start, [start])]\n results = []\n\n while queue:\n (node, search_path) = queue.pop(0)\n\n if node not in visited:\n visited.append(node)\n\n adjacent = graph.adj[node]\n\n for neighbor in adjacent:\n if neighbor in o:\n if neighbor not in visited:\n queue.append((neighbor, search_path + [neighbor]))\n else:\n if neighbor != r:\n results.append(search_path + [neighbor])\n\n moves = []\n for res in results:\n\n moves.append((res[0], res[-1], len(res)-1, res))\n return moves\n\ndef move_robot(o,r,graph,node_from,node_to):\n obstacles = o[:]\n robot = r\n if not node_from == r:\n raise RuntimeError('node_from is not robot ' + node_from)\n\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n robot = node_to\n return (obstacles,robot)\n\ndef move_obstacle(o,r,graph,node_from,node_to):\n obstacles = o[:]\n robot = r\n if node_from not in obstacles:\n raise RuntimeError('node_from is not obstacle ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n\n if node_to == robot:\n raise RuntimeError('node_to is robot' + node_to)\n\n obstacles.append(node_to)\n obstacles.remove(node_from)\n\n return(obstacles,robot)\n\ndef make_move(o,r,graph,node_from,node_to):\n\n if node_from == None:\n return (o, r)\n\n if( r == node_from):\n return move_robot(o,r,graph,node_from,node_to)\n if ( node_from in o):\n return move_obstacle(o,r,graph,node_from,node_to)\n\n raise RuntimeError('Cant move from ' + node_from)\n\ndef 
make_moves(o,r,graph,moves):\n obstacles= o[:]\n robot = r\n for move in moves:\n obstacles,robot = make_move(obstacles,robot,graph,move[0],move[1])\n return (obstacles,robot)\n\ndef is_hole(o, r, node):\n if (node not in o):\n return True\n return False\n\ndef possible_robot_moves(o, r, graph):\n moves=[]\n robot_node = r\n robot_neighbors = graph.adj[r]\n\n for neighbor in robot_neighbors:\n if is_hole(o,r,neighbor):\n moves.append((robot_node, neighbor, 1, [robot_node, neighbor]))\n return moves\n\ndef possible_obstacle_moves(o,r,graph,obstacle):\n obstacle_neighbors = graph.adj[obstacle]\n moves = []\n\n for neighbor in obstacle_neighbors:\n if is_hole(o,r,neighbor) and neighbor != r:\n moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))\n else:\n if neighbor != r:\n nh = find_nearest_hole(o, r, graph, neighbor)\n if len(nh) > 0:\n moves.extend(find_nearest_hole(o,r,graph, neighbor))\n\n return moves\n\ndef possible_obstacles_moves(o,r,graph):\n moves = []\n for obstacle in o:\n moves.extend(possible_obstacle_moves(o,r,graph,obstacle))\n return moves\n\ndef possible_moves(o,r,graph):\n moves = []\n moves.extend(possible_robot_moves(o,r,graph))\n moves.extend(possible_obstacles_moves(o,r,graph))\n return moves\n\n\ndef color(o,r,graph,node,target,start):\n if (node in o and node == target):\n return 'c'\n if node in o:\n return 'r'\n if node == r:\n return 'b'\n if node == start:\n return 'y'\n if node == target:\n return 'g'\n return 'w'\n\n\ndef create_state(o, r):\n\n o.sort()\n return '-'.join(o) + ' ___ R = ' + r\n\n#__________________________________________________________________________________\n\ndef fitness_fun_heap(graph, obstacles, robot, target, num_of_moves):\n shortest = nx.shortest_path(graph,robot,target)\n score = -len(shortest) - num_of_moves\n\n for obstacle in obstacles:\n if obstacle in shortest:\n score = score - 1\n\n return -score\n\n\n\ndef solve_heap(o,r,graph,t):\n round = 0\n visited = set([])\n queue= [(-1000,[],o,r)]\n 
while queue:\n score,moves,obstacles,robot = heap.heappop(queue)\n obstacles.sort()\n st = ('#'.join(obstacles),robot)\n if ( st not in visited ):\n visited.add(st)\n score = fitness_fun_heap(graph,obstacles,robot,t,len(moves))\n pm = possible_moves(obstacles,robot,graph)\n\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles,newrobot = make_moves(obstacles,robot,graph,[move])\n if t == newrobot:\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n return new_moves\n\n round = round+1\n if (round % 100000 == 0):\n print (\"Visited = \" + str(len(visited)))\n heap.heappush(queue,(score,new_moves,newobstacles,newrobot))\n\n\n\n\n\ndef solve_brute_force(o,r,graph,t):\n num_of_solutions = 0\n all_solutions = []\n\n round = 0\n visited = set([])\n queue = [([],o,r)]\n while queue:\n moves,obstacles,robot = queue.pop(0)\n obstacles.sort()\n st = ('#'.join(obstacles),robot)\n if ( st not in visited ):\n visited.add(st)\n\n pm = possible_moves(obstacles,robot,graph)\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles,newrobot = make_moves(obstacles,robot,graph,[move])\n if t == newrobot:\n all_solutions.append(new_moves)\n\n round = round+1\n\n if (round % 100000 == 0):\n print (\"Visited = \" + str(len(visited)))\n queue.append((new_moves,newobstacles,newrobot))\n\n\n print('Number of solutions: ' + str(len(all_solutions)))\n\n best = min(all_solutions, key = lambda x : len(x))\n\n return best\n",
"step-ids": [
7,
11,
12,
16,
19
]
}
|
[
7,
11,
12,
16,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for obj_file in OBJ_FILES:
cond_results[obj_file] = set()
dump = subprocess.run(['objdump', '-M', 'intel', '-dr', PREFIX +
obj_file], stdout=subprocess.PIPE, check=True).stdout
for line in dump.decode('utf-8').split('\n'):
cols = line.split('\t')
if len(cols) > 2:
new_code = re.sub(' .*', '', cols[2])
if new_code == '':
continue
if new_code not in LIBFTFP and new_code not in SKIP:
cond_results[obj_file].add(new_code)
opcodes.add(new_code)
print(sorted(opcodes - LIBFTFP))
for k, v in cond_results.items():
print(k, sorted(v))
<|reserved_special_token_0|>
combo.add('cmovne')
<|reserved_special_token_0|>
for i in range(0, len(combo)):
print('\\texttt{' + combo[i] + '}', end='')
if combo[i] not in LIBFTFP:
print('*', end='')
if i % 5 == 4:
print(' \\\\')
else:
print(' & ', end='')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
PREFIX = 'Enclave/'
OBJ_FILES = ['p_block.o', 'runtime.o', 'primitives.o', 'unary_op.o',
'unary/isna.o', 'unary/mathgen.o', 'unary/mathtrig.o',
'unary/plusminus.o', 'unary/summary.o', 'unary/print.o',
'unary/ustats.o', 'binary_op.o', 'binary/arith.o', 'binary/bstats.o',
'binary/log_bin.o', 'binary/logic.o', 'binary/matmul.o',
'binary/compare.o', 'binary/pminmax.o', 'binary/bstats.o']
CONDITIONALS = []
LIBFTFP = set(['add', 'mov', 'pop', 'setg', 'and', 'movabs', 'push', 'setl',
'call', 'movsd', 'rep', 'setle', 'cdqe', 'movsx', 'ret', 'setne', 'cmp',
'movsxd', 'sar', 'shl', 'imul', 'movzx', 'sbb', 'shr', 'je', 'mul',
'seta', 'sub', 'jmp', 'neg', 'setae', 'test', 'jne', 'not', 'setbe',
'xor', 'lea', 'or', 'sete'])
SKIP = ['nop']
opcodes = set()
cond_results = {}
for obj_file in OBJ_FILES:
cond_results[obj_file] = set()
dump = subprocess.run(['objdump', '-M', 'intel', '-dr', PREFIX +
obj_file], stdout=subprocess.PIPE, check=True).stdout
for line in dump.decode('utf-8').split('\n'):
cols = line.split('\t')
if len(cols) > 2:
new_code = re.sub(' .*', '', cols[2])
if new_code == '':
continue
if new_code not in LIBFTFP and new_code not in SKIP:
cond_results[obj_file].add(new_code)
opcodes.add(new_code)
print(sorted(opcodes - LIBFTFP))
for k, v in cond_results.items():
print(k, sorted(v))
combo = LIBFTFP.copy()
combo.add('cmovne')
combo = sorted(combo)
for i in range(0, len(combo)):
print('\\texttt{' + combo[i] + '}', end='')
if combo[i] not in LIBFTFP:
print('*', end='')
if i % 5 == 4:
print(' \\\\')
else:
print(' & ', end='')
<|reserved_special_token_1|>
import re
import subprocess
PREFIX = 'Enclave/'
OBJ_FILES = ['p_block.o', 'runtime.o', 'primitives.o', 'unary_op.o',
'unary/isna.o', 'unary/mathgen.o', 'unary/mathtrig.o',
'unary/plusminus.o', 'unary/summary.o', 'unary/print.o',
'unary/ustats.o', 'binary_op.o', 'binary/arith.o', 'binary/bstats.o',
'binary/log_bin.o', 'binary/logic.o', 'binary/matmul.o',
'binary/compare.o', 'binary/pminmax.o', 'binary/bstats.o']
CONDITIONALS = []
LIBFTFP = set(['add', 'mov', 'pop', 'setg', 'and', 'movabs', 'push', 'setl',
'call', 'movsd', 'rep', 'setle', 'cdqe', 'movsx', 'ret', 'setne', 'cmp',
'movsxd', 'sar', 'shl', 'imul', 'movzx', 'sbb', 'shr', 'je', 'mul',
'seta', 'sub', 'jmp', 'neg', 'setae', 'test', 'jne', 'not', 'setbe',
'xor', 'lea', 'or', 'sete'])
SKIP = ['nop']
opcodes = set()
cond_results = {}
for obj_file in OBJ_FILES:
cond_results[obj_file] = set()
dump = subprocess.run(['objdump', '-M', 'intel', '-dr', PREFIX +
obj_file], stdout=subprocess.PIPE, check=True).stdout
for line in dump.decode('utf-8').split('\n'):
cols = line.split('\t')
if len(cols) > 2:
new_code = re.sub(' .*', '', cols[2])
if new_code == '':
continue
if new_code not in LIBFTFP and new_code not in SKIP:
cond_results[obj_file].add(new_code)
opcodes.add(new_code)
print(sorted(opcodes - LIBFTFP))
for k, v in cond_results.items():
print(k, sorted(v))
combo = LIBFTFP.copy()
combo.add('cmovne')
combo = sorted(combo)
for i in range(0, len(combo)):
print('\\texttt{' + combo[i] + '}', end='')
if combo[i] not in LIBFTFP:
print('*', end='')
if i % 5 == 4:
print(' \\\\')
else:
print(' & ', end='')
<|reserved_special_token_1|>
#!/usr/bin/env python3
import re
import subprocess
PREFIX = "Enclave/"
OBJ_FILES = [
# "Enclave.o",
"p_block.o",
# "symbols.o",
"runtime.o",
"primitives.o",
"unary_op.o",
"unary/isna.o",
"unary/mathgen.o",
"unary/mathtrig.o",
"unary/plusminus.o",
"unary/summary.o",
"unary/print.o", # data dependent by design
"unary/ustats.o", # only the opcode for the dispatch, not the actual.
"binary_op.o",
"binary/arith.o",
"binary/bstats.o", # only the opcode for the dispatch, not the actual.
"binary/log_bin.o",
"binary/logic.o",
"binary/matmul.o",
"binary/compare.o",
"binary/pminmax.o",
"binary/bstats.o",
]
CONDITIONALS = [
]
# LIBFTFP = set(['add','mov','pop','setg','and','movabs','push','setl', 'call','movsd','rep','setle','cdqe','movsx','ret','setne','cmp','movsxd','sar','shl', 'imul','movzx','sbb','shr','je','mul','seta','sub', 'jmp','neg','setae','test', 'jne','not','setbe','xor', 'lea','or','sete']) - set(['jne', 'je'])
LIBFTFP = set(['add','mov','pop','setg','and','movabs','push','setl', 'call','movsd','rep','setle','cdqe','movsx','ret','setne','cmp','movsxd','sar','shl', 'imul','movzx','sbb','shr','je','mul','seta','sub', 'jmp','neg','setae','test', 'jne','not','setbe','xor', 'lea','or','sete'])
SKIP = ['nop',]
opcodes = set()
cond_results = {}
# subprocess.run(["make", "-f", "split.makefile", "clean"], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# subprocess.run(["make", "-f", "split.makefile", "all"], check=True)
for obj_file in OBJ_FILES:
cond_results[obj_file] = set()
dump = subprocess.run(["objdump", "-M", "intel", "-dr", PREFIX + obj_file], stdout=subprocess.PIPE, check=True).stdout
for line in dump.decode("utf-8").split("\n"):
cols = line.split('\t')
if len(cols) > 2:
new_code = re.sub(' .*', '', cols[2])
if new_code == '':
continue
# if new_code in CONDITIONALS:
if new_code not in LIBFTFP and new_code not in SKIP:
cond_results[obj_file].add(new_code)
opcodes.add(new_code)
# print(sorted(opcodes))
print(sorted(opcodes - LIBFTFP))
for k,v in cond_results.items():
print(k,sorted(v))
combo = LIBFTFP.copy()
# for s in ['ja', 'jae', 'jb', 'je', 'jne', 'jge', 'jle', 'repz', 'cmovne', 'movq', 'jns']:
# combo.add(s)
combo.add("cmovne")
combo = sorted(combo)
for i in range(0, len(combo)):
print(r'\texttt{' + combo[i] + '}', end='')
if combo[i] not in LIBFTFP:
print('*', end='')
if i % 5 == 4:
print(r' \\')
else:
print(' & ', end='')
|
flexible
|
{
"blob_id": "45d69194e14e8c20161e979d4ff34d0b90df4672",
"index": 4750,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor obj_file in OBJ_FILES:\n cond_results[obj_file] = set()\n dump = subprocess.run(['objdump', '-M', 'intel', '-dr', PREFIX +\n obj_file], stdout=subprocess.PIPE, check=True).stdout\n for line in dump.decode('utf-8').split('\\n'):\n cols = line.split('\\t')\n if len(cols) > 2:\n new_code = re.sub(' .*', '', cols[2])\n if new_code == '':\n continue\n if new_code not in LIBFTFP and new_code not in SKIP:\n cond_results[obj_file].add(new_code)\n opcodes.add(new_code)\nprint(sorted(opcodes - LIBFTFP))\nfor k, v in cond_results.items():\n print(k, sorted(v))\n<mask token>\ncombo.add('cmovne')\n<mask token>\nfor i in range(0, len(combo)):\n print('\\\\texttt{' + combo[i] + '}', end='')\n if combo[i] not in LIBFTFP:\n print('*', end='')\n if i % 5 == 4:\n print(' \\\\\\\\')\n else:\n print(' & ', end='')\n",
"step-3": "<mask token>\nPREFIX = 'Enclave/'\nOBJ_FILES = ['p_block.o', 'runtime.o', 'primitives.o', 'unary_op.o',\n 'unary/isna.o', 'unary/mathgen.o', 'unary/mathtrig.o',\n 'unary/plusminus.o', 'unary/summary.o', 'unary/print.o',\n 'unary/ustats.o', 'binary_op.o', 'binary/arith.o', 'binary/bstats.o',\n 'binary/log_bin.o', 'binary/logic.o', 'binary/matmul.o',\n 'binary/compare.o', 'binary/pminmax.o', 'binary/bstats.o']\nCONDITIONALS = []\nLIBFTFP = set(['add', 'mov', 'pop', 'setg', 'and', 'movabs', 'push', 'setl',\n 'call', 'movsd', 'rep', 'setle', 'cdqe', 'movsx', 'ret', 'setne', 'cmp',\n 'movsxd', 'sar', 'shl', 'imul', 'movzx', 'sbb', 'shr', 'je', 'mul',\n 'seta', 'sub', 'jmp', 'neg', 'setae', 'test', 'jne', 'not', 'setbe',\n 'xor', 'lea', 'or', 'sete'])\nSKIP = ['nop']\nopcodes = set()\ncond_results = {}\nfor obj_file in OBJ_FILES:\n cond_results[obj_file] = set()\n dump = subprocess.run(['objdump', '-M', 'intel', '-dr', PREFIX +\n obj_file], stdout=subprocess.PIPE, check=True).stdout\n for line in dump.decode('utf-8').split('\\n'):\n cols = line.split('\\t')\n if len(cols) > 2:\n new_code = re.sub(' .*', '', cols[2])\n if new_code == '':\n continue\n if new_code not in LIBFTFP and new_code not in SKIP:\n cond_results[obj_file].add(new_code)\n opcodes.add(new_code)\nprint(sorted(opcodes - LIBFTFP))\nfor k, v in cond_results.items():\n print(k, sorted(v))\ncombo = LIBFTFP.copy()\ncombo.add('cmovne')\ncombo = sorted(combo)\nfor i in range(0, len(combo)):\n print('\\\\texttt{' + combo[i] + '}', end='')\n if combo[i] not in LIBFTFP:\n print('*', end='')\n if i % 5 == 4:\n print(' \\\\\\\\')\n else:\n print(' & ', end='')\n",
"step-4": "import re\nimport subprocess\nPREFIX = 'Enclave/'\nOBJ_FILES = ['p_block.o', 'runtime.o', 'primitives.o', 'unary_op.o',\n 'unary/isna.o', 'unary/mathgen.o', 'unary/mathtrig.o',\n 'unary/plusminus.o', 'unary/summary.o', 'unary/print.o',\n 'unary/ustats.o', 'binary_op.o', 'binary/arith.o', 'binary/bstats.o',\n 'binary/log_bin.o', 'binary/logic.o', 'binary/matmul.o',\n 'binary/compare.o', 'binary/pminmax.o', 'binary/bstats.o']\nCONDITIONALS = []\nLIBFTFP = set(['add', 'mov', 'pop', 'setg', 'and', 'movabs', 'push', 'setl',\n 'call', 'movsd', 'rep', 'setle', 'cdqe', 'movsx', 'ret', 'setne', 'cmp',\n 'movsxd', 'sar', 'shl', 'imul', 'movzx', 'sbb', 'shr', 'je', 'mul',\n 'seta', 'sub', 'jmp', 'neg', 'setae', 'test', 'jne', 'not', 'setbe',\n 'xor', 'lea', 'or', 'sete'])\nSKIP = ['nop']\nopcodes = set()\ncond_results = {}\nfor obj_file in OBJ_FILES:\n cond_results[obj_file] = set()\n dump = subprocess.run(['objdump', '-M', 'intel', '-dr', PREFIX +\n obj_file], stdout=subprocess.PIPE, check=True).stdout\n for line in dump.decode('utf-8').split('\\n'):\n cols = line.split('\\t')\n if len(cols) > 2:\n new_code = re.sub(' .*', '', cols[2])\n if new_code == '':\n continue\n if new_code not in LIBFTFP and new_code not in SKIP:\n cond_results[obj_file].add(new_code)\n opcodes.add(new_code)\nprint(sorted(opcodes - LIBFTFP))\nfor k, v in cond_results.items():\n print(k, sorted(v))\ncombo = LIBFTFP.copy()\ncombo.add('cmovne')\ncombo = sorted(combo)\nfor i in range(0, len(combo)):\n print('\\\\texttt{' + combo[i] + '}', end='')\n if combo[i] not in LIBFTFP:\n print('*', end='')\n if i % 5 == 4:\n print(' \\\\\\\\')\n else:\n print(' & ', end='')\n",
"step-5": "#!/usr/bin/env python3\nimport re\nimport subprocess\n\nPREFIX = \"Enclave/\"\nOBJ_FILES = [\n # \"Enclave.o\",\n \"p_block.o\",\n # \"symbols.o\",\n \"runtime.o\",\n \"primitives.o\",\n \"unary_op.o\",\n \"unary/isna.o\",\n \"unary/mathgen.o\",\n \"unary/mathtrig.o\",\n \"unary/plusminus.o\",\n \"unary/summary.o\",\n \"unary/print.o\", # data dependent by design\n \"unary/ustats.o\", # only the opcode for the dispatch, not the actual.\n \"binary_op.o\",\n \"binary/arith.o\",\n \"binary/bstats.o\", # only the opcode for the dispatch, not the actual.\n \"binary/log_bin.o\",\n \"binary/logic.o\",\n \"binary/matmul.o\",\n \"binary/compare.o\",\n \"binary/pminmax.o\",\n \"binary/bstats.o\",\n]\n\nCONDITIONALS = [\n]\n\n# LIBFTFP = set(['add','mov','pop','setg','and','movabs','push','setl', 'call','movsd','rep','setle','cdqe','movsx','ret','setne','cmp','movsxd','sar','shl', 'imul','movzx','sbb','shr','je','mul','seta','sub', 'jmp','neg','setae','test', 'jne','not','setbe','xor', 'lea','or','sete']) - set(['jne', 'je'])\nLIBFTFP = set(['add','mov','pop','setg','and','movabs','push','setl', 'call','movsd','rep','setle','cdqe','movsx','ret','setne','cmp','movsxd','sar','shl', 'imul','movzx','sbb','shr','je','mul','seta','sub', 'jmp','neg','setae','test', 'jne','not','setbe','xor', 'lea','or','sete'])\n\nSKIP = ['nop',]\n\nopcodes = set()\ncond_results = {}\n# subprocess.run([\"make\", \"-f\", \"split.makefile\", \"clean\"], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n# subprocess.run([\"make\", \"-f\", \"split.makefile\", \"all\"], check=True)\nfor obj_file in OBJ_FILES:\n cond_results[obj_file] = set()\n dump = subprocess.run([\"objdump\", \"-M\", \"intel\", \"-dr\", PREFIX + obj_file], stdout=subprocess.PIPE, check=True).stdout\n for line in dump.decode(\"utf-8\").split(\"\\n\"):\n cols = line.split('\\t')\n if len(cols) > 2:\n new_code = re.sub(' .*', '', cols[2])\n if new_code == '':\n continue\n # if new_code in CONDITIONALS:\n if 
new_code not in LIBFTFP and new_code not in SKIP:\n cond_results[obj_file].add(new_code)\n opcodes.add(new_code)\n\n\n# print(sorted(opcodes))\nprint(sorted(opcodes - LIBFTFP))\nfor k,v in cond_results.items():\n print(k,sorted(v))\n\ncombo = LIBFTFP.copy()\n# for s in ['ja', 'jae', 'jb', 'je', 'jne', 'jge', 'jle', 'repz', 'cmovne', 'movq', 'jns']:\n# combo.add(s)\ncombo.add(\"cmovne\")\ncombo = sorted(combo)\nfor i in range(0, len(combo)):\n print(r'\\texttt{' + combo[i] + '}', end='')\n if combo[i] not in LIBFTFP:\n print('*', end='')\n if i % 5 == 4:\n print(r' \\\\')\n else:\n print(' & ', end='')\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def longestConsecutive(self, nums) ->int:
s = set(nums)
answer = 0
for value in s:
if value - 1 not in s:
j = value
while j in s:
j = j + 1
answer = max(answer, j - value)
return answer
<|reserved_special_token_1|>
class Solution:
def longestConsecutive(self, nums) -> int:
s = set(nums)
answer = 0
# n = len(s)
for value in s:
if value - 1 not in s:
j = value
while (j in s):
j = j + 1
answer = max(answer, j - value)
return answer
|
flexible
|
{
"blob_id": "9cb5573fada7a1529507da1d031f836044c10066",
"index": 2474,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def longestConsecutive(self, nums) ->int:\n s = set(nums)\n answer = 0\n for value in s:\n if value - 1 not in s:\n j = value\n while j in s:\n j = j + 1\n answer = max(answer, j - value)\n return answer\n",
"step-4": "class Solution:\n def longestConsecutive(self, nums) -> int:\n\n s = set(nums)\n answer = 0\n # n = len(s)\n\n for value in s:\n\n if value - 1 not in s:\n j = value\n while (j in s):\n j = j + 1\n\n answer = max(answer, j - value)\n\n return answer",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
################################################################################
# #
# This file is part of the Potato Engine (PE). #
# #
# Copyright (C) 2007-2010 ElectroMagnetic Potatoes (EMP). #
# See the AUTHORS file for more information. #
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public #
# License as published by the Free Software Foundation; either #
# version 2.1 of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import os
import build
################################################################
# Default options (will be overriden by command line switches) #
################################################################
# Parallel build
SetOption('num_jobs', 4)
# include cache
SetOption('implicit_cache', 1)
##########################################################
# Command-line parameters (overriden by localconfig.py) #
##########################################################
buildVariables = Variables("localconfig.py")
buildVariables.Add(PathVariable("QTDIR", "Qt4 root directory", "/usr/share/qt4", PathVariable.PathIsDir))
buildVariables.Add(PathVariable("OGRE_HOME", "Ogre1.6 root directory (windows only)", None, PathVariable.PathIsDir))
buildVariables.Add(PathVariable("PTHREADWIN32_HOME", "PthreadWin32 root directory (windows only)", None, PathVariable.PathIsDir))
buildVariables.Add(PathVariable("ODE_HOME", "ODE 0.11 root directory", None, PathVariable.PathIsDir))
buildVariables.Add(BoolVariable("DEBUG", "If true, build in debug configuration", False))
buildVariables.Add(BoolVariable("FORCE_MINGW", "When both MinGW and VC++ are installed, force the use of the MinGW compiler instead of the default (windows only)", False))
buildVariables.Add(BoolVariable("DISABLE_GRAPH", "Disable dependency graph generation", False))
##############################################################################
# Variable value extraction (nasty, should be updated when the API evolves) #
# The reason for having this here is that we have to access variables before #
# we can create the real construction environment (for tools selection) #
##############################################################################
currentVariables = Environment(variables = buildVariables).Dictionary()
####################
# Base environment #
####################
baseTools = ["qt"]
if os.name == "nt":
if currentVariables["FORCE_MINGW"]:
baseTools.append("mingw")
else:
baseTools.append("default")
else:
baseTools.append("default")
baseEnvironment = Environment(tools = baseTools, variables = buildVariables)
# additional variables
baseEnvironment["OSNAME"] = os.name
baseEnvironment["SYSPATH"] = os.environ["PATH"].split(os.pathsep)
if baseEnvironment["CC"] == "cl":
baseEnvironment.AppendUnique(CPPFLAGS = ["/EHsc"])
# debug symbols vs. optimization
if baseEnvironment["DEBUG"]:
if baseEnvironment["CC"] == "cl":
baseEnvironment.AppendUnique(CPPFLAGS = ["/Z7"])
else:
baseEnvironment.AppendUnique(CPPFLAGS = ["-g"])
else:
if baseEnvironment["CC"] == "cl":
baseEnvironment.AppendUnique(CPPFLAGS = ["/Ox"])
else:
baseEnvironment.AppendUnique(CPPFLAGS = ["-O2"])
# Qt tool workaround
baseEnvironment.Replace(LIBS = [])
baseEnvironment.Replace(LIBPATH = [])
baseEnvironment.Replace(CPPPATH = [])
# Qt UI builder
uiBuilder = Builder(action = '$QT_UIC $QT_UICDECLFLAGS -o ${TARGETS[0]} $SOURCE')
baseEnvironment.Append(BUILDERS = {'Ui' : uiBuilder})
# Qt RC builder
rcBuilder = Builder(action = '$QT_BINPATH/rcc $QT_RCCDECLFLAGS -o ${TARGETS[0]} $SOURCE')
baseEnvironment.Append(BUILDERS = {'Rc' : rcBuilder})
# Under windows, add the platform SDK
if os.name == "nt" and baseEnvironment["CC"] == "cl":
import _winreg
key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Microsoft SDKs\\Windows")
winSdkHome = _winreg.QueryValueEx(key, "CurrentInstallFolder")[0]
_winreg.CloseKey(key)
baseEnvironment["WINSDK_HOME"] = winSdkHome
baseEnvironment.AppendUnique(CPPPATH = ["$WINSDK_HOME/Include"])
baseEnvironment.AppendUnique(LIBPATH = ["$WINSDK_HOME/Lib"])
# Do not rely on VC++ runtime library
if os.name == "nt" and baseEnvironment["CC"] == "cl":
baseEnvironment.AppendUnique(CPPFLAGS = ["/MD"])
# Speed up change analysis
baseEnvironment.Decider('MD5-timestamp')
#####################
# Command-line help #
#####################
Help(buildVariables.GenerateHelpText(baseEnvironment))
##################################
# SCons environment declarations #
##################################
walker = build.DependencyWalker()
# external component database
for script in Glob("components.*.py"):
SConscript(script, exports = "walker", variant_dir = "build", duplicate = 0)
walker.makeEnvironments(baseEnvironment)
if not baseEnvironment["DISABLE_GRAPH"]:
walker.makeDependencyGraph("dependencies.png")
|
normal
|
{
"blob_id": "595912753d778a0fa8332f0df00e06a9da5cde93",
"index": 447,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nSetOption('num_jobs', 4)\nSetOption('implicit_cache', 1)\n<mask token>\nbuildVariables.Add(PathVariable('QTDIR', 'Qt4 root directory',\n '/usr/share/qt4', PathVariable.PathIsDir))\nbuildVariables.Add(PathVariable('OGRE_HOME',\n 'Ogre1.6 root directory (windows only)', None, PathVariable.PathIsDir))\nbuildVariables.Add(PathVariable('PTHREADWIN32_HOME',\n 'PthreadWin32 root directory (windows only)', None, PathVariable.PathIsDir)\n )\nbuildVariables.Add(PathVariable('ODE_HOME', 'ODE 0.11 root directory', None,\n PathVariable.PathIsDir))\nbuildVariables.Add(BoolVariable('DEBUG',\n 'If true, build in debug configuration', False))\nbuildVariables.Add(BoolVariable('FORCE_MINGW',\n 'When both MinGW and VC++ are installed, force the use of the MinGW compiler instead of the default (windows only)'\n , False))\nbuildVariables.Add(BoolVariable('DISABLE_GRAPH',\n 'Disable dependency graph generation', False))\n<mask token>\nif os.name == 'nt':\n if currentVariables['FORCE_MINGW']:\n baseTools.append('mingw')\n else:\n baseTools.append('default')\nelse:\n baseTools.append('default')\n<mask token>\nif baseEnvironment['CC'] == 'cl':\n baseEnvironment.AppendUnique(CPPFLAGS=['/EHsc'])\nif baseEnvironment['DEBUG']:\n if baseEnvironment['CC'] == 'cl':\n baseEnvironment.AppendUnique(CPPFLAGS=['/Z7'])\n else:\n baseEnvironment.AppendUnique(CPPFLAGS=['-g'])\nelif baseEnvironment['CC'] == 'cl':\n baseEnvironment.AppendUnique(CPPFLAGS=['/Ox'])\nelse:\n baseEnvironment.AppendUnique(CPPFLAGS=['-O2'])\nbaseEnvironment.Replace(LIBS=[])\nbaseEnvironment.Replace(LIBPATH=[])\nbaseEnvironment.Replace(CPPPATH=[])\n<mask token>\nbaseEnvironment.Append(BUILDERS={'Ui': uiBuilder})\n<mask token>\nbaseEnvironment.Append(BUILDERS={'Rc': rcBuilder})\nif os.name == 'nt' and baseEnvironment['CC'] == 'cl':\n import _winreg\n key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,\n 'Software\\\\Microsoft\\\\Microsoft SDKs\\\\Windows')\n winSdkHome = _winreg.QueryValueEx(key, 
'CurrentInstallFolder')[0]\n _winreg.CloseKey(key)\n baseEnvironment['WINSDK_HOME'] = winSdkHome\n baseEnvironment.AppendUnique(CPPPATH=['$WINSDK_HOME/Include'])\n baseEnvironment.AppendUnique(LIBPATH=['$WINSDK_HOME/Lib'])\nif os.name == 'nt' and baseEnvironment['CC'] == 'cl':\n baseEnvironment.AppendUnique(CPPFLAGS=['/MD'])\nbaseEnvironment.Decider('MD5-timestamp')\nHelp(buildVariables.GenerateHelpText(baseEnvironment))\n<mask token>\nfor script in Glob('components.*.py'):\n SConscript(script, exports='walker', variant_dir='build', duplicate=0)\nwalker.makeEnvironments(baseEnvironment)\nif not baseEnvironment['DISABLE_GRAPH']:\n walker.makeDependencyGraph('dependencies.png')\n",
"step-3": "<mask token>\nSetOption('num_jobs', 4)\nSetOption('implicit_cache', 1)\nbuildVariables = Variables('localconfig.py')\nbuildVariables.Add(PathVariable('QTDIR', 'Qt4 root directory',\n '/usr/share/qt4', PathVariable.PathIsDir))\nbuildVariables.Add(PathVariable('OGRE_HOME',\n 'Ogre1.6 root directory (windows only)', None, PathVariable.PathIsDir))\nbuildVariables.Add(PathVariable('PTHREADWIN32_HOME',\n 'PthreadWin32 root directory (windows only)', None, PathVariable.PathIsDir)\n )\nbuildVariables.Add(PathVariable('ODE_HOME', 'ODE 0.11 root directory', None,\n PathVariable.PathIsDir))\nbuildVariables.Add(BoolVariable('DEBUG',\n 'If true, build in debug configuration', False))\nbuildVariables.Add(BoolVariable('FORCE_MINGW',\n 'When both MinGW and VC++ are installed, force the use of the MinGW compiler instead of the default (windows only)'\n , False))\nbuildVariables.Add(BoolVariable('DISABLE_GRAPH',\n 'Disable dependency graph generation', False))\ncurrentVariables = Environment(variables=buildVariables).Dictionary()\nbaseTools = ['qt']\nif os.name == 'nt':\n if currentVariables['FORCE_MINGW']:\n baseTools.append('mingw')\n else:\n baseTools.append('default')\nelse:\n baseTools.append('default')\nbaseEnvironment = Environment(tools=baseTools, variables=buildVariables)\nbaseEnvironment['OSNAME'] = os.name\nbaseEnvironment['SYSPATH'] = os.environ['PATH'].split(os.pathsep)\nif baseEnvironment['CC'] == 'cl':\n baseEnvironment.AppendUnique(CPPFLAGS=['/EHsc'])\nif baseEnvironment['DEBUG']:\n if baseEnvironment['CC'] == 'cl':\n baseEnvironment.AppendUnique(CPPFLAGS=['/Z7'])\n else:\n baseEnvironment.AppendUnique(CPPFLAGS=['-g'])\nelif baseEnvironment['CC'] == 'cl':\n baseEnvironment.AppendUnique(CPPFLAGS=['/Ox'])\nelse:\n baseEnvironment.AppendUnique(CPPFLAGS=['-O2'])\nbaseEnvironment.Replace(LIBS=[])\nbaseEnvironment.Replace(LIBPATH=[])\nbaseEnvironment.Replace(CPPPATH=[])\nuiBuilder = Builder(action='$QT_UIC $QT_UICDECLFLAGS -o ${TARGETS[0]} 
$SOURCE')\nbaseEnvironment.Append(BUILDERS={'Ui': uiBuilder})\nrcBuilder = Builder(action=\n '$QT_BINPATH/rcc $QT_RCCDECLFLAGS -o ${TARGETS[0]} $SOURCE')\nbaseEnvironment.Append(BUILDERS={'Rc': rcBuilder})\nif os.name == 'nt' and baseEnvironment['CC'] == 'cl':\n import _winreg\n key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,\n 'Software\\\\Microsoft\\\\Microsoft SDKs\\\\Windows')\n winSdkHome = _winreg.QueryValueEx(key, 'CurrentInstallFolder')[0]\n _winreg.CloseKey(key)\n baseEnvironment['WINSDK_HOME'] = winSdkHome\n baseEnvironment.AppendUnique(CPPPATH=['$WINSDK_HOME/Include'])\n baseEnvironment.AppendUnique(LIBPATH=['$WINSDK_HOME/Lib'])\nif os.name == 'nt' and baseEnvironment['CC'] == 'cl':\n baseEnvironment.AppendUnique(CPPFLAGS=['/MD'])\nbaseEnvironment.Decider('MD5-timestamp')\nHelp(buildVariables.GenerateHelpText(baseEnvironment))\nwalker = build.DependencyWalker()\nfor script in Glob('components.*.py'):\n SConscript(script, exports='walker', variant_dir='build', duplicate=0)\nwalker.makeEnvironments(baseEnvironment)\nif not baseEnvironment['DISABLE_GRAPH']:\n walker.makeDependencyGraph('dependencies.png')\n",
"step-4": "import os\nimport build\nSetOption('num_jobs', 4)\nSetOption('implicit_cache', 1)\nbuildVariables = Variables('localconfig.py')\nbuildVariables.Add(PathVariable('QTDIR', 'Qt4 root directory',\n '/usr/share/qt4', PathVariable.PathIsDir))\nbuildVariables.Add(PathVariable('OGRE_HOME',\n 'Ogre1.6 root directory (windows only)', None, PathVariable.PathIsDir))\nbuildVariables.Add(PathVariable('PTHREADWIN32_HOME',\n 'PthreadWin32 root directory (windows only)', None, PathVariable.PathIsDir)\n )\nbuildVariables.Add(PathVariable('ODE_HOME', 'ODE 0.11 root directory', None,\n PathVariable.PathIsDir))\nbuildVariables.Add(BoolVariable('DEBUG',\n 'If true, build in debug configuration', False))\nbuildVariables.Add(BoolVariable('FORCE_MINGW',\n 'When both MinGW and VC++ are installed, force the use of the MinGW compiler instead of the default (windows only)'\n , False))\nbuildVariables.Add(BoolVariable('DISABLE_GRAPH',\n 'Disable dependency graph generation', False))\ncurrentVariables = Environment(variables=buildVariables).Dictionary()\nbaseTools = ['qt']\nif os.name == 'nt':\n if currentVariables['FORCE_MINGW']:\n baseTools.append('mingw')\n else:\n baseTools.append('default')\nelse:\n baseTools.append('default')\nbaseEnvironment = Environment(tools=baseTools, variables=buildVariables)\nbaseEnvironment['OSNAME'] = os.name\nbaseEnvironment['SYSPATH'] = os.environ['PATH'].split(os.pathsep)\nif baseEnvironment['CC'] == 'cl':\n baseEnvironment.AppendUnique(CPPFLAGS=['/EHsc'])\nif baseEnvironment['DEBUG']:\n if baseEnvironment['CC'] == 'cl':\n baseEnvironment.AppendUnique(CPPFLAGS=['/Z7'])\n else:\n baseEnvironment.AppendUnique(CPPFLAGS=['-g'])\nelif baseEnvironment['CC'] == 'cl':\n baseEnvironment.AppendUnique(CPPFLAGS=['/Ox'])\nelse:\n baseEnvironment.AppendUnique(CPPFLAGS=['-O2'])\nbaseEnvironment.Replace(LIBS=[])\nbaseEnvironment.Replace(LIBPATH=[])\nbaseEnvironment.Replace(CPPPATH=[])\nuiBuilder = Builder(action='$QT_UIC $QT_UICDECLFLAGS -o ${TARGETS[0]} 
$SOURCE')\nbaseEnvironment.Append(BUILDERS={'Ui': uiBuilder})\nrcBuilder = Builder(action=\n '$QT_BINPATH/rcc $QT_RCCDECLFLAGS -o ${TARGETS[0]} $SOURCE')\nbaseEnvironment.Append(BUILDERS={'Rc': rcBuilder})\nif os.name == 'nt' and baseEnvironment['CC'] == 'cl':\n import _winreg\n key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,\n 'Software\\\\Microsoft\\\\Microsoft SDKs\\\\Windows')\n winSdkHome = _winreg.QueryValueEx(key, 'CurrentInstallFolder')[0]\n _winreg.CloseKey(key)\n baseEnvironment['WINSDK_HOME'] = winSdkHome\n baseEnvironment.AppendUnique(CPPPATH=['$WINSDK_HOME/Include'])\n baseEnvironment.AppendUnique(LIBPATH=['$WINSDK_HOME/Lib'])\nif os.name == 'nt' and baseEnvironment['CC'] == 'cl':\n baseEnvironment.AppendUnique(CPPFLAGS=['/MD'])\nbaseEnvironment.Decider('MD5-timestamp')\nHelp(buildVariables.GenerateHelpText(baseEnvironment))\nwalker = build.DependencyWalker()\nfor script in Glob('components.*.py'):\n SConscript(script, exports='walker', variant_dir='build', duplicate=0)\nwalker.makeEnvironments(baseEnvironment)\nif not baseEnvironment['DISABLE_GRAPH']:\n walker.makeDependencyGraph('dependencies.png')\n",
"step-5": "################################################################################\r\n# #\r\n# This file is part of the Potato Engine (PE). #\r\n# #\r\n# Copyright (C) 2007-2010 ElectroMagnetic Potatoes (EMP). #\r\n# See the AUTHORS file for more information. #\r\n# #\r\n# This library is free software; you can redistribute it and/or #\r\n# modify it under the terms of the GNU Lesser General Public #\r\n# License as published by the Free Software Foundation; either #\r\n# version 2.1 of the License, or (at your option) any later version. #\r\n# #\r\n# This library is distributed in the hope that it will be useful, #\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #\r\n# Lesser General Public License for more details. #\r\n# #\r\n# You should have received a copy of the GNU Lesser General Public License #\r\n# along with this program. If not, see <http://www.gnu.org/licenses/>. #\r\n# #\r\n################################################################################\r\n\r\nimport os\r\nimport build\r\n\r\n################################################################\r\n# Default options (will be overriden by command line switches) #\r\n################################################################\r\n\r\n# Parallel build\r\nSetOption('num_jobs', 4)\r\n\r\n# include cache\r\nSetOption('implicit_cache', 1)\r\n\r\n##########################################################\r\n# Command-line parameters (overriden by localconfig.py) #\r\n##########################################################\r\n\r\nbuildVariables = Variables(\"localconfig.py\")\r\nbuildVariables.Add(PathVariable(\"QTDIR\", \"Qt4 root directory\", \"/usr/share/qt4\", PathVariable.PathIsDir))\r\nbuildVariables.Add(PathVariable(\"OGRE_HOME\", \"Ogre1.6 root directory (windows only)\", None, PathVariable.PathIsDir))\r\nbuildVariables.Add(PathVariable(\"PTHREADWIN32_HOME\", \"PthreadWin32 root 
directory (windows only)\", None, PathVariable.PathIsDir))\r\nbuildVariables.Add(PathVariable(\"ODE_HOME\", \"ODE 0.11 root directory\", None, PathVariable.PathIsDir))\r\nbuildVariables.Add(BoolVariable(\"DEBUG\", \"If true, build in debug configuration\", False))\r\nbuildVariables.Add(BoolVariable(\"FORCE_MINGW\", \"When both MinGW and VC++ are installed, force the use of the MinGW compiler instead of the default (windows only)\", False))\r\nbuildVariables.Add(BoolVariable(\"DISABLE_GRAPH\", \"Disable dependency graph generation\", False))\r\n\r\n##############################################################################\r\n# Variable value extraction (nasty, should be updated when the API evolves) #\r\n# The reason for having this here is that we have to access variables before #\r\n# we can create the real construction environment (for tools selection) #\r\n##############################################################################\r\n\r\ncurrentVariables = Environment(variables = buildVariables).Dictionary()\r\n\r\n####################\r\n# Base environment #\r\n####################\r\n\r\nbaseTools = [\"qt\"]\r\nif os.name == \"nt\":\r\n\tif currentVariables[\"FORCE_MINGW\"]:\r\n\t\tbaseTools.append(\"mingw\")\r\n\telse:\r\n\t\tbaseTools.append(\"default\")\r\nelse:\r\n\tbaseTools.append(\"default\")\r\n\r\nbaseEnvironment = Environment(tools = baseTools, variables = buildVariables)\r\n\r\n# additional variables\r\nbaseEnvironment[\"OSNAME\"] = os.name\r\nbaseEnvironment[\"SYSPATH\"] = os.environ[\"PATH\"].split(os.pathsep)\r\n\r\nif baseEnvironment[\"CC\"] == \"cl\":\r\n\tbaseEnvironment.AppendUnique(CPPFLAGS = [\"/EHsc\"])\r\n\r\n# debug symbols vs. 
optimization\r\nif baseEnvironment[\"DEBUG\"]:\r\n\tif baseEnvironment[\"CC\"] == \"cl\":\r\n\t\tbaseEnvironment.AppendUnique(CPPFLAGS = [\"/Z7\"])\r\n\telse:\r\n\t\tbaseEnvironment.AppendUnique(CPPFLAGS = [\"-g\"])\r\nelse:\r\n\tif baseEnvironment[\"CC\"] == \"cl\":\r\n\t\tbaseEnvironment.AppendUnique(CPPFLAGS = [\"/Ox\"])\r\n\telse:\r\n\t\tbaseEnvironment.AppendUnique(CPPFLAGS = [\"-O2\"])\r\n\r\n# Qt tool workaround\r\nbaseEnvironment.Replace(LIBS = [])\r\nbaseEnvironment.Replace(LIBPATH = [])\r\nbaseEnvironment.Replace(CPPPATH = [])\r\n\r\n# Qt UI builder\r\nuiBuilder = Builder(action = '$QT_UIC $QT_UICDECLFLAGS -o ${TARGETS[0]} $SOURCE')\r\nbaseEnvironment.Append(BUILDERS = {'Ui' : uiBuilder})\r\n\r\n# Qt RC builder\r\nrcBuilder = Builder(action = '$QT_BINPATH/rcc $QT_RCCDECLFLAGS -o ${TARGETS[0]} $SOURCE')\r\nbaseEnvironment.Append(BUILDERS = {'Rc' : rcBuilder})\r\n\r\n# Under windows, add the platform SDK\r\nif os.name == \"nt\" and baseEnvironment[\"CC\"] == \"cl\":\r\n\timport _winreg\r\n\tkey = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, \"Software\\\\Microsoft\\\\Microsoft SDKs\\\\Windows\")\r\n\twinSdkHome = _winreg.QueryValueEx(key, \"CurrentInstallFolder\")[0]\r\n\t_winreg.CloseKey(key)\r\n\tbaseEnvironment[\"WINSDK_HOME\"] = winSdkHome\r\n\tbaseEnvironment.AppendUnique(CPPPATH = [\"$WINSDK_HOME/Include\"])\r\n\tbaseEnvironment.AppendUnique(LIBPATH = [\"$WINSDK_HOME/Lib\"])\r\n\r\n# Do not rely on VC++ runtime library\r\nif os.name == \"nt\" and baseEnvironment[\"CC\"] == \"cl\":\r\n\tbaseEnvironment.AppendUnique(CPPFLAGS = [\"/MD\"])\r\n\r\n# Speed up change analysis\r\nbaseEnvironment.Decider('MD5-timestamp')\r\n\r\n#####################\r\n# Command-line help #\r\n#####################\r\n\r\nHelp(buildVariables.GenerateHelpText(baseEnvironment))\r\n\r\n##################################\r\n# SCons environment declarations #\r\n##################################\r\n\r\nwalker = build.DependencyWalker()\r\n\r\n# external component database\r\nfor 
script in Glob(\"components.*.py\"):\r\n\tSConscript(script, exports = \"walker\", variant_dir = \"build\", duplicate = 0)\r\n\r\nwalker.makeEnvironments(baseEnvironment)\r\nif not baseEnvironment[\"DISABLE_GRAPH\"]:\r\n\twalker.makeDependencyGraph(\"dependencies.png\")\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def send_archs_from_queue_to_main_dataset(context):
background_queue_qs = Design.objects.filter(activecontext_id__exact=
context.eosscontext.activecontext.id)
arch_list = []
for design in background_queue_qs.all():
design.activecontext = None
design.eosscontext = context.eosscontext
design.save()
context.eosscontext.added_archs_count += 1
context.eosscontext.save()
arch_list.append({'id': design.id, 'inputs': json.loads(design.
inputs), 'outputs': json.loads(design.outputs)})
if context.eosscontext.added_archs_count >= 5:
context.eosscontext.added_archs_count = 0
context.eosscontext.save()
activate_diversifier(context.eosscontext)
return arch_list
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def send_archs_back(channel_layer, channel_name, archs):
async_to_sync(channel_layer.send)(channel_name, {'type': 'ga.new_archs',
'archs': archs})
def send_archs_from_queue_to_main_dataset(context):
background_queue_qs = Design.objects.filter(activecontext_id__exact=
context.eosscontext.activecontext.id)
arch_list = []
for design in background_queue_qs.all():
design.activecontext = None
design.eosscontext = context.eosscontext
design.save()
context.eosscontext.added_archs_count += 1
context.eosscontext.save()
arch_list.append({'id': design.id, 'inputs': json.loads(design.
inputs), 'outputs': json.loads(design.outputs)})
if context.eosscontext.added_archs_count >= 5:
context.eosscontext.added_archs_count = 0
context.eosscontext.save()
activate_diversifier(context.eosscontext)
return arch_list
<|reserved_special_token_1|>
import json
from asgiref.sync import async_to_sync
from daphne_API.diversifier import activate_diversifier
from daphne_API.models import Design
def send_archs_back(channel_layer, channel_name, archs):
async_to_sync(channel_layer.send)(channel_name, {'type': 'ga.new_archs',
'archs': archs})
def send_archs_from_queue_to_main_dataset(context):
background_queue_qs = Design.objects.filter(activecontext_id__exact=
context.eosscontext.activecontext.id)
arch_list = []
for design in background_queue_qs.all():
design.activecontext = None
design.eosscontext = context.eosscontext
design.save()
context.eosscontext.added_archs_count += 1
context.eosscontext.save()
arch_list.append({'id': design.id, 'inputs': json.loads(design.
inputs), 'outputs': json.loads(design.outputs)})
if context.eosscontext.added_archs_count >= 5:
context.eosscontext.added_archs_count = 0
context.eosscontext.save()
activate_diversifier(context.eosscontext)
return arch_list
<|reserved_special_token_1|>
import json
from asgiref.sync import async_to_sync
from daphne_API.diversifier import activate_diversifier
from daphne_API.models import Design
def send_archs_back(channel_layer, channel_name, archs):
async_to_sync(channel_layer.send)(channel_name,
{
'type': 'ga.new_archs',
'archs': archs
})
def send_archs_from_queue_to_main_dataset(context):
background_queue_qs = Design.objects.filter(activecontext_id__exact=context.eosscontext.activecontext.id)
arch_list = []
for design in background_queue_qs.all():
design.activecontext = None
design.eosscontext = context.eosscontext
design.save()
context.eosscontext.added_archs_count += 1
context.eosscontext.save()
arch_list.append({
'id': design.id,
'inputs': json.loads(design.inputs),
'outputs': json.loads(design.outputs),
})
if context.eosscontext.added_archs_count >= 5:
context.eosscontext.added_archs_count = 0
context.eosscontext.save()
activate_diversifier(context.eosscontext)
return arch_list
|
flexible
|
{
"blob_id": "564c613491b0d1797b216a0bd425690e9fae12bc",
"index": 7725,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef send_archs_from_queue_to_main_dataset(context):\n background_queue_qs = Design.objects.filter(activecontext_id__exact=\n context.eosscontext.activecontext.id)\n arch_list = []\n for design in background_queue_qs.all():\n design.activecontext = None\n design.eosscontext = context.eosscontext\n design.save()\n context.eosscontext.added_archs_count += 1\n context.eosscontext.save()\n arch_list.append({'id': design.id, 'inputs': json.loads(design.\n inputs), 'outputs': json.loads(design.outputs)})\n if context.eosscontext.added_archs_count >= 5:\n context.eosscontext.added_archs_count = 0\n context.eosscontext.save()\n activate_diversifier(context.eosscontext)\n return arch_list\n",
"step-3": "<mask token>\n\n\ndef send_archs_back(channel_layer, channel_name, archs):\n async_to_sync(channel_layer.send)(channel_name, {'type': 'ga.new_archs',\n 'archs': archs})\n\n\ndef send_archs_from_queue_to_main_dataset(context):\n background_queue_qs = Design.objects.filter(activecontext_id__exact=\n context.eosscontext.activecontext.id)\n arch_list = []\n for design in background_queue_qs.all():\n design.activecontext = None\n design.eosscontext = context.eosscontext\n design.save()\n context.eosscontext.added_archs_count += 1\n context.eosscontext.save()\n arch_list.append({'id': design.id, 'inputs': json.loads(design.\n inputs), 'outputs': json.loads(design.outputs)})\n if context.eosscontext.added_archs_count >= 5:\n context.eosscontext.added_archs_count = 0\n context.eosscontext.save()\n activate_diversifier(context.eosscontext)\n return arch_list\n",
"step-4": "import json\nfrom asgiref.sync import async_to_sync\nfrom daphne_API.diversifier import activate_diversifier\nfrom daphne_API.models import Design\n\n\ndef send_archs_back(channel_layer, channel_name, archs):\n async_to_sync(channel_layer.send)(channel_name, {'type': 'ga.new_archs',\n 'archs': archs})\n\n\ndef send_archs_from_queue_to_main_dataset(context):\n background_queue_qs = Design.objects.filter(activecontext_id__exact=\n context.eosscontext.activecontext.id)\n arch_list = []\n for design in background_queue_qs.all():\n design.activecontext = None\n design.eosscontext = context.eosscontext\n design.save()\n context.eosscontext.added_archs_count += 1\n context.eosscontext.save()\n arch_list.append({'id': design.id, 'inputs': json.loads(design.\n inputs), 'outputs': json.loads(design.outputs)})\n if context.eosscontext.added_archs_count >= 5:\n context.eosscontext.added_archs_count = 0\n context.eosscontext.save()\n activate_diversifier(context.eosscontext)\n return arch_list\n",
"step-5": "import json\n\nfrom asgiref.sync import async_to_sync\n\nfrom daphne_API.diversifier import activate_diversifier\nfrom daphne_API.models import Design\n\n\ndef send_archs_back(channel_layer, channel_name, archs):\n async_to_sync(channel_layer.send)(channel_name,\n {\n 'type': 'ga.new_archs',\n 'archs': archs\n })\n\n\ndef send_archs_from_queue_to_main_dataset(context):\n background_queue_qs = Design.objects.filter(activecontext_id__exact=context.eosscontext.activecontext.id)\n arch_list = []\n for design in background_queue_qs.all():\n design.activecontext = None\n design.eosscontext = context.eosscontext\n design.save()\n context.eosscontext.added_archs_count += 1\n context.eosscontext.save()\n arch_list.append({\n 'id': design.id,\n 'inputs': json.loads(design.inputs),\n 'outputs': json.loads(design.outputs),\n })\n\n if context.eosscontext.added_archs_count >= 5:\n context.eosscontext.added_archs_count = 0\n context.eosscontext.save()\n activate_diversifier(context.eosscontext)\n\n return arch_list",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for episode in range(EPISODE_COUNT):
obs = env.reset()
agent.reset()
steps = 0
if (episode + 1) % 100 == 0:
state = {'model_state': agent.model.state_dict(), 'optim_state':
agent.optim.state_dict()}
torch.save(state, 'models/' + str(episode + 1) + '.pt')
env.save_value_and_policy_map_for_A2C(agent.model, 'images/' + str(
episode + 1) + '.png')
while True:
env.render()
action = agent.action(obs)
obs = env.step(action)
agent.reward(obs.reward)
if obs.done:
agent.train(obs)
break
steps += 1
if steps % STEP_COUNT == 0:
agent.train(obs)
continue
print(str(episode + 1) + ': ' + str(agent.episode_reward))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
feature_size = 8, 8
env = TIenv(frame_rate=0, num_marks=1, feature_size=feature_size)
agent = TIagent(feature_size=feature_size, learning_rate=0.0001)
EPISODE_COUNT = 50000
STEP_COUNT = 40
for episode in range(EPISODE_COUNT):
obs = env.reset()
agent.reset()
steps = 0
if (episode + 1) % 100 == 0:
state = {'model_state': agent.model.state_dict(), 'optim_state':
agent.optim.state_dict()}
torch.save(state, 'models/' + str(episode + 1) + '.pt')
env.save_value_and_policy_map_for_A2C(agent.model, 'images/' + str(
episode + 1) + '.png')
while True:
env.render()
action = agent.action(obs)
obs = env.step(action)
agent.reward(obs.reward)
if obs.done:
agent.train(obs)
break
steps += 1
if steps % STEP_COUNT == 0:
agent.train(obs)
continue
print(str(episode + 1) + ': ' + str(agent.episode_reward))
<|reserved_special_token_1|>
from Common.TreasureIsland import TIenv
from .Agent import TIagent
import torch
feature_size = 8, 8
env = TIenv(frame_rate=0, num_marks=1, feature_size=feature_size)
agent = TIagent(feature_size=feature_size, learning_rate=0.0001)
EPISODE_COUNT = 50000
STEP_COUNT = 40
for episode in range(EPISODE_COUNT):
obs = env.reset()
agent.reset()
steps = 0
if (episode + 1) % 100 == 0:
state = {'model_state': agent.model.state_dict(), 'optim_state':
agent.optim.state_dict()}
torch.save(state, 'models/' + str(episode + 1) + '.pt')
env.save_value_and_policy_map_for_A2C(agent.model, 'images/' + str(
episode + 1) + '.png')
while True:
env.render()
action = agent.action(obs)
obs = env.step(action)
agent.reward(obs.reward)
if obs.done:
agent.train(obs)
break
steps += 1
if steps % STEP_COUNT == 0:
agent.train(obs)
continue
print(str(episode + 1) + ': ' + str(agent.episode_reward))
<|reserved_special_token_1|>
from Common.TreasureIsland import TIenv
from .Agent import TIagent
import torch
feature_size = (8, 8)
env = TIenv(frame_rate=0, num_marks=1, feature_size=feature_size)
agent = TIagent(feature_size=feature_size, learning_rate=0.0001)
EPISODE_COUNT = 50000
STEP_COUNT = 40
for episode in range(EPISODE_COUNT):
obs = env.reset()
agent.reset()
steps = 0
if (episode + 1) % 100 == 0:
state = {
'model_state': agent.model.state_dict(),
'optim_state': agent.optim.state_dict()
}
torch.save(state, "models/" + str(episode+1) + '.pt')
env.save_value_and_policy_map_for_A2C(agent.model, 'images/' + str(episode+1) + '.png')
while True:
env.render()
action = agent.action(obs)
obs = env.step(action)
agent.reward(obs.reward)
if obs.done:
agent.train(obs)
break
steps += 1
if steps % STEP_COUNT == 0:
agent.train(obs)
continue
print(str(episode + 1) + ": " + str(agent.episode_reward))
|
flexible
|
{
"blob_id": "bf133e73f0c842603dbd7cc3a103a2aa95e2236e",
"index": 4359,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor episode in range(EPISODE_COUNT):\n obs = env.reset()\n agent.reset()\n steps = 0\n if (episode + 1) % 100 == 0:\n state = {'model_state': agent.model.state_dict(), 'optim_state':\n agent.optim.state_dict()}\n torch.save(state, 'models/' + str(episode + 1) + '.pt')\n env.save_value_and_policy_map_for_A2C(agent.model, 'images/' + str(\n episode + 1) + '.png')\n while True:\n env.render()\n action = agent.action(obs)\n obs = env.step(action)\n agent.reward(obs.reward)\n if obs.done:\n agent.train(obs)\n break\n steps += 1\n if steps % STEP_COUNT == 0:\n agent.train(obs)\n continue\n print(str(episode + 1) + ': ' + str(agent.episode_reward))\n",
"step-3": "<mask token>\nfeature_size = 8, 8\nenv = TIenv(frame_rate=0, num_marks=1, feature_size=feature_size)\nagent = TIagent(feature_size=feature_size, learning_rate=0.0001)\nEPISODE_COUNT = 50000\nSTEP_COUNT = 40\nfor episode in range(EPISODE_COUNT):\n obs = env.reset()\n agent.reset()\n steps = 0\n if (episode + 1) % 100 == 0:\n state = {'model_state': agent.model.state_dict(), 'optim_state':\n agent.optim.state_dict()}\n torch.save(state, 'models/' + str(episode + 1) + '.pt')\n env.save_value_and_policy_map_for_A2C(agent.model, 'images/' + str(\n episode + 1) + '.png')\n while True:\n env.render()\n action = agent.action(obs)\n obs = env.step(action)\n agent.reward(obs.reward)\n if obs.done:\n agent.train(obs)\n break\n steps += 1\n if steps % STEP_COUNT == 0:\n agent.train(obs)\n continue\n print(str(episode + 1) + ': ' + str(agent.episode_reward))\n",
"step-4": "from Common.TreasureIsland import TIenv\nfrom .Agent import TIagent\nimport torch\nfeature_size = 8, 8\nenv = TIenv(frame_rate=0, num_marks=1, feature_size=feature_size)\nagent = TIagent(feature_size=feature_size, learning_rate=0.0001)\nEPISODE_COUNT = 50000\nSTEP_COUNT = 40\nfor episode in range(EPISODE_COUNT):\n obs = env.reset()\n agent.reset()\n steps = 0\n if (episode + 1) % 100 == 0:\n state = {'model_state': agent.model.state_dict(), 'optim_state':\n agent.optim.state_dict()}\n torch.save(state, 'models/' + str(episode + 1) + '.pt')\n env.save_value_and_policy_map_for_A2C(agent.model, 'images/' + str(\n episode + 1) + '.png')\n while True:\n env.render()\n action = agent.action(obs)\n obs = env.step(action)\n agent.reward(obs.reward)\n if obs.done:\n agent.train(obs)\n break\n steps += 1\n if steps % STEP_COUNT == 0:\n agent.train(obs)\n continue\n print(str(episode + 1) + ': ' + str(agent.episode_reward))\n",
"step-5": "from Common.TreasureIsland import TIenv\nfrom .Agent import TIagent\n\nimport torch\n\nfeature_size = (8, 8)\n\nenv = TIenv(frame_rate=0, num_marks=1, feature_size=feature_size)\nagent = TIagent(feature_size=feature_size, learning_rate=0.0001)\n\nEPISODE_COUNT = 50000\nSTEP_COUNT = 40\n\n\nfor episode in range(EPISODE_COUNT):\n\n obs = env.reset()\n agent.reset()\n\n steps = 0\n\n if (episode + 1) % 100 == 0:\n state = {\n 'model_state': agent.model.state_dict(),\n 'optim_state': agent.optim.state_dict()\n }\n\n torch.save(state, \"models/\" + str(episode+1) + '.pt')\n env.save_value_and_policy_map_for_A2C(agent.model, 'images/' + str(episode+1) + '.png')\n\n while True:\n env.render()\n\n action = agent.action(obs)\n obs = env.step(action)\n agent.reward(obs.reward)\n\n if obs.done:\n agent.train(obs)\n break\n\n steps += 1\n if steps % STEP_COUNT == 0:\n agent.train(obs)\n continue\n\n print(str(episode + 1) + \": \" + str(agent.episode_reward))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for n in range(len(user_input)):
explosion_strength = 0
if user_input[n] == '>':
explosion_strength += int(user_input[n + 1])
if user_input[n + explosion_strength] != '>':
exploded_str = user_input[:n] + user_input[n +
explosion_strength + 1:]
user_input = exploded_str
else:
explosion_strength += user_input[n + explosion_strength + 1]
<|reserved_special_token_1|>
user_input = input()
exploded_str = user_input
for n in range(len(user_input)):
explosion_strength = 0
if user_input[n] == '>':
explosion_strength += int(user_input[n + 1])
if user_input[n + explosion_strength] != '>':
exploded_str = user_input[:n] + user_input[n +
explosion_strength + 1:]
user_input = exploded_str
else:
explosion_strength += user_input[n + explosion_strength + 1]
<|reserved_special_token_1|>
user_input = input() #abv>1>1>2>2asdasd
exploded_str = user_input
for n in range(len(user_input)):
explosion_strength = 0
if user_input[n] == ">":
explosion_strength += int(user_input[n+1])
if user_input[n+explosion_strength] != ">":
exploded_str = user_input[:n] + user_input[n+explosion_strength+1:]
user_input = exploded_str
else:
explosion_strength += user_input[n+explosion_strength+1]
|
flexible
|
{
"blob_id": "7930bb813bd546747c7c65b661900939f5ba93f1",
"index": 273,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor n in range(len(user_input)):\n explosion_strength = 0\n if user_input[n] == '>':\n explosion_strength += int(user_input[n + 1])\n if user_input[n + explosion_strength] != '>':\n exploded_str = user_input[:n] + user_input[n +\n explosion_strength + 1:]\n user_input = exploded_str\n else:\n explosion_strength += user_input[n + explosion_strength + 1]\n",
"step-3": "user_input = input()\nexploded_str = user_input\nfor n in range(len(user_input)):\n explosion_strength = 0\n if user_input[n] == '>':\n explosion_strength += int(user_input[n + 1])\n if user_input[n + explosion_strength] != '>':\n exploded_str = user_input[:n] + user_input[n +\n explosion_strength + 1:]\n user_input = exploded_str\n else:\n explosion_strength += user_input[n + explosion_strength + 1]\n",
"step-4": "user_input = input() #abv>1>1>2>2asdasd\nexploded_str = user_input\n\nfor n in range(len(user_input)):\n explosion_strength = 0\n if user_input[n] == \">\":\n explosion_strength += int(user_input[n+1])\n if user_input[n+explosion_strength] != \">\":\n exploded_str = user_input[:n] + user_input[n+explosion_strength+1:]\n user_input = exploded_str\n else:\n explosion_strength += user_input[n+explosion_strength+1]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""asks the user for english words to latinize"""
def latinize_word(word):
"""performs bee latin on a word"""
if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':
word = word[1:] + word[0] + 'uzz'
else:
word += 'buzz'
return word.lower()
def latinize_sentence(sentence):
"""performs bee latin on a sentence"""
words = sentence.split()
latanized_words = [latinize_word(word) for word in words]
return " ".join(latanized_words)
def main():
"""main function"""
english_sentence = input('Enter English sentence: ')
while english_sentence != 'q':
print(f'Bee latin = {latinize_sentence(english_sentence)}')
english_sentence = input('Enter English sentence: ')
print(latinize_word('goodbye'))
main()
|
normal
|
{
"blob_id": "5810739300067e8f207d09bf971484a278372a9a",
"index": 5246,
"step-1": "<mask token>\n\n\ndef latinize_word(word):\n \"\"\"performs bee latin on a word\"\"\"\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef latinize_word(word):\n \"\"\"performs bee latin on a word\"\"\"\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()\n\n\n<mask token>\n\n\ndef main():\n \"\"\"main function\"\"\"\n english_sentence = input('Enter English sentence: ')\n while english_sentence != 'q':\n print(f'Bee latin = {latinize_sentence(english_sentence)}')\n english_sentence = input('Enter English sentence: ')\n print(latinize_word('goodbye'))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef latinize_word(word):\n \"\"\"performs bee latin on a word\"\"\"\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()\n\n\ndef latinize_sentence(sentence):\n \"\"\"performs bee latin on a sentence\"\"\"\n words = sentence.split()\n latanized_words = [latinize_word(word) for word in words]\n return ' '.join(latanized_words)\n\n\ndef main():\n \"\"\"main function\"\"\"\n english_sentence = input('Enter English sentence: ')\n while english_sentence != 'q':\n print(f'Bee latin = {latinize_sentence(english_sentence)}')\n english_sentence = input('Enter English sentence: ')\n print(latinize_word('goodbye'))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef latinize_word(word):\n \"\"\"performs bee latin on a word\"\"\"\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()\n\n\ndef latinize_sentence(sentence):\n \"\"\"performs bee latin on a sentence\"\"\"\n words = sentence.split()\n latanized_words = [latinize_word(word) for word in words]\n return ' '.join(latanized_words)\n\n\ndef main():\n \"\"\"main function\"\"\"\n english_sentence = input('Enter English sentence: ')\n while english_sentence != 'q':\n print(f'Bee latin = {latinize_sentence(english_sentence)}')\n english_sentence = input('Enter English sentence: ')\n print(latinize_word('goodbye'))\n\n\nmain()\n",
"step-5": "\"\"\"asks the user for english words to latinize\"\"\"\n\n\ndef latinize_word(word):\n \"\"\"performs bee latin on a word\"\"\"\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()\n\n\ndef latinize_sentence(sentence):\n \"\"\"performs bee latin on a sentence\"\"\"\n words = sentence.split()\n latanized_words = [latinize_word(word) for word in words]\n return \" \".join(latanized_words)\n\n\ndef main():\n \"\"\"main function\"\"\"\n english_sentence = input('Enter English sentence: ')\n while english_sentence != 'q':\n print(f'Bee latin = {latinize_sentence(english_sentence)}')\n english_sentence = input('Enter English sentence: ')\n print(latinize_word('goodbye'))\n\n\nmain()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Your name is ' + name + ', you are ' + age +
' years old, and your username is ' + reddit)
<|reserved_special_token_1|>
name = input('What is your name? ')
age = input('How old are you? ')
reddit = input('What is your reddit username? ')
print('Your name is ' + name + ', you are ' + age +
' years old, and your username is ' + reddit)
<|reserved_special_token_1|>
# Create a program that will ask the users name, age, and reddit username.
# Have it tell them the information back, in the format:
#
# Your name is (blank), you are (blank) years old, and your username is (blank)
#
# For extra credit, have the program log this information in a file to be accessed later.
#
name = input("What is your name? ")
age = input("How old are you? ")
reddit = input("What is your reddit username? ")
print("Your name is " + name + ", you are " + age + " years old, and your username is " + reddit)
|
flexible
|
{
"blob_id": "00531c5a7fdcd24204b0546c081bbe7d63d0a6b2",
"index": 1520,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Your name is ' + name + ', you are ' + age +\n ' years old, and your username is ' + reddit)\n",
"step-3": "name = input('What is your name? ')\nage = input('How old are you? ')\nreddit = input('What is your reddit username? ')\nprint('Your name is ' + name + ', you are ' + age +\n ' years old, and your username is ' + reddit)\n",
"step-4": "# Create a program that will ask the users name, age, and reddit username. \n# Have it tell them the information back, in the format:\n# \n# Your name is (blank), you are (blank) years old, and your username is (blank)\n# \n# For extra credit, have the program log this information in a file to be accessed later.\n# \n\nname = input(\"What is your name? \")\nage = input(\"How old are you? \")\nreddit = input(\"What is your reddit username? \")\n\nprint(\"Your name is \" + name + \", you are \" + age + \" years old, and your username is \" + reddit)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import socket
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setup(20,GPIO.OUT,initial=GPIO.LOW) #green
GPIO.setup(21,GPIO.OUT,initial=GPIO.LOW) #red
GPIO.setwarnings(False)
host = '192.168.87.191'
port = 5560
def setupServer():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Socket created.")
try:
s.bind((host, port))
except socket.error as msg:
print(msg)
print("Socket bind comlete.")
return s
def setupConnection():
s.listen(1) # Allows one connection at a time.
conn, address = s.accept()
print("Connected to: " + address[0] + ":" + str(address[1]))
return conn
def RED(t):
#Red LED
GPIO.output(21,1)
time.sleep(1)
GPIO.output(21,0)
def GREEN(t):
#GREEN LED
GPIO.outdefput(20,1)
time.sleep(t)
GPIO.output(20,0)
def dataTransfer(conn):
# A big loop that receives data until told not to.
while True:
# Receive the data
data = conn.recv(1024) # receive the data
data = data.decode('utf-8')
# Split the data such that you separate the command
# from the rest of the data.
dataMessage = data.split(' ', 1)
# Command
command = dataMessage[0]
# parameter
para=dataMessage[1]
y=int(para)
if len(command)>0:
print(command)
if command == 'RED':
RED(y)
elif command == 'GREEN':
GREEN(y)
elif command == 'KILL':
print("Our server is shutting down.")
s.close()
break
else:
print('Unknown Command')
#conn.close()
s = setupServer()
#while True:
# try:
# conn = setupConnection()
# dataTransfer(conn)
# except:
# break
def main():
try:
while True:
try:
conn = setupConnection()
dataTransfer(conn)
except:
break
except KeyboardInterrupt:
print("program terminated")
finally:
GPIO.cleanup()
conn.close()
#Runs Main Function
if __name__=="__main__":
main()
|
normal
|
{
"blob_id": "78efe97d838774cb831ef205186db29f392e1953",
"index": 1584,
"step-1": "<mask token>\n\n\ndef RED(t):\n GPIO.output(21, 1)\n time.sleep(1)\n GPIO.output(21, 0)\n\n\n<mask token>\n\n\ndef dataTransfer(conn):\n while True:\n data = conn.recv(1024)\n data = data.decode('utf-8')\n dataMessage = data.split(' ', 1)\n command = dataMessage[0]\n para = dataMessage[1]\n y = int(para)\n if len(command) > 0:\n print(command)\n if command == 'RED':\n RED(y)\n elif command == 'GREEN':\n GREEN(y)\n elif command == 'KILL':\n print('Our server is shutting down.')\n s.close()\n break\n else:\n print('Unknown Command')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef setupServer():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print('Socket created.')\n try:\n s.bind((host, port))\n except socket.error as msg:\n print(msg)\n print('Socket bind comlete.')\n return s\n\n\ndef setupConnection():\n s.listen(1)\n conn, address = s.accept()\n print('Connected to: ' + address[0] + ':' + str(address[1]))\n return conn\n\n\ndef RED(t):\n GPIO.output(21, 1)\n time.sleep(1)\n GPIO.output(21, 0)\n\n\n<mask token>\n\n\ndef dataTransfer(conn):\n while True:\n data = conn.recv(1024)\n data = data.decode('utf-8')\n dataMessage = data.split(' ', 1)\n command = dataMessage[0]\n para = dataMessage[1]\n y = int(para)\n if len(command) > 0:\n print(command)\n if command == 'RED':\n RED(y)\n elif command == 'GREEN':\n GREEN(y)\n elif command == 'KILL':\n print('Our server is shutting down.')\n s.close()\n break\n else:\n print('Unknown Command')\n\n\n<mask token>\n",
"step-3": "<mask token>\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(20, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setup(21, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setwarnings(False)\n<mask token>\n\n\ndef setupServer():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print('Socket created.')\n try:\n s.bind((host, port))\n except socket.error as msg:\n print(msg)\n print('Socket bind comlete.')\n return s\n\n\ndef setupConnection():\n s.listen(1)\n conn, address = s.accept()\n print('Connected to: ' + address[0] + ':' + str(address[1]))\n return conn\n\n\ndef RED(t):\n GPIO.output(21, 1)\n time.sleep(1)\n GPIO.output(21, 0)\n\n\ndef GREEN(t):\n GPIO.outdefput(20, 1)\n time.sleep(t)\n GPIO.output(20, 0)\n\n\ndef dataTransfer(conn):\n while True:\n data = conn.recv(1024)\n data = data.decode('utf-8')\n dataMessage = data.split(' ', 1)\n command = dataMessage[0]\n para = dataMessage[1]\n y = int(para)\n if len(command) > 0:\n print(command)\n if command == 'RED':\n RED(y)\n elif command == 'GREEN':\n GREEN(y)\n elif command == 'KILL':\n print('Our server is shutting down.')\n s.close()\n break\n else:\n print('Unknown Command')\n\n\n<mask token>\n\n\ndef main():\n try:\n while True:\n try:\n conn = setupConnection()\n dataTransfer(conn)\n except:\n break\n except KeyboardInterrupt:\n print('program terminated')\n finally:\n GPIO.cleanup()\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import socket\nimport RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(20, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setup(21, GPIO.OUT, initial=GPIO.LOW)\nGPIO.setwarnings(False)\nhost = '192.168.87.191'\nport = 5560\n\n\ndef setupServer():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print('Socket created.')\n try:\n s.bind((host, port))\n except socket.error as msg:\n print(msg)\n print('Socket bind comlete.')\n return s\n\n\ndef setupConnection():\n s.listen(1)\n conn, address = s.accept()\n print('Connected to: ' + address[0] + ':' + str(address[1]))\n return conn\n\n\ndef RED(t):\n GPIO.output(21, 1)\n time.sleep(1)\n GPIO.output(21, 0)\n\n\ndef GREEN(t):\n GPIO.outdefput(20, 1)\n time.sleep(t)\n GPIO.output(20, 0)\n\n\ndef dataTransfer(conn):\n while True:\n data = conn.recv(1024)\n data = data.decode('utf-8')\n dataMessage = data.split(' ', 1)\n command = dataMessage[0]\n para = dataMessage[1]\n y = int(para)\n if len(command) > 0:\n print(command)\n if command == 'RED':\n RED(y)\n elif command == 'GREEN':\n GREEN(y)\n elif command == 'KILL':\n print('Our server is shutting down.')\n s.close()\n break\n else:\n print('Unknown Command')\n\n\ns = setupServer()\n\n\ndef main():\n try:\n while True:\n try:\n conn = setupConnection()\n dataTransfer(conn)\n except:\n break\n except KeyboardInterrupt:\n print('program terminated')\n finally:\n GPIO.cleanup()\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import socket\nimport RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(20,GPIO.OUT,initial=GPIO.LOW) #green\nGPIO.setup(21,GPIO.OUT,initial=GPIO.LOW) #red\nGPIO.setwarnings(False)\n\nhost = '192.168.87.191'\nport = 5560\n\ndef setupServer():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print(\"Socket created.\")\n try:\n s.bind((host, port))\n except socket.error as msg:\n print(msg)\n print(\"Socket bind comlete.\")\n return s\n\ndef setupConnection():\n s.listen(1) # Allows one connection at a time.\n conn, address = s.accept()\n print(\"Connected to: \" + address[0] + \":\" + str(address[1]))\n return conn\n\ndef RED(t):\n #Red LED\n GPIO.output(21,1)\n time.sleep(1)\n GPIO.output(21,0)\n\ndef GREEN(t):\n #GREEN LED\n GPIO.outdefput(20,1)\n time.sleep(t)\n GPIO.output(20,0)\n\ndef dataTransfer(conn):\n # A big loop that receives data until told not to.\n\n while True:\n # Receive the data\n data = conn.recv(1024) # receive the data\n data = data.decode('utf-8')\n\n # Split the data such that you separate the command\n # from the rest of the data.\n dataMessage = data.split(' ', 1)\n # Command\n command = dataMessage[0]\n # parameter\n para=dataMessage[1]\n y=int(para)\n if len(command)>0:\n print(command)\n if command == 'RED':\n RED(y)\n elif command == 'GREEN':\n GREEN(y)\n elif command == 'KILL':\n print(\"Our server is shutting down.\")\n s.close()\n break\n else:\n print('Unknown Command')\n #conn.close()\ns = setupServer()\n#while True:\n# try:\n# conn = setupConnection()\n# dataTransfer(conn)\n# except:\n# break\ndef main():\n try:\n while True:\n try:\n conn = setupConnection()\n dataTransfer(conn)\n except:\n break\n except KeyboardInterrupt:\n print(\"program terminated\")\n finally:\n GPIO.cleanup()\n conn.close()\n#Runs Main Function\nif __name__==\"__main__\":\n main()\n\n",
"step-ids": [
2,
4,
7,
9,
10
]
}
|
[
2,
4,
7,
9,
10
] |
def check22(y, x, board):
dirs = [[0, 1], [1, 0], [1, 1]]
ret = [(y, x)]
for d in dirs:
dy, dx = y + d[0], x + d[1]
if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[
dy][dx] != '0' and board[y][x] == board[dy][dx]):
return False
else:
ret.append((dy, dx))
return ret
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def check22(y, x, board):
dirs = [[0, 1], [1, 0], [1, 1]]
ret = [(y, x)]
for d in dirs:
dy, dx = y + d[0], x + d[1]
if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[
dy][dx] != '0' and board[y][x] == board[dy][dx]):
return False
else:
ret.append((dy, dx))
return ret
<|reserved_special_token_0|>
def deleteBoard(delete, board):
for delNode in delete:
board[delNode[0]][delNode[1]] = '0'
return board
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def check22(y, x, board):
dirs = [[0, 1], [1, 0], [1, 1]]
ret = [(y, x)]
for d in dirs:
dy, dx = y + d[0], x + d[1]
if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[
dy][dx] != '0' and board[y][x] == board[dy][dx]):
return False
else:
ret.append((dy, dx))
return ret
<|reserved_special_token_0|>
def deleteBoard(delete, board):
for delNode in delete:
board[delNode[0]][delNode[1]] = '0'
return board
def solution(m, n, board):
answer = 0
for i in range(len(board)):
board[i] = list(board[i])
while True:
delete = set([])
for y in range(len(board)):
for x in range(len(board[0])):
tmp = check22(y, x, board)
if tmp:
delete |= set(tmp)
delete = list(delete)
if not delete:
break
answer += len(delete)
board = deleteBoard(delete, board)
board = dropdown(board)
return answer
<|reserved_special_token_1|>
def check22(y, x, board):
dirs = [[0, 1], [1, 0], [1, 1]]
ret = [(y, x)]
for d in dirs:
dy, dx = y + d[0], x + d[1]
if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[
dy][dx] != '0' and board[y][x] == board[dy][dx]):
return False
else:
ret.append((dy, dx))
return ret
def dropdown(board):
for x in range(len(board[0])):
cnt = 0
movable = False
for y in range(len(board) - 1, -1, -1):
if board[y][x] == '0':
cnt += 1
movable = True
if board[y][x] != '0' and movable:
board[y + cnt][x] = board[y][x]
board[y][x] = '0'
return board
def deleteBoard(delete, board):
for delNode in delete:
board[delNode[0]][delNode[1]] = '0'
return board
def solution(m, n, board):
answer = 0
for i in range(len(board)):
board[i] = list(board[i])
while True:
delete = set([])
for y in range(len(board)):
for x in range(len(board[0])):
tmp = check22(y, x, board)
if tmp:
delete |= set(tmp)
delete = list(delete)
if not delete:
break
answer += len(delete)
board = deleteBoard(delete, board)
board = dropdown(board)
return answer
<|reserved_special_token_1|>
# 체크는 오른쪽+아래로만 체크합니다.
def check22(y, x, board) :
dirs = [[0,1], [1,0], [1,1]]
ret = [(y,x)]
for d in dirs :
dy, dx = y+d[0], x+d[1]
if not ( (0<=dy<len(board)) and (0<=dx<len(board[0])) and board[dy][dx]!='0' and board[y][x]==board[dy][dx] ) :
return False
else :
ret.append((dy,dx))
return ret # 나중에 한 번에 삭제될 거임
def dropdown(board) :
for x in range(len(board[0])) :
cnt = 0
movable = False
for y in range(len(board)-1, -1, -1) :
# if y == len(board)-1 :
# if board[y][x] == '0' : break
if board[y][x] == '0' :
cnt += 1
movable = True
if board[y][x] != '0' and movable :
# 위에 떠있는 블록임. cnt만큼 내리면 됨
board[y+cnt][x] = board[y][x]
board[y][x] = '0'
return board
def deleteBoard(delete, board) :
for delNode in delete :
board[delNode[0]][delNode[1]] = '0'
return board
def solution(m, n, board):
    """Return the total number of blocks removed by repeatedly clearing
    every 2x2 group of identical blocks and letting the rest fall.

    m, n: board dimensions (unused directly; board carries its own size).
    board: list of row strings, converted to lists of chars in place.
    """
    answer = 0
    
    for i in range(len(board)) :
        board[i] = list(board[i])
    
    
    while True :
        
        delete = set([])
        
        # collect every cell that belongs to at least one 2x2 match
        for y in range(len(board)) :
            for x in range(len(board[0])) :
                tmp = check22(y, x, board)
                if tmp :
                    delete |= set(tmp)
        
        delete = list(delete)
        if not delete : break
        
        answer += len(delete)
        
        board = deleteBoard(delete, board)
        board = dropdown(board)
        
    return answer
|
flexible
|
{
"blob_id": "938c4325480608b904bfbe0b11c081166aad694b",
"index": 7291,
"step-1": "def check22(y, x, board):\n dirs = [[0, 1], [1, 0], [1, 1]]\n ret = [(y, x)]\n for d in dirs:\n dy, dx = y + d[0], x + d[1]\n if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[\n dy][dx] != '0' and board[y][x] == board[dy][dx]):\n return False\n else:\n ret.append((dy, dx))\n return ret\n\n\n<mask token>\n",
"step-2": "def check22(y, x, board):\n dirs = [[0, 1], [1, 0], [1, 1]]\n ret = [(y, x)]\n for d in dirs:\n dy, dx = y + d[0], x + d[1]\n if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[\n dy][dx] != '0' and board[y][x] == board[dy][dx]):\n return False\n else:\n ret.append((dy, dx))\n return ret\n\n\n<mask token>\n\n\ndef deleteBoard(delete, board):\n for delNode in delete:\n board[delNode[0]][delNode[1]] = '0'\n return board\n\n\n<mask token>\n",
"step-3": "def check22(y, x, board):\n dirs = [[0, 1], [1, 0], [1, 1]]\n ret = [(y, x)]\n for d in dirs:\n dy, dx = y + d[0], x + d[1]\n if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[\n dy][dx] != '0' and board[y][x] == board[dy][dx]):\n return False\n else:\n ret.append((dy, dx))\n return ret\n\n\n<mask token>\n\n\ndef deleteBoard(delete, board):\n for delNode in delete:\n board[delNode[0]][delNode[1]] = '0'\n return board\n\n\ndef solution(m, n, board):\n answer = 0\n for i in range(len(board)):\n board[i] = list(board[i])\n while True:\n delete = set([])\n for y in range(len(board)):\n for x in range(len(board[0])):\n tmp = check22(y, x, board)\n if tmp:\n delete |= set(tmp)\n delete = list(delete)\n if not delete:\n break\n answer += len(delete)\n board = deleteBoard(delete, board)\n board = dropdown(board)\n return answer\n",
"step-4": "def check22(y, x, board):\n dirs = [[0, 1], [1, 0], [1, 1]]\n ret = [(y, x)]\n for d in dirs:\n dy, dx = y + d[0], x + d[1]\n if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[\n dy][dx] != '0' and board[y][x] == board[dy][dx]):\n return False\n else:\n ret.append((dy, dx))\n return ret\n\n\ndef dropdown(board):\n for x in range(len(board[0])):\n cnt = 0\n movable = False\n for y in range(len(board) - 1, -1, -1):\n if board[y][x] == '0':\n cnt += 1\n movable = True\n if board[y][x] != '0' and movable:\n board[y + cnt][x] = board[y][x]\n board[y][x] = '0'\n return board\n\n\ndef deleteBoard(delete, board):\n for delNode in delete:\n board[delNode[0]][delNode[1]] = '0'\n return board\n\n\ndef solution(m, n, board):\n answer = 0\n for i in range(len(board)):\n board[i] = list(board[i])\n while True:\n delete = set([])\n for y in range(len(board)):\n for x in range(len(board[0])):\n tmp = check22(y, x, board)\n if tmp:\n delete |= set(tmp)\n delete = list(delete)\n if not delete:\n break\n answer += len(delete)\n board = deleteBoard(delete, board)\n board = dropdown(board)\n return answer\n",
"step-5": "# 체크는 오른쪽+아래로만 체크합니다.\ndef check22(y, x, board) : \n \n dirs = [[0,1], [1,0], [1,1]]\n \n ret = [(y,x)]\n for d in dirs :\n dy, dx = y+d[0], x+d[1]\n if not ( (0<=dy<len(board)) and (0<=dx<len(board[0])) and board[dy][dx]!='0' and board[y][x]==board[dy][dx] ) :\n return False\n else :\n ret.append((dy,dx))\n\n return ret # 나중에 한 번에 삭제될 거임\n\ndef dropdown(board) :\n \n for x in range(len(board[0])) :\n cnt = 0\n movable = False\n for y in range(len(board)-1, -1, -1) :\n # if y == len(board)-1 :\n # if board[y][x] == '0' : break\n if board[y][x] == '0' :\n cnt += 1\n movable = True\n if board[y][x] != '0' and movable :\n # 위에 떠있는 블록임. cnt만큼 내리면 됨\n board[y+cnt][x] = board[y][x]\n board[y][x] = '0'\n \n return board\n \ndef deleteBoard(delete, board) :\n \n for delNode in delete :\n board[delNode[0]][delNode[1]] = '0'\n \n return board\n\ndef solution(m, n, board):\n answer = 0\n \n for i in range(len(board)) :\n board[i] = list(board[i])\n \n \n while True :\n \n delete = set([])\n \n for y in range(len(board)) :\n for x in range(len(board[0])) :\n tmp = check22(y, x, board)\n if tmp :\n delete |= set(tmp)\n \n delete = list(delete)\n if not delete : break\n \n answer += len(delete)\n \n board = deleteBoard(delete, board)\n # print(board)\n board = dropdown(board)\n # print(board)\n \n return answer\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/8/15 下午5:04
# @Author : Zessay
from .ffm import *
from .fm import *
from .utils import *
from .base_model import *
from .base_trainer import *
from .logger import *
from .metric import *
from .input_fn import *
|
normal
|
{
"blob_id": "bbdb07a81d785bdf067707c4e56622a2ada76b7b",
"index": 1692,
"step-1": "<mask token>\n",
"step-2": "from .ffm import *\nfrom .fm import *\nfrom .utils import *\nfrom .base_model import *\nfrom .base_trainer import *\nfrom .logger import *\nfrom .metric import *\nfrom .input_fn import *\n",
"step-3": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/8/15 下午5:04\n# @Author : Zessay\n\nfrom .ffm import *\nfrom .fm import *\nfrom .utils import *\nfrom .base_model import *\nfrom .base_trainer import *\nfrom .logger import * \nfrom .metric import *\nfrom .input_fn import *",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def evaluate_comparison_range(node):
""" evaluate the numerical range of Comparison node, if any
else returns None """
return None
def is_comparison(node):
""" test if node is a Comparison node or not """
return isinstance(node, Comparison)
<|reserved_special_token_0|>
def evaluate_range(optree, update_interval=False, memoization_map=None):
""" evaluate the range of an Operation node
Args:
optree (ML_Operation): input Node
Return:
sollya Interval: evaluated range of optree or None if no range
could be determined
"""
if memoization_map is None:
memoization_map = {}
init_interval = optree.get_interval()
if not init_interval is None:
return init_interval
else:
if optree in memoization_map:
return memoization_map[optree]
elif isinstance(optree, ML_LeafNode):
op_range = optree.get_interval()
elif is_comparison(optree):
op_range = evaluate_comparison_range(optree)
if update_interval:
optree.set_interval(op_range)
elif isinstance(optree, PlaceHolder):
op_range = evaluate_range(optree.get_input(0), update_interval=
update_interval, memoization_map=memoization_map)
if update_interval:
optree.set_interval(op_range)
else:
args_interval = tuple(evaluate_range(op, update_interval=
update_interval, memoization_map=memoization_map) for op in
optree.get_inputs())
args_interval_map = {op: op_interval for op, op_interval in zip
(optree.inputs, args_interval)}
ops_interval_get = lambda op: args_interval_map[op]
op_range = optree.range_function(optree.inputs,
ops_interval_getter=ops_interval_get)
if update_interval:
optree.set_interval(op_range)
Log.report(LOG_VERBOSE_EVALUATE_RANGE, 'range of {} is {}', optree,
op_range)
memoization_map[optree] = op_range
return op_range
def forward_attributes(src, dst):
""" forward compatible attributes from src node to dst node
:param src: source source for attributes values
:type src: ML_Operation
:param dst: destination node for attributes copies
:type dst: ML_Operation
"""
dst.set_tag(src.get_tag())
dst.set_debug(src.get_debug())
dst.set_handle(src.get_handle())
if hasattr(src.attributes, 'init_stage'):
forward_stage_attributes(src, dst)
if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):
dst.likely = src.likely
def forward_stage_attributes(src, dst):
""" copy node's stage attributes from src node to dst node """
dst.attributes.init_stage = src.attributes.init_stage
def depth_node_ordering(start_node, end_nodes):
""" order the node between root start_node end end_nodes
by depth (root first, starting with start_node)
:param start_node: root of the sort (first node)
:type start_node: ML_Operation
:param end_nodes: nodes where the depth sort must end
:type end_nodes: iterator over ML_Operation
:return: depth ordered list of nodes
:rtype: list(ML_Operation)
"""
ordered_list = []
ordered_set = set()
working_list = [start_node]
while working_list != []:
node = working_list.pop(0)
if not node in ordered_set:
ordered_set.add(node)
ordered_list.append(node)
if not is_leaf_node(node) and not node in end_nodes:
for node_op in node.get_inputs():
working_list.append(node_op)
return ordered_list
def logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):
""" Logical/Boolean operand list reduction """
local_list = [node for node in op_list]
while len(local_list) > 1:
op0 = local_list.pop(0)
op1 = local_list.pop(0)
local_list.append(op_ctor(op0, op1, precision=precision))
result = local_list[0]
result.set_attributes(**kw)
return result
<|reserved_special_token_0|>
def uniform_list_check(value_list):
""" Check that value_list is made of only a single value replicated in
each element """
return reduce(lambda acc, value: acc and value == value_list[0],
value_list, True)
<|reserved_special_token_0|>
def uniform_shift_check(optree):
""" check whether optree is a bit shift by a uniform vector constant """
if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift,
BitArithmeticRightShift)):
return uniform_vector_constant_check(optree.get_input(1)
) or not optree.get_input(1).get_precision().is_vector_format()
return False
def is_false(node):
""" check if node is a Constant node whose value is equal to boolean False """
return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)
def is_true(node):
""" check if node is a Constant node whose value is equal to boolean True """
return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)
def is_scalar_cst(node, value):
""" check if node is a constant node with value equals to value """
return isinstance(node, Constant) and not node.get_precision(
).is_vector_format() and node.get_value() == value
def is_vector_uniform_cst(node, scalar_value):
""" check if node is a vector constant node with each value equals to
scalar_value """
return isinstance(node, Constant) and node.get_precision(
).is_vector_format() and node.get_value() == [scalar_value
] * node.get_precision().get_vector_size()
def extract_tables(node):
""" extract the set of all ML_Table nodes in the graph rooted at node """
processed_set = set([node])
table_set = set()
working_set = [node]
while working_set:
elt = working_set.pop(0)
if isinstance(elt, ML_NewTable):
table_set.add(elt)
elif not isinstance(elt, ML_LeafNode):
for op_node in elt.inputs:
if not op_node in processed_set:
processed_set.add(op_node)
working_set.append(op_node)
return table_set
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def evaluate_comparison_range(node):
""" evaluate the numerical range of Comparison node, if any
else returns None """
return None
def is_comparison(node):
""" test if node is a Comparison node or not """
return isinstance(node, Comparison)
<|reserved_special_token_0|>
def evaluate_range(optree, update_interval=False, memoization_map=None):
""" evaluate the range of an Operation node
Args:
optree (ML_Operation): input Node
Return:
sollya Interval: evaluated range of optree or None if no range
could be determined
"""
if memoization_map is None:
memoization_map = {}
init_interval = optree.get_interval()
if not init_interval is None:
return init_interval
else:
if optree in memoization_map:
return memoization_map[optree]
elif isinstance(optree, ML_LeafNode):
op_range = optree.get_interval()
elif is_comparison(optree):
op_range = evaluate_comparison_range(optree)
if update_interval:
optree.set_interval(op_range)
elif isinstance(optree, PlaceHolder):
op_range = evaluate_range(optree.get_input(0), update_interval=
update_interval, memoization_map=memoization_map)
if update_interval:
optree.set_interval(op_range)
else:
args_interval = tuple(evaluate_range(op, update_interval=
update_interval, memoization_map=memoization_map) for op in
optree.get_inputs())
args_interval_map = {op: op_interval for op, op_interval in zip
(optree.inputs, args_interval)}
ops_interval_get = lambda op: args_interval_map[op]
op_range = optree.range_function(optree.inputs,
ops_interval_getter=ops_interval_get)
if update_interval:
optree.set_interval(op_range)
Log.report(LOG_VERBOSE_EVALUATE_RANGE, 'range of {} is {}', optree,
op_range)
memoization_map[optree] = op_range
return op_range
def forward_attributes(src, dst):
""" forward compatible attributes from src node to dst node
:param src: source source for attributes values
:type src: ML_Operation
:param dst: destination node for attributes copies
:type dst: ML_Operation
"""
dst.set_tag(src.get_tag())
dst.set_debug(src.get_debug())
dst.set_handle(src.get_handle())
if hasattr(src.attributes, 'init_stage'):
forward_stage_attributes(src, dst)
if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):
dst.likely = src.likely
def forward_stage_attributes(src, dst):
""" copy node's stage attributes from src node to dst node """
dst.attributes.init_stage = src.attributes.init_stage
def depth_node_ordering(start_node, end_nodes):
""" order the node between root start_node end end_nodes
by depth (root first, starting with start_node)
:param start_node: root of the sort (first node)
:type start_node: ML_Operation
:param end_nodes: nodes where the depth sort must end
:type end_nodes: iterator over ML_Operation
:return: depth ordered list of nodes
:rtype: list(ML_Operation)
"""
ordered_list = []
ordered_set = set()
working_list = [start_node]
while working_list != []:
node = working_list.pop(0)
if not node in ordered_set:
ordered_set.add(node)
ordered_list.append(node)
if not is_leaf_node(node) and not node in end_nodes:
for node_op in node.get_inputs():
working_list.append(node_op)
return ordered_list
def logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):
""" Logical/Boolean operand list reduction """
local_list = [node for node in op_list]
while len(local_list) > 1:
op0 = local_list.pop(0)
op1 = local_list.pop(0)
local_list.append(op_ctor(op0, op1, precision=precision))
result = local_list[0]
result.set_attributes(**kw)
return result
<|reserved_special_token_0|>
def uniform_list_check(value_list):
""" Check that value_list is made of only a single value replicated in
each element """
return reduce(lambda acc, value: acc and value == value_list[0],
value_list, True)
def uniform_vector_constant_check(optree):
""" check whether optree is a uniform vector constant """
if isinstance(optree, Constant) and not optree.get_precision(
) is None and optree.get_precision().is_vector_format():
return uniform_list_check(optree.get_value())
return False
def uniform_shift_check(optree):
""" check whether optree is a bit shift by a uniform vector constant """
if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift,
BitArithmeticRightShift)):
return uniform_vector_constant_check(optree.get_input(1)
) or not optree.get_input(1).get_precision().is_vector_format()
return False
def is_false(node):
""" check if node is a Constant node whose value is equal to boolean False """
return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)
def is_true(node):
""" check if node is a Constant node whose value is equal to boolean True """
return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)
def is_scalar_cst(node, value):
""" check if node is a constant node with value equals to value """
return isinstance(node, Constant) and not node.get_precision(
).is_vector_format() and node.get_value() == value
def is_vector_uniform_cst(node, scalar_value):
""" check if node is a vector constant node with each value equals to
scalar_value """
return isinstance(node, Constant) and node.get_precision(
).is_vector_format() and node.get_value() == [scalar_value
] * node.get_precision().get_vector_size()
def extract_tables(node):
""" extract the set of all ML_Table nodes in the graph rooted at node """
processed_set = set([node])
table_set = set()
working_set = [node]
while working_set:
elt = working_set.pop(0)
if isinstance(elt, ML_NewTable):
table_set.add(elt)
elif not isinstance(elt, ML_LeafNode):
for op_node in elt.inputs:
if not op_node in processed_set:
processed_set.add(op_node)
working_set.append(op_node)
return table_set
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def evaluate_comparison_range(node):
""" evaluate the numerical range of Comparison node, if any
else returns None """
return None
def is_comparison(node):
""" test if node is a Comparison node or not """
return isinstance(node, Comparison)
LOG_VERBOSE_EVALUATE_RANGE = Log.LogLevel('EvaluateRangeVerbose')
def evaluate_range(optree, update_interval=False, memoization_map=None):
""" evaluate the range of an Operation node
Args:
optree (ML_Operation): input Node
Return:
sollya Interval: evaluated range of optree or None if no range
could be determined
"""
if memoization_map is None:
memoization_map = {}
init_interval = optree.get_interval()
if not init_interval is None:
return init_interval
else:
if optree in memoization_map:
return memoization_map[optree]
elif isinstance(optree, ML_LeafNode):
op_range = optree.get_interval()
elif is_comparison(optree):
op_range = evaluate_comparison_range(optree)
if update_interval:
optree.set_interval(op_range)
elif isinstance(optree, PlaceHolder):
op_range = evaluate_range(optree.get_input(0), update_interval=
update_interval, memoization_map=memoization_map)
if update_interval:
optree.set_interval(op_range)
else:
args_interval = tuple(evaluate_range(op, update_interval=
update_interval, memoization_map=memoization_map) for op in
optree.get_inputs())
args_interval_map = {op: op_interval for op, op_interval in zip
(optree.inputs, args_interval)}
ops_interval_get = lambda op: args_interval_map[op]
op_range = optree.range_function(optree.inputs,
ops_interval_getter=ops_interval_get)
if update_interval:
optree.set_interval(op_range)
Log.report(LOG_VERBOSE_EVALUATE_RANGE, 'range of {} is {}', optree,
op_range)
memoization_map[optree] = op_range
return op_range
def forward_attributes(src, dst):
""" forward compatible attributes from src node to dst node
:param src: source source for attributes values
:type src: ML_Operation
:param dst: destination node for attributes copies
:type dst: ML_Operation
"""
dst.set_tag(src.get_tag())
dst.set_debug(src.get_debug())
dst.set_handle(src.get_handle())
if hasattr(src.attributes, 'init_stage'):
forward_stage_attributes(src, dst)
if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):
dst.likely = src.likely
def forward_stage_attributes(src, dst):
""" copy node's stage attributes from src node to dst node """
dst.attributes.init_stage = src.attributes.init_stage
def depth_node_ordering(start_node, end_nodes):
""" order the node between root start_node end end_nodes
by depth (root first, starting with start_node)
:param start_node: root of the sort (first node)
:type start_node: ML_Operation
:param end_nodes: nodes where the depth sort must end
:type end_nodes: iterator over ML_Operation
:return: depth ordered list of nodes
:rtype: list(ML_Operation)
"""
ordered_list = []
ordered_set = set()
working_list = [start_node]
while working_list != []:
node = working_list.pop(0)
if not node in ordered_set:
ordered_set.add(node)
ordered_list.append(node)
if not is_leaf_node(node) and not node in end_nodes:
for node_op in node.get_inputs():
working_list.append(node_op)
return ordered_list
def logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):
""" Logical/Boolean operand list reduction """
local_list = [node for node in op_list]
while len(local_list) > 1:
op0 = local_list.pop(0)
op1 = local_list.pop(0)
local_list.append(op_ctor(op0, op1, precision=precision))
result = local_list[0]
result.set_attributes(**kw)
return result
logical_or_reduce = lambda op_list, **kw: logical_reduce(op_list, LogicalOr,
ML_Bool, **kw)
logical_and_reduce = lambda op_list, **kw: logical_reduce(op_list,
LogicalAnd, ML_Bool, **kw)
def uniform_list_check(value_list):
""" Check that value_list is made of only a single value replicated in
each element """
return reduce(lambda acc, value: acc and value == value_list[0],
value_list, True)
def uniform_vector_constant_check(optree):
""" check whether optree is a uniform vector constant """
if isinstance(optree, Constant) and not optree.get_precision(
) is None and optree.get_precision().is_vector_format():
return uniform_list_check(optree.get_value())
return False
def uniform_shift_check(optree):
""" check whether optree is a bit shift by a uniform vector constant """
if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift,
BitArithmeticRightShift)):
return uniform_vector_constant_check(optree.get_input(1)
) or not optree.get_input(1).get_precision().is_vector_format()
return False
def is_false(node):
""" check if node is a Constant node whose value is equal to boolean False """
return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)
def is_true(node):
""" check if node is a Constant node whose value is equal to boolean True """
return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)
def is_scalar_cst(node, value):
""" check if node is a constant node with value equals to value """
return isinstance(node, Constant) and not node.get_precision(
).is_vector_format() and node.get_value() == value
def is_vector_uniform_cst(node, scalar_value):
""" check if node is a vector constant node with each value equals to
scalar_value """
return isinstance(node, Constant) and node.get_precision(
).is_vector_format() and node.get_value() == [scalar_value
] * node.get_precision().get_vector_size()
def extract_tables(node):
""" extract the set of all ML_Table nodes in the graph rooted at node """
processed_set = set([node])
table_set = set()
working_set = [node]
while working_set:
elt = working_set.pop(0)
if isinstance(elt, ML_NewTable):
table_set.add(elt)
elif not isinstance(elt, ML_LeafNode):
for op_node in elt.inputs:
if not op_node in processed_set:
processed_set.add(op_node)
working_set.append(op_node)
return table_set
<|reserved_special_token_1|>
from functools import reduce
from metalibm_core.core.ml_formats import ML_Bool
from metalibm_core.core.ml_operations import ML_LeafNode, Comparison, BooleanOperation, is_leaf_node, LogicalAnd, LogicalOr, Constant, BitLogicLeftShift, BitLogicRightShift, BitArithmeticRightShift
from metalibm_core.core.advanced_operations import PlaceHolder
from metalibm_core.core.ml_table import ML_NewTable
from metalibm_core.utility.log_report import Log
def evaluate_comparison_range(node):
    """ evaluate the numerical range of Comparison node, if any
        else returns None """
    # Stub: comparison nodes currently expose no numerical range information,
    # so evaluate_range always records None for them.
    return None
def is_comparison(node) -> bool:
    """ test if node is a Comparison node or not """
    return isinstance(node, Comparison)
# Dedicated log channel for range-evaluation tracing (used by evaluate_range).
LOG_VERBOSE_EVALUATE_RANGE = Log.LogLevel('EvaluateRangeVerbose')
def evaluate_range(optree, update_interval=False, memoization_map=None):
    """ evaluate the range of an Operation node

        Recursively computes a range for *optree* from the ranges of its
        inputs, caching every result in *memoization_map*.

    Args:
        optree (ML_Operation): input Node
        update_interval (bool): if True, store each computed range back into
            the node via set_interval
        memoization_map (dict): node -> interval cache shared across the
            recursion (created fresh when None)
    Return:
        sollya Interval: evaluated range of optree or None if no range
        could be determined
    """
    if memoization_map is None:
        memoization_map = {}
    # a range pre-defined on the node itself wins over any recomputation
    init_interval = optree.get_interval()
    if not init_interval is None:
        return init_interval
    else:
        if optree in memoization_map:
            return memoization_map[optree]
        elif isinstance(optree, ML_LeafNode):
            op_range = optree.get_interval()
        elif is_comparison(optree):
            # currently always None (see evaluate_comparison_range)
            op_range = evaluate_comparison_range(optree)
            if update_interval:
                optree.set_interval(op_range)
        elif isinstance(optree, PlaceHolder):
            # a PlaceHolder is transparent: forward its first input's range
            op_range = evaluate_range(optree.get_input(0), update_interval=
                update_interval, memoization_map=memoization_map)
            if update_interval:
                optree.set_interval(op_range)
        else:
            # generic case: evaluate each input's range, then let the node's
            # own range_function combine them. A getter is passed (rather
            # than the bare intervals) because some operations derive their
            # range from parameters other than their inputs' intervals.
            args_interval = tuple(evaluate_range(op, update_interval=
                update_interval, memoization_map=memoization_map) for op in
                optree.get_inputs())
            args_interval_map = {op: op_interval for op, op_interval in zip
                (optree.inputs, args_interval)}
            ops_interval_get = lambda op: args_interval_map[op]
            op_range = optree.range_function(optree.inputs,
                ops_interval_getter=ops_interval_get)
            if update_interval:
                optree.set_interval(op_range)
        Log.report(LOG_VERBOSE_EVALUATE_RANGE, 'range of {} is {}', optree,
            op_range)
        memoization_map[optree] = op_range
        return op_range
def forward_attributes(src, dst):
    """ forward compatible attributes from src node to dst node

    :param src: source node for attribute values
    :type src: ML_Operation
    :param dst: destination node for attributes copies
    :type dst: ML_Operation
    """
    dst.set_tag(src.get_tag())
    dst.set_debug(src.get_debug())
    dst.set_handle(src.get_handle())
    # the pipeline init_stage attribute only exists on some nodes
    if hasattr(src.attributes, 'init_stage'):
        forward_stage_attributes(src, dst)
    # likelihood only carries over between two boolean operations
    if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):
        dst.likely = src.likely
def forward_stage_attributes(src, dst):
    """ copy the pipeline init_stage attribute from src node to dst node """
    dst.attributes.init_stage = src.attributes.init_stage
def depth_node_ordering(start_node, end_nodes):
    """ order the node between root start_node end end_nodes
        by depth (root first, starting with start_node)

        Performs a breadth-first traversal; each node appears once, in the
        order it is first reached.

        :param start_node: root of the sort (first node)
        :type start_node: ML_Operation
        :param end_nodes: nodes where the depth sort must end
        :type end_nodes: iterator over ML_Operation
        :return: depth ordered list of nodes
        :rtype: list(ML_Operation)
    """
    # deque gives O(1) pops from the left where list.pop(0) is O(n)
    from collections import deque
    ordered_list = []
    ordered_set = set()
    working_list = deque([start_node])
    while working_list:
        node = working_list.popleft()
        if node not in ordered_set:
            ordered_set.add(node)
            ordered_list.append(node)
            # do not descend below leaf nodes or explicit end nodes
            if not is_leaf_node(node) and node not in end_nodes:
                working_list.extend(node.get_inputs())
    return ordered_list
def logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):
    """ Logical/Boolean operand list reduction

        :param op_list: operand nodes to reduce (must be non-empty)
        :param op_ctor: 2-operand boolean constructor used to combine
            operands (default LogicalOr)
        :param precision: format of every intermediate and final node
        :param kw: attributes applied to the resulting root node
    """
    # defensive copy: the queue is consumed destructively below
    local_list = [node for node in op_list]
    # pop pairs from the front, append the combination at the back: this
    # queue discipline yields a balanced reduction tree rather than a
    # left-skewed chain of operations
    while len(local_list) > 1:
        op0 = local_list.pop(0)
        op1 = local_list.pop(0)
        local_list.append(op_ctor(op0, op1, precision=precision))
    # NOTE(review): an empty op_list raises IndexError here -- verify
    # callers never pass one
    result = local_list[0]
    result.set_attributes(**kw)
    return result
# Specialization of logical_reduce to an OR-reduction over ML_Bool operands.
logical_or_reduce = lambda op_list, **kw: logical_reduce(op_list, LogicalOr,
    ML_Bool, **kw)
# Specialization of logical_reduce to an AND-reduction over ML_Bool operands.
logical_and_reduce = lambda op_list, **kw: logical_reduce(op_list,
    LogicalAnd, ML_Bool, **kw)
def uniform_list_check(value_list):
    """ Check that value_list is made of only a single value replicated in
        each element """
    # all() short-circuits on the first mismatch (the reduce-based original
    # always scanned the whole list); an empty list is considered uniform,
    # matching the original behaviour
    return all(value == value_list[0] for value in value_list)
def uniform_vector_constant_check(optree):
    """ check whether optree is a uniform vector constant """
    # precision may legitimately be None, so it is checked before being
    # queried for vector-ness
    if isinstance(optree, Constant) and not optree.get_precision(
        ) is None and optree.get_precision().is_vector_format():
        return uniform_list_check(optree.get_value())
    return False
def uniform_shift_check(optree):
    """ check whether optree is a bit shift by a uniform vector constant """
    if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift,
        BitArithmeticRightShift)):
        # a scalar (non-vector) shift amount also counts as uniform
        return uniform_vector_constant_check(optree.get_input(1)
            ) or not optree.get_input(1).get_precision().is_vector_format()
    return False
def is_false(node):
    """ check if node is a Constant node whose value is equal to boolean False """
    # covers both scalar constants and uniform vector constants
    return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)
def is_true(node):
    """ check if node is a Constant node whose value is equal to boolean True """
    # covers both scalar constants and uniform vector constants
    return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)
def is_scalar_cst(node, value):
    """ check if node is a constant node with value equals to value """
    # NOTE(review): unlike uniform_vector_constant_check, this assumes
    # get_precision() is not None on Constant nodes -- verify with callers
    return isinstance(node, Constant) and not node.get_precision(
        ).is_vector_format() and node.get_value() == value
def is_vector_uniform_cst(node, scalar_value):
    """ check if node is a vector constant node with each value equals to
        scalar_value """
    # a vector Constant's value is a list with one entry per vector lane
    return isinstance(node, Constant) and node.get_precision(
        ).is_vector_format() and node.get_value() == [scalar_value
        ] * node.get_precision().get_vector_size()
def extract_tables(node):
    """ extract the set of all ML_Table nodes in the graph rooted at node

        Iterative breadth-first traversal; processed_set guards against
        revisiting shared sub-expressions.
    """
    # deque gives O(1) pops from the left where list.pop(0) is O(n)
    from collections import deque
    processed_set = set([node])
    table_set = set()
    working_set = deque([node])
    while working_set:
        elt = working_set.popleft()
        if isinstance(elt, ML_NewTable):
            table_set.add(elt)
        elif not isinstance(elt, ML_LeafNode):
            for op_node in elt.inputs:
                if op_node not in processed_set:
                    processed_set.add(op_node)
                    working_set.append(op_node)
    return table_set
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2018 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
# Author(s): Nicolas Brunie (nbrunie@kalray.eu)
# Created: Aug 8th, 2017
# last-modified: Mar 7th, 2018
###############################################################################
from functools import reduce
from metalibm_core.core.ml_formats import ML_Bool
from metalibm_core.core.ml_operations import (
ML_LeafNode, Comparison, BooleanOperation,
is_leaf_node,
LogicalAnd, LogicalOr, Constant,
BitLogicLeftShift, BitLogicRightShift,
BitArithmeticRightShift,
)
from metalibm_core.core.advanced_operations import PlaceHolder
from metalibm_core.core.ml_table import ML_NewTable
from metalibm_core.utility.log_report import Log
def evaluate_comparison_range(node):
""" evaluate the numerical range of Comparison node, if any
else returns None """
return None
def is_comparison(node):
""" test if node is a Comparison node or not """
return isinstance(node, Comparison)
LOG_VERBOSE_EVALUATE_RANGE = Log.LogLevel("EvaluateRangeVerbose")
## Assuming @p optree has no pre-defined range, recursively compute a range
# from the node inputs
def evaluate_range(optree, update_interval=False, memoization_map=None):
""" evaluate the range of an Operation node
Args:
optree (ML_Operation): input Node
Return:
sollya Interval: evaluated range of optree or None if no range
could be determined
"""
if memoization_map is None:
memoization_map = {}
init_interval = optree.get_interval()
if not init_interval is None:
return init_interval
else:
if optree in memoization_map:
return memoization_map[optree]
elif isinstance(optree, ML_LeafNode):
op_range = optree.get_interval()
elif is_comparison(optree):
op_range = evaluate_comparison_range(optree)
if update_interval:
optree.set_interval(op_range)
elif isinstance(optree, PlaceHolder):
op_range = evaluate_range(optree.get_input(0),
update_interval=update_interval,
memoization_map=memoization_map)
if update_interval:
optree.set_interval(op_range)
else:
args_interval = tuple(
evaluate_range(op, update_interval=update_interval,
memoization_map=memoization_map
) for op in optree.get_inputs())
args_interval_map = {op: op_interval for op, op_interval in zip(optree.inputs, args_interval)}
# evaluate_range cannot rely on bare_range_function only as some
# operations (e.g. CountLeadingZeros) do not base interval computation
# on their inputs' intervals but on other parameters
ops_interval_get = lambda op: args_interval_map[op]
op_range = optree.range_function(optree.inputs,
ops_interval_getter=ops_interval_get)
if update_interval:
optree.set_interval(op_range)
Log.report(LOG_VERBOSE_EVALUATE_RANGE, "range of {} is {}", optree, op_range)
memoization_map[optree] = op_range
return op_range
def forward_attributes(src, dst):
    """ copy the compatible attributes of src onto dst

    :param src: node providing the attribute values
    :type src: ML_Operation
    :param dst: node receiving the attribute copies
    :type dst: ML_Operation
    """
    # plain attribute forwarding (setters are independent of each other)
    dst.set_tag(src.get_tag())
    dst.set_handle(src.get_handle())
    dst.set_debug(src.get_debug())
    # pipeline-stage attribute only exists on some nodes
    if hasattr(src.attributes, "init_stage"):
        forward_stage_attributes(src, dst)
    both_boolean = isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation)
    if both_boolean:
        dst.likely = src.likely
def forward_stage_attributes(src, dst):
    """ replicate the pipeline init_stage attribute of src onto dst """
    dst.attributes.init_stage = src.attributes.init_stage
def depth_node_ordering(start_node, end_nodes):
    """ order the node between root start_node and end_nodes
        by depth (root first, starting with start_node)

    :param start_node: root of the sort (first node)
    :type start_node: ML_Operation
    :param end_nodes: nodes where the depth sort must end
    :type end_nodes: iterator over ML_Operation
    :return: depth ordered list of nodes
    :rtype: list(ML_Operation)
    """
    # local import: deque gives O(1) popleft where list.pop(0) is O(n)
    from collections import deque
    ordered_list = []
    ordered_set = set()
    working_list = deque([start_node])
    while working_list:
        node = working_list.popleft()
        if node not in ordered_set:
            ordered_set.add(node)
            ordered_list.append(node)
            # do not traverse past leaf nodes nor past explicit end nodes
            if not is_leaf_node(node) and node not in end_nodes:
                working_list.extend(node.get_inputs())
    return ordered_list
def logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):
    """ Logical/Boolean operand list reduction

    :param op_list: non-empty list of operand nodes to reduce
    :param op_ctor: binary boolean operation constructor used to combine nodes
    :param precision: format of the intermediate and final nodes
    :param kw: attributes forwarded to the reduction result
    :return: root node of the reduction tree
    :raises ValueError: if op_list is empty
    """
    if not op_list:
        # previously crashed with a bare IndexError on local_list[0]
        raise ValueError("logical_reduce requires a non-empty operand list")
    local_list = list(op_list)
    # pairwise reduction: combining the two front operands and queueing the
    # result at the back yields a balanced tree rather than a linear chain
    while len(local_list) > 1:
        op0 = local_list.pop(0)
        op1 = local_list.pop(0)
        local_list.append(
            op_ctor(op0, op1, precision=precision)
        )
    # assigning attributes to the resulting node
    result = local_list[0]
    result.set_attributes(**kw)
    return result
## Specialization of logical reduce to OR operation
def logical_or_reduce(op_list, **kw):
    """ reduce op_list through a tree of LogicalOr nodes (ML_Bool result) """
    return logical_reduce(op_list, LogicalOr, ML_Bool, **kw)

## Specialization of logical reduce to AND operation
def logical_and_reduce(op_list, **kw):
    """ reduce op_list through a tree of LogicalAnd nodes (ML_Bool result) """
    return logical_reduce(op_list, LogicalAnd, ML_Bool, **kw)
def uniform_list_check(value_list):
    """ Check that value_list is made of only a single value replicated in
        each element (vacuously True for an empty list) """
    # all() short-circuits on the first mismatch, unlike the former reduce()
    return all(value == value_list[0] for value in value_list)
def uniform_vector_constant_check(optree):
    """ check whether optree is a uniform vector constant """
    if not isinstance(optree, Constant):
        return False
    fmt = optree.get_precision()
    if fmt is None or not fmt.is_vector_format():
        return False
    return uniform_list_check(optree.get_value())
def uniform_shift_check(optree):
    """ check whether optree is a bit shift whose shift amount is either a
        uniform vector constant or a scalar """
    shift_classes = (BitLogicLeftShift, BitLogicRightShift,
                     BitArithmeticRightShift)
    if not isinstance(optree, shift_classes):
        return False
    shift_amount = optree.get_input(1)
    if uniform_vector_constant_check(shift_amount):
        return True
    return not shift_amount.get_precision().is_vector_format()
def is_false(node):
    """ check if node is a Constant node whose value is equal to boolean False """
    if is_scalar_cst(node, False):
        return True
    return is_vector_uniform_cst(node, False)
def is_true(node):
    """ check if node is a Constant node whose value is equal to boolean True """
    if is_scalar_cst(node, True):
        return True
    return is_vector_uniform_cst(node, True)
def is_scalar_cst(node, value):
    """ check if node is a scalar-format Constant node whose value equals
        value

    :param node: node under test
    :param value: scalar value to compare against
    :return: True iff node is a scalar Constant carrying value
    """
    if not isinstance(node, Constant):
        return False
    precision = node.get_precision()
    # a Constant may carry no format (uniform_vector_constant_check guards
    # the same case): treat it as a non-match instead of raising
    # AttributeError on None.is_vector_format()
    if precision is None or precision.is_vector_format():
        return False
    return node.get_value() == value
def is_vector_uniform_cst(node, scalar_value):
    """ check if node is a vector Constant node with each element equal to
        scalar_value

    :param node: node under test
    :param scalar_value: expected value of every vector lane
    :return: True iff node is a vector Constant uniformly filled with
             scalar_value
    """
    if not isinstance(node, Constant):
        return False
    precision = node.get_precision()
    # guard against format-less Constant nodes (consistent with
    # uniform_vector_constant_check) rather than raising AttributeError
    if precision is None or not precision.is_vector_format():
        return False
    return node.get_value() == [scalar_value] * precision.get_vector_size()
def extract_tables(node):
    """ extract the set of all ML_NewTable nodes in the graph rooted at node """
    # local import: deque gives O(1) popleft where list.pop(0) is O(n)
    from collections import deque
    processed_set = {node}
    table_set = set()
    working_set = deque([node])
    while working_set:
        elt = working_set.popleft()
        if isinstance(elt, ML_NewTable):
            table_set.add(elt)
        elif not isinstance(elt, ML_LeafNode):
            for op_node in elt.inputs:
                if op_node not in processed_set:
                    processed_set.add(op_node)
                    working_set.append(op_node)
    return table_set
|
flexible
|
{
"blob_id": "3a05ebee8e70321fe53637b4792f5821ce7044be",
"index": 4264,
"step-1": "<mask token>\n\n\ndef evaluate_comparison_range(node):\n \"\"\" evaluate the numerical range of Comparison node, if any\n else returns None \"\"\"\n return None\n\n\ndef is_comparison(node):\n \"\"\" test if node is a Comparison node or not \"\"\"\n return isinstance(node, Comparison)\n\n\n<mask token>\n\n\ndef evaluate_range(optree, update_interval=False, memoization_map=None):\n \"\"\" evaluate the range of an Operation node\n\n Args:\n optree (ML_Operation): input Node\n\n Return:\n sollya Interval: evaluated range of optree or None if no range\n could be determined\n \"\"\"\n if memoization_map is None:\n memoization_map = {}\n init_interval = optree.get_interval()\n if not init_interval is None:\n return init_interval\n else:\n if optree in memoization_map:\n return memoization_map[optree]\n elif isinstance(optree, ML_LeafNode):\n op_range = optree.get_interval()\n elif is_comparison(optree):\n op_range = evaluate_comparison_range(optree)\n if update_interval:\n optree.set_interval(op_range)\n elif isinstance(optree, PlaceHolder):\n op_range = evaluate_range(optree.get_input(0), update_interval=\n update_interval, memoization_map=memoization_map)\n if update_interval:\n optree.set_interval(op_range)\n else:\n args_interval = tuple(evaluate_range(op, update_interval=\n update_interval, memoization_map=memoization_map) for op in\n optree.get_inputs())\n args_interval_map = {op: op_interval for op, op_interval in zip\n (optree.inputs, args_interval)}\n ops_interval_get = lambda op: args_interval_map[op]\n op_range = optree.range_function(optree.inputs,\n ops_interval_getter=ops_interval_get)\n if update_interval:\n optree.set_interval(op_range)\n Log.report(LOG_VERBOSE_EVALUATE_RANGE, 'range of {} is {}', optree,\n op_range)\n memoization_map[optree] = op_range\n return op_range\n\n\ndef forward_attributes(src, dst):\n \"\"\" forward compatible attributes from src node to dst node\n\n :param src: source source for attributes values\n :type src: 
ML_Operation\n :param dst: destination node for attributes copies\n :type dst: ML_Operation\n \"\"\"\n dst.set_tag(src.get_tag())\n dst.set_debug(src.get_debug())\n dst.set_handle(src.get_handle())\n if hasattr(src.attributes, 'init_stage'):\n forward_stage_attributes(src, dst)\n if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):\n dst.likely = src.likely\n\n\ndef forward_stage_attributes(src, dst):\n \"\"\" copy node's stage attributes from src node to dst node \"\"\"\n dst.attributes.init_stage = src.attributes.init_stage\n\n\ndef depth_node_ordering(start_node, end_nodes):\n \"\"\" order the node between root start_node end end_nodes\n by depth (root first, starting with start_node)\n\n :param start_node: root of the sort (first node)\n :type start_node: ML_Operation\n :param end_nodes: nodes where the depth sort must end\n :type end_nodes: iterator over ML_Operation\n :return: depth ordered list of nodes\n :rtype: list(ML_Operation)\n \"\"\"\n ordered_list = []\n ordered_set = set()\n working_list = [start_node]\n while working_list != []:\n node = working_list.pop(0)\n if not node in ordered_set:\n ordered_set.add(node)\n ordered_list.append(node)\n if not is_leaf_node(node) and not node in end_nodes:\n for node_op in node.get_inputs():\n working_list.append(node_op)\n return ordered_list\n\n\ndef logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):\n \"\"\" Logical/Boolean operand list reduction \"\"\"\n local_list = [node for node in op_list]\n while len(local_list) > 1:\n op0 = local_list.pop(0)\n op1 = local_list.pop(0)\n local_list.append(op_ctor(op0, op1, precision=precision))\n result = local_list[0]\n result.set_attributes(**kw)\n return result\n\n\n<mask token>\n\n\ndef uniform_list_check(value_list):\n \"\"\" Check that value_list is made of only a single value replicated in\n each element \"\"\"\n return reduce(lambda acc, value: acc and value == value_list[0],\n value_list, True)\n\n\n<mask token>\n\n\ndef 
uniform_shift_check(optree):\n \"\"\" check whether optree is a bit shift by a uniform vector constant \"\"\"\n if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift,\n BitArithmeticRightShift)):\n return uniform_vector_constant_check(optree.get_input(1)\n ) or not optree.get_input(1).get_precision().is_vector_format()\n return False\n\n\ndef is_false(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean False \"\"\"\n return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)\n\n\ndef is_true(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean True \"\"\"\n return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)\n\n\ndef is_scalar_cst(node, value):\n \"\"\" check if node is a constant node with value equals to value \"\"\"\n return isinstance(node, Constant) and not node.get_precision(\n ).is_vector_format() and node.get_value() == value\n\n\ndef is_vector_uniform_cst(node, scalar_value):\n \"\"\" check if node is a vector constant node with each value equals to\n scalar_value \"\"\"\n return isinstance(node, Constant) and node.get_precision(\n ).is_vector_format() and node.get_value() == [scalar_value\n ] * node.get_precision().get_vector_size()\n\n\ndef extract_tables(node):\n \"\"\" extract the set of all ML_Table nodes in the graph rooted at node \"\"\"\n processed_set = set([node])\n table_set = set()\n working_set = [node]\n while working_set:\n elt = working_set.pop(0)\n if isinstance(elt, ML_NewTable):\n table_set.add(elt)\n elif not isinstance(elt, ML_LeafNode):\n for op_node in elt.inputs:\n if not op_node in processed_set:\n processed_set.add(op_node)\n working_set.append(op_node)\n return table_set\n",
"step-2": "<mask token>\n\n\ndef evaluate_comparison_range(node):\n \"\"\" evaluate the numerical range of Comparison node, if any\n else returns None \"\"\"\n return None\n\n\ndef is_comparison(node):\n \"\"\" test if node is a Comparison node or not \"\"\"\n return isinstance(node, Comparison)\n\n\n<mask token>\n\n\ndef evaluate_range(optree, update_interval=False, memoization_map=None):\n \"\"\" evaluate the range of an Operation node\n\n Args:\n optree (ML_Operation): input Node\n\n Return:\n sollya Interval: evaluated range of optree or None if no range\n could be determined\n \"\"\"\n if memoization_map is None:\n memoization_map = {}\n init_interval = optree.get_interval()\n if not init_interval is None:\n return init_interval\n else:\n if optree in memoization_map:\n return memoization_map[optree]\n elif isinstance(optree, ML_LeafNode):\n op_range = optree.get_interval()\n elif is_comparison(optree):\n op_range = evaluate_comparison_range(optree)\n if update_interval:\n optree.set_interval(op_range)\n elif isinstance(optree, PlaceHolder):\n op_range = evaluate_range(optree.get_input(0), update_interval=\n update_interval, memoization_map=memoization_map)\n if update_interval:\n optree.set_interval(op_range)\n else:\n args_interval = tuple(evaluate_range(op, update_interval=\n update_interval, memoization_map=memoization_map) for op in\n optree.get_inputs())\n args_interval_map = {op: op_interval for op, op_interval in zip\n (optree.inputs, args_interval)}\n ops_interval_get = lambda op: args_interval_map[op]\n op_range = optree.range_function(optree.inputs,\n ops_interval_getter=ops_interval_get)\n if update_interval:\n optree.set_interval(op_range)\n Log.report(LOG_VERBOSE_EVALUATE_RANGE, 'range of {} is {}', optree,\n op_range)\n memoization_map[optree] = op_range\n return op_range\n\n\ndef forward_attributes(src, dst):\n \"\"\" forward compatible attributes from src node to dst node\n\n :param src: source source for attributes values\n :type src: 
ML_Operation\n :param dst: destination node for attributes copies\n :type dst: ML_Operation\n \"\"\"\n dst.set_tag(src.get_tag())\n dst.set_debug(src.get_debug())\n dst.set_handle(src.get_handle())\n if hasattr(src.attributes, 'init_stage'):\n forward_stage_attributes(src, dst)\n if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):\n dst.likely = src.likely\n\n\ndef forward_stage_attributes(src, dst):\n \"\"\" copy node's stage attributes from src node to dst node \"\"\"\n dst.attributes.init_stage = src.attributes.init_stage\n\n\ndef depth_node_ordering(start_node, end_nodes):\n \"\"\" order the node between root start_node end end_nodes\n by depth (root first, starting with start_node)\n\n :param start_node: root of the sort (first node)\n :type start_node: ML_Operation\n :param end_nodes: nodes where the depth sort must end\n :type end_nodes: iterator over ML_Operation\n :return: depth ordered list of nodes\n :rtype: list(ML_Operation)\n \"\"\"\n ordered_list = []\n ordered_set = set()\n working_list = [start_node]\n while working_list != []:\n node = working_list.pop(0)\n if not node in ordered_set:\n ordered_set.add(node)\n ordered_list.append(node)\n if not is_leaf_node(node) and not node in end_nodes:\n for node_op in node.get_inputs():\n working_list.append(node_op)\n return ordered_list\n\n\ndef logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):\n \"\"\" Logical/Boolean operand list reduction \"\"\"\n local_list = [node for node in op_list]\n while len(local_list) > 1:\n op0 = local_list.pop(0)\n op1 = local_list.pop(0)\n local_list.append(op_ctor(op0, op1, precision=precision))\n result = local_list[0]\n result.set_attributes(**kw)\n return result\n\n\n<mask token>\n\n\ndef uniform_list_check(value_list):\n \"\"\" Check that value_list is made of only a single value replicated in\n each element \"\"\"\n return reduce(lambda acc, value: acc and value == value_list[0],\n value_list, True)\n\n\ndef 
uniform_vector_constant_check(optree):\n \"\"\" check whether optree is a uniform vector constant \"\"\"\n if isinstance(optree, Constant) and not optree.get_precision(\n ) is None and optree.get_precision().is_vector_format():\n return uniform_list_check(optree.get_value())\n return False\n\n\ndef uniform_shift_check(optree):\n \"\"\" check whether optree is a bit shift by a uniform vector constant \"\"\"\n if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift,\n BitArithmeticRightShift)):\n return uniform_vector_constant_check(optree.get_input(1)\n ) or not optree.get_input(1).get_precision().is_vector_format()\n return False\n\n\ndef is_false(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean False \"\"\"\n return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)\n\n\ndef is_true(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean True \"\"\"\n return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)\n\n\ndef is_scalar_cst(node, value):\n \"\"\" check if node is a constant node with value equals to value \"\"\"\n return isinstance(node, Constant) and not node.get_precision(\n ).is_vector_format() and node.get_value() == value\n\n\ndef is_vector_uniform_cst(node, scalar_value):\n \"\"\" check if node is a vector constant node with each value equals to\n scalar_value \"\"\"\n return isinstance(node, Constant) and node.get_precision(\n ).is_vector_format() and node.get_value() == [scalar_value\n ] * node.get_precision().get_vector_size()\n\n\ndef extract_tables(node):\n \"\"\" extract the set of all ML_Table nodes in the graph rooted at node \"\"\"\n processed_set = set([node])\n table_set = set()\n working_set = [node]\n while working_set:\n elt = working_set.pop(0)\n if isinstance(elt, ML_NewTable):\n table_set.add(elt)\n elif not isinstance(elt, ML_LeafNode):\n for op_node in elt.inputs:\n if not op_node in processed_set:\n processed_set.add(op_node)\n 
working_set.append(op_node)\n return table_set\n",
"step-3": "<mask token>\n\n\ndef evaluate_comparison_range(node):\n \"\"\" evaluate the numerical range of Comparison node, if any\n else returns None \"\"\"\n return None\n\n\ndef is_comparison(node):\n \"\"\" test if node is a Comparison node or not \"\"\"\n return isinstance(node, Comparison)\n\n\nLOG_VERBOSE_EVALUATE_RANGE = Log.LogLevel('EvaluateRangeVerbose')\n\n\ndef evaluate_range(optree, update_interval=False, memoization_map=None):\n \"\"\" evaluate the range of an Operation node\n\n Args:\n optree (ML_Operation): input Node\n\n Return:\n sollya Interval: evaluated range of optree or None if no range\n could be determined\n \"\"\"\n if memoization_map is None:\n memoization_map = {}\n init_interval = optree.get_interval()\n if not init_interval is None:\n return init_interval\n else:\n if optree in memoization_map:\n return memoization_map[optree]\n elif isinstance(optree, ML_LeafNode):\n op_range = optree.get_interval()\n elif is_comparison(optree):\n op_range = evaluate_comparison_range(optree)\n if update_interval:\n optree.set_interval(op_range)\n elif isinstance(optree, PlaceHolder):\n op_range = evaluate_range(optree.get_input(0), update_interval=\n update_interval, memoization_map=memoization_map)\n if update_interval:\n optree.set_interval(op_range)\n else:\n args_interval = tuple(evaluate_range(op, update_interval=\n update_interval, memoization_map=memoization_map) for op in\n optree.get_inputs())\n args_interval_map = {op: op_interval for op, op_interval in zip\n (optree.inputs, args_interval)}\n ops_interval_get = lambda op: args_interval_map[op]\n op_range = optree.range_function(optree.inputs,\n ops_interval_getter=ops_interval_get)\n if update_interval:\n optree.set_interval(op_range)\n Log.report(LOG_VERBOSE_EVALUATE_RANGE, 'range of {} is {}', optree,\n op_range)\n memoization_map[optree] = op_range\n return op_range\n\n\ndef forward_attributes(src, dst):\n \"\"\" forward compatible attributes from src node to dst node\n\n :param src: 
source source for attributes values\n :type src: ML_Operation\n :param dst: destination node for attributes copies\n :type dst: ML_Operation\n \"\"\"\n dst.set_tag(src.get_tag())\n dst.set_debug(src.get_debug())\n dst.set_handle(src.get_handle())\n if hasattr(src.attributes, 'init_stage'):\n forward_stage_attributes(src, dst)\n if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):\n dst.likely = src.likely\n\n\ndef forward_stage_attributes(src, dst):\n \"\"\" copy node's stage attributes from src node to dst node \"\"\"\n dst.attributes.init_stage = src.attributes.init_stage\n\n\ndef depth_node_ordering(start_node, end_nodes):\n \"\"\" order the node between root start_node end end_nodes\n by depth (root first, starting with start_node)\n\n :param start_node: root of the sort (first node)\n :type start_node: ML_Operation\n :param end_nodes: nodes where the depth sort must end\n :type end_nodes: iterator over ML_Operation\n :return: depth ordered list of nodes\n :rtype: list(ML_Operation)\n \"\"\"\n ordered_list = []\n ordered_set = set()\n working_list = [start_node]\n while working_list != []:\n node = working_list.pop(0)\n if not node in ordered_set:\n ordered_set.add(node)\n ordered_list.append(node)\n if not is_leaf_node(node) and not node in end_nodes:\n for node_op in node.get_inputs():\n working_list.append(node_op)\n return ordered_list\n\n\ndef logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):\n \"\"\" Logical/Boolean operand list reduction \"\"\"\n local_list = [node for node in op_list]\n while len(local_list) > 1:\n op0 = local_list.pop(0)\n op1 = local_list.pop(0)\n local_list.append(op_ctor(op0, op1, precision=precision))\n result = local_list[0]\n result.set_attributes(**kw)\n return result\n\n\nlogical_or_reduce = lambda op_list, **kw: logical_reduce(op_list, LogicalOr,\n ML_Bool, **kw)\nlogical_and_reduce = lambda op_list, **kw: logical_reduce(op_list,\n LogicalAnd, ML_Bool, **kw)\n\n\ndef 
uniform_list_check(value_list):\n \"\"\" Check that value_list is made of only a single value replicated in\n each element \"\"\"\n return reduce(lambda acc, value: acc and value == value_list[0],\n value_list, True)\n\n\ndef uniform_vector_constant_check(optree):\n \"\"\" check whether optree is a uniform vector constant \"\"\"\n if isinstance(optree, Constant) and not optree.get_precision(\n ) is None and optree.get_precision().is_vector_format():\n return uniform_list_check(optree.get_value())\n return False\n\n\ndef uniform_shift_check(optree):\n \"\"\" check whether optree is a bit shift by a uniform vector constant \"\"\"\n if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift,\n BitArithmeticRightShift)):\n return uniform_vector_constant_check(optree.get_input(1)\n ) or not optree.get_input(1).get_precision().is_vector_format()\n return False\n\n\ndef is_false(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean False \"\"\"\n return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)\n\n\ndef is_true(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean True \"\"\"\n return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)\n\n\ndef is_scalar_cst(node, value):\n \"\"\" check if node is a constant node with value equals to value \"\"\"\n return isinstance(node, Constant) and not node.get_precision(\n ).is_vector_format() and node.get_value() == value\n\n\ndef is_vector_uniform_cst(node, scalar_value):\n \"\"\" check if node is a vector constant node with each value equals to\n scalar_value \"\"\"\n return isinstance(node, Constant) and node.get_precision(\n ).is_vector_format() and node.get_value() == [scalar_value\n ] * node.get_precision().get_vector_size()\n\n\ndef extract_tables(node):\n \"\"\" extract the set of all ML_Table nodes in the graph rooted at node \"\"\"\n processed_set = set([node])\n table_set = set()\n working_set = [node]\n while working_set:\n elt = 
working_set.pop(0)\n if isinstance(elt, ML_NewTable):\n table_set.add(elt)\n elif not isinstance(elt, ML_LeafNode):\n for op_node in elt.inputs:\n if not op_node in processed_set:\n processed_set.add(op_node)\n working_set.append(op_node)\n return table_set\n",
"step-4": "from functools import reduce\nfrom metalibm_core.core.ml_formats import ML_Bool\nfrom metalibm_core.core.ml_operations import ML_LeafNode, Comparison, BooleanOperation, is_leaf_node, LogicalAnd, LogicalOr, Constant, BitLogicLeftShift, BitLogicRightShift, BitArithmeticRightShift\nfrom metalibm_core.core.advanced_operations import PlaceHolder\nfrom metalibm_core.core.ml_table import ML_NewTable\nfrom metalibm_core.utility.log_report import Log\n\n\ndef evaluate_comparison_range(node):\n \"\"\" evaluate the numerical range of Comparison node, if any\n else returns None \"\"\"\n return None\n\n\ndef is_comparison(node):\n \"\"\" test if node is a Comparison node or not \"\"\"\n return isinstance(node, Comparison)\n\n\nLOG_VERBOSE_EVALUATE_RANGE = Log.LogLevel('EvaluateRangeVerbose')\n\n\ndef evaluate_range(optree, update_interval=False, memoization_map=None):\n \"\"\" evaluate the range of an Operation node\n\n Args:\n optree (ML_Operation): input Node\n\n Return:\n sollya Interval: evaluated range of optree or None if no range\n could be determined\n \"\"\"\n if memoization_map is None:\n memoization_map = {}\n init_interval = optree.get_interval()\n if not init_interval is None:\n return init_interval\n else:\n if optree in memoization_map:\n return memoization_map[optree]\n elif isinstance(optree, ML_LeafNode):\n op_range = optree.get_interval()\n elif is_comparison(optree):\n op_range = evaluate_comparison_range(optree)\n if update_interval:\n optree.set_interval(op_range)\n elif isinstance(optree, PlaceHolder):\n op_range = evaluate_range(optree.get_input(0), update_interval=\n update_interval, memoization_map=memoization_map)\n if update_interval:\n optree.set_interval(op_range)\n else:\n args_interval = tuple(evaluate_range(op, update_interval=\n update_interval, memoization_map=memoization_map) for op in\n optree.get_inputs())\n args_interval_map = {op: op_interval for op, op_interval in zip\n (optree.inputs, args_interval)}\n ops_interval_get = 
lambda op: args_interval_map[op]\n op_range = optree.range_function(optree.inputs,\n ops_interval_getter=ops_interval_get)\n if update_interval:\n optree.set_interval(op_range)\n Log.report(LOG_VERBOSE_EVALUATE_RANGE, 'range of {} is {}', optree,\n op_range)\n memoization_map[optree] = op_range\n return op_range\n\n\ndef forward_attributes(src, dst):\n \"\"\" forward compatible attributes from src node to dst node\n\n :param src: source source for attributes values\n :type src: ML_Operation\n :param dst: destination node for attributes copies\n :type dst: ML_Operation\n \"\"\"\n dst.set_tag(src.get_tag())\n dst.set_debug(src.get_debug())\n dst.set_handle(src.get_handle())\n if hasattr(src.attributes, 'init_stage'):\n forward_stage_attributes(src, dst)\n if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):\n dst.likely = src.likely\n\n\ndef forward_stage_attributes(src, dst):\n \"\"\" copy node's stage attributes from src node to dst node \"\"\"\n dst.attributes.init_stage = src.attributes.init_stage\n\n\ndef depth_node_ordering(start_node, end_nodes):\n \"\"\" order the node between root start_node end end_nodes\n by depth (root first, starting with start_node)\n\n :param start_node: root of the sort (first node)\n :type start_node: ML_Operation\n :param end_nodes: nodes where the depth sort must end\n :type end_nodes: iterator over ML_Operation\n :return: depth ordered list of nodes\n :rtype: list(ML_Operation)\n \"\"\"\n ordered_list = []\n ordered_set = set()\n working_list = [start_node]\n while working_list != []:\n node = working_list.pop(0)\n if not node in ordered_set:\n ordered_set.add(node)\n ordered_list.append(node)\n if not is_leaf_node(node) and not node in end_nodes:\n for node_op in node.get_inputs():\n working_list.append(node_op)\n return ordered_list\n\n\ndef logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):\n \"\"\" Logical/Boolean operand list reduction \"\"\"\n local_list = [node for node in op_list]\n 
while len(local_list) > 1:\n op0 = local_list.pop(0)\n op1 = local_list.pop(0)\n local_list.append(op_ctor(op0, op1, precision=precision))\n result = local_list[0]\n result.set_attributes(**kw)\n return result\n\n\nlogical_or_reduce = lambda op_list, **kw: logical_reduce(op_list, LogicalOr,\n ML_Bool, **kw)\nlogical_and_reduce = lambda op_list, **kw: logical_reduce(op_list,\n LogicalAnd, ML_Bool, **kw)\n\n\ndef uniform_list_check(value_list):\n \"\"\" Check that value_list is made of only a single value replicated in\n each element \"\"\"\n return reduce(lambda acc, value: acc and value == value_list[0],\n value_list, True)\n\n\ndef uniform_vector_constant_check(optree):\n \"\"\" check whether optree is a uniform vector constant \"\"\"\n if isinstance(optree, Constant) and not optree.get_precision(\n ) is None and optree.get_precision().is_vector_format():\n return uniform_list_check(optree.get_value())\n return False\n\n\ndef uniform_shift_check(optree):\n \"\"\" check whether optree is a bit shift by a uniform vector constant \"\"\"\n if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift,\n BitArithmeticRightShift)):\n return uniform_vector_constant_check(optree.get_input(1)\n ) or not optree.get_input(1).get_precision().is_vector_format()\n return False\n\n\ndef is_false(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean False \"\"\"\n return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)\n\n\ndef is_true(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean True \"\"\"\n return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)\n\n\ndef is_scalar_cst(node, value):\n \"\"\" check if node is a constant node with value equals to value \"\"\"\n return isinstance(node, Constant) and not node.get_precision(\n ).is_vector_format() and node.get_value() == value\n\n\ndef is_vector_uniform_cst(node, scalar_value):\n \"\"\" check if node is a vector constant node with each value 
equals to\n scalar_value \"\"\"\n return isinstance(node, Constant) and node.get_precision(\n ).is_vector_format() and node.get_value() == [scalar_value\n ] * node.get_precision().get_vector_size()\n\n\ndef extract_tables(node):\n \"\"\" extract the set of all ML_Table nodes in the graph rooted at node \"\"\"\n processed_set = set([node])\n table_set = set()\n working_set = [node]\n while working_set:\n elt = working_set.pop(0)\n if isinstance(elt, ML_NewTable):\n table_set.add(elt)\n elif not isinstance(elt, ML_LeafNode):\n for op_node in elt.inputs:\n if not op_node in processed_set:\n processed_set.add(op_node)\n working_set.append(op_node)\n return table_set\n",
"step-5": "# -*- coding: utf-8 -*-\n\n###############################################################################\n# This file is part of metalibm (https://github.com/kalray/metalibm)\n###############################################################################\n# MIT License\n#\n# Copyright (c) 2018 Kalray\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n###############################################################################\n# Author(s): Nicolas Brunie (nbrunie@kalray.eu)\n# Created: Aug 8th, 2017\n# last-modified: Mar 7th, 2018\n###############################################################################\n\nfrom functools import reduce\n\nfrom metalibm_core.core.ml_formats import ML_Bool\nfrom metalibm_core.core.ml_operations import (\n ML_LeafNode, Comparison, BooleanOperation,\n is_leaf_node,\n LogicalAnd, LogicalOr, Constant,\n BitLogicLeftShift, BitLogicRightShift,\n BitArithmeticRightShift,\n)\nfrom metalibm_core.core.advanced_operations import PlaceHolder\nfrom metalibm_core.core.ml_table import ML_NewTable\n\nfrom metalibm_core.utility.log_report import Log\n\n\ndef evaluate_comparison_range(node):\n \"\"\" evaluate the numerical range of Comparison node, if any\n else returns None \"\"\"\n return None\n\ndef is_comparison(node):\n \"\"\" test if node is a Comparison node or not \"\"\"\n return isinstance(node, Comparison)\n\nLOG_VERBOSE_EVALUATE_RANGE = Log.LogLevel(\"EvaluateRangeVerbose\")\n\n## Assuming @p optree has no pre-defined range, recursively compute a range\n# from the node inputs\ndef evaluate_range(optree, update_interval=False, memoization_map=None):\n \"\"\" evaluate the range of an Operation node\n\n Args:\n optree (ML_Operation): input Node\n\n Return:\n sollya Interval: evaluated range of optree or None if no range\n could be determined\n \"\"\"\n if memoization_map is None:\n memoization_map = {}\n init_interval = optree.get_interval()\n if not init_interval is None:\n return init_interval\n else:\n if optree in memoization_map:\n return memoization_map[optree]\n elif isinstance(optree, ML_LeafNode):\n 
op_range = optree.get_interval()\n elif is_comparison(optree):\n op_range = evaluate_comparison_range(optree)\n if update_interval:\n optree.set_interval(op_range)\n elif isinstance(optree, PlaceHolder):\n op_range = evaluate_range(optree.get_input(0),\n update_interval=update_interval,\n memoization_map=memoization_map)\n if update_interval:\n optree.set_interval(op_range)\n else:\n args_interval = tuple(\n evaluate_range(op, update_interval=update_interval,\n memoization_map=memoization_map\n ) for op in optree.get_inputs())\n args_interval_map = {op: op_interval for op, op_interval in zip(optree.inputs, args_interval)}\n # evaluate_range cannot rely on bare_range_function only as some\n # operations (e.g. CountLeadingZeros) do not base interval computation\n # on their inputs' intervals but on other parameters\n ops_interval_get = lambda op: args_interval_map[op]\n op_range = optree.range_function(optree.inputs,\n ops_interval_getter=ops_interval_get)\n if update_interval:\n optree.set_interval(op_range)\n Log.report(LOG_VERBOSE_EVALUATE_RANGE, \"range of {} is {}\", optree, op_range)\n memoization_map[optree] = op_range\n return op_range\n\n\ndef forward_attributes(src, dst):\n \"\"\" forward compatible attributes from src node to dst node\n\n :param src: source source for attributes values\n :type src: ML_Operation\n :param dst: destination node for attributes copies\n :type dst: ML_Operation\n \"\"\"\n dst.set_tag(src.get_tag())\n dst.set_debug(src.get_debug())\n dst.set_handle(src.get_handle())\n if hasattr(src.attributes, \"init_stage\"):\n forward_stage_attributes(src, dst)\n if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):\n dst.likely = src.likely\n\n\ndef forward_stage_attributes(src, dst):\n \"\"\" copy node's stage attributes from src node to dst node \"\"\"\n dst.attributes.init_stage = src.attributes.init_stage\n\n\ndef depth_node_ordering(start_node, end_nodes):\n \"\"\" order the node between root start_node end 
end_nodes\n by depth (root first, starting with start_node)\n\n :param start_node: root of the sort (first node)\n :type start_node: ML_Operation\n :param end_nodes: nodes where the depth sort must end\n :type end_nodes: iterator over ML_Operation\n :return: depth ordered list of nodes\n :rtype: list(ML_Operation)\n \"\"\"\n ordered_list = []\n ordered_set = set()\n working_list = [start_node]\n while working_list != []:\n node = working_list.pop(0)\n if not node in ordered_set:\n ordered_set.add(node)\n ordered_list.append(node)\n if not is_leaf_node(node) and not node in end_nodes:\n for node_op in node.get_inputs():\n working_list.append(node_op)\n return ordered_list\n\ndef logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):\n \"\"\" Logical/Boolean operand list reduction \"\"\"\n local_list = [node for node in op_list]\n while len(local_list) > 1:\n op0 = local_list.pop(0)\n op1 = local_list.pop(0)\n local_list.append(\n op_ctor(op0, op1, precision=precision)\n )\n # assigning attributes to the resulting node\n result = local_list[0]\n result.set_attributes(**kw)\n return result\n\n## Specialization of logical reduce to OR operation\nlogical_or_reduce = lambda op_list, **kw: logical_reduce(op_list, LogicalOr, ML_Bool, **kw)\n## Specialization of logical reduce to AND operation\nlogical_and_reduce = lambda op_list, **kw: logical_reduce(op_list, LogicalAnd, ML_Bool, **kw)\n\n\n\ndef uniform_list_check(value_list):\n \"\"\" Check that value_list is made of only a single value replicated in\n each element \"\"\"\n return reduce((lambda acc, value: acc and value == value_list[0]), value_list, True)\n\ndef uniform_vector_constant_check(optree):\n \"\"\" check whether optree is a uniform vector constant \"\"\"\n if isinstance(optree, Constant) and not optree.get_precision() is None \\\n and optree.get_precision().is_vector_format():\n return uniform_list_check(optree.get_value())\n return False\n\ndef uniform_shift_check(optree):\n \"\"\" check 
whether optree is a bit shift by a uniform vector constant \"\"\"\n if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift, BitArithmeticRightShift)):\n return uniform_vector_constant_check(optree.get_input(1)) \\\n or not optree.get_input(1).get_precision().is_vector_format()\n return False\n\n\ndef is_false(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean False \"\"\"\n return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)\ndef is_true(node):\n \"\"\" check if node is a Constant node whose value is equal to boolean True \"\"\"\n return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)\n\ndef is_scalar_cst(node, value):\n \"\"\" check if node is a constant node with value equals to value \"\"\"\n return isinstance(node, Constant) and not node.get_precision().is_vector_format() and node.get_value() == value\ndef is_vector_uniform_cst(node, scalar_value):\n \"\"\" check if node is a vector constant node with each value equals to\n scalar_value \"\"\"\n return isinstance(node, Constant) and node.get_precision().is_vector_format() and node.get_value() == [scalar_value] * node.get_precision().get_vector_size()\n\n\ndef extract_tables(node):\n \"\"\" extract the set of all ML_Table nodes in the graph rooted at node \"\"\"\n processed_set = set([node])\n table_set = set()\n working_set = [node]\n while working_set:\n elt = working_set.pop(0)\n if isinstance(elt, ML_NewTable):\n table_set.add(elt)\n elif not isinstance(elt, ML_LeafNode):\n for op_node in elt.inputs:\n if not op_node in processed_set:\n processed_set.add(op_node)\n working_set.append(op_node)\n return table_set\n",
"step-ids": [
14,
15,
16,
17,
18
]
}
|
[
14,
15,
16,
17,
18
] |
# terrascript/external/__init__.py
import terrascript
class external(terrascript.Provider):
pass
|
normal
|
{
"blob_id": "04e57739e6fb98cd237fbe09caecd17c728c1797",
"index": 5548,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass external(terrascript.Provider):\n pass\n",
"step-3": "import terrascript\n\n\nclass external(terrascript.Provider):\n pass\n",
"step-4": "# terrascript/external/__init__.py\n\nimport terrascript\n\nclass external(terrascript.Provider):\n pass",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.parametrize(('server_type', 'expected_type'), DB_TYPES + [
param('GEOMETRY', dt.geometry, marks=[skipif_no_geospatial_deps]),
param('GEOGRAPHY', dt.geography, marks=[skipif_no_geospatial_deps])] +
[param('DATETIME2(4)', dt.timestamp(scale=4), marks=[
broken_sqlalchemy_autoload]), param('DATETIMEOFFSET(5)', dt.timestamp(
scale=5, timezone='UTC'), marks=[broken_sqlalchemy_autoload])], ids=str)
def test_get_schema_from_query(con, server_type, expected_type, temp_table):
expected_schema = ibis.schema(dict(x=expected_type))
with con.begin() as c:
c.exec_driver_sql(f'CREATE TABLE [{temp_table}] (x {server_type})')
expected_schema = ibis.schema(dict(x=expected_type))
result_schema = con._get_schema_using_query(f'SELECT * FROM [{temp_table}]'
)
assert result_schema == expected_schema
t = con.table(temp_table)
assert t.schema() == expected_schema
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DB_TYPES = [('BIGINT', dt.int64), ('BIT', dt.boolean), ('DECIMAL', dt.
Decimal(precision=18, scale=0)), ('DECIMAL(5, 2)', dt.Decimal(precision
=5, scale=2)), ('INT', dt.int32), ('MONEY', dt.int64), ('NUMERIC', dt.
Decimal(18, 0)), ('NUMERIC(10,5)', dt.Decimal(10, 5)), ('NUMERIC(14,3)',
dt.Decimal(14, 3)), ('SMALLINT', dt.int16), ('SMALLMONEY', dt.int32), (
'TINYINT', dt.int8), ('REAL', dt.float32), ('FLOAT', dt.float64), (
'FLOAT(3)', dt.float32), ('FLOAT(25)', dt.float64), ('DATE', dt.date),
('TIME', dt.time), ('DATETIME2', dt.timestamp(scale=7)), (
'DATETIMEOFFSET', dt.timestamp(scale=7, timezone='UTC')), (
'SMALLDATETIME', dt.timestamp), ('DATETIME', dt.timestamp), ('CHAR', dt
.string), ('TEXT', dt.string), ('VARCHAR', dt.string), ('NCHAR', dt.
string), ('NTEXT', dt.string), ('NVARCHAR', dt.string), ('BINARY', dt.
binary), ('VARBINARY', dt.binary), ('IMAGE', dt.binary), (
'UNIQUEIDENTIFIER', dt.uuid), ('TIMESTAMP', dt.binary(nullable=False))]
skipif_no_geospatial_deps = pytest.mark.skipif(not geospatial_supported,
reason='geospatial dependencies not installed')
broken_sqlalchemy_autoload = pytest.mark.xfail(reason=
'scale not inferred by sqlalchemy autoload')
@pytest.mark.parametrize(('server_type', 'expected_type'), DB_TYPES + [
param('GEOMETRY', dt.geometry, marks=[skipif_no_geospatial_deps]),
param('GEOGRAPHY', dt.geography, marks=[skipif_no_geospatial_deps])] +
[param('DATETIME2(4)', dt.timestamp(scale=4), marks=[
broken_sqlalchemy_autoload]), param('DATETIMEOFFSET(5)', dt.timestamp(
scale=5, timezone='UTC'), marks=[broken_sqlalchemy_autoload])], ids=str)
def test_get_schema_from_query(con, server_type, expected_type, temp_table):
expected_schema = ibis.schema(dict(x=expected_type))
with con.begin() as c:
c.exec_driver_sql(f'CREATE TABLE [{temp_table}] (x {server_type})')
expected_schema = ibis.schema(dict(x=expected_type))
result_schema = con._get_schema_using_query(f'SELECT * FROM [{temp_table}]'
)
assert result_schema == expected_schema
t = con.table(temp_table)
assert t.schema() == expected_schema
<|reserved_special_token_1|>
from __future__ import annotations
import pytest
from pytest import param
import ibis
import ibis.expr.datatypes as dt
from ibis.backends.base.sql.alchemy.geospatial import geospatial_supported
DB_TYPES = [('BIGINT', dt.int64), ('BIT', dt.boolean), ('DECIMAL', dt.
Decimal(precision=18, scale=0)), ('DECIMAL(5, 2)', dt.Decimal(precision
=5, scale=2)), ('INT', dt.int32), ('MONEY', dt.int64), ('NUMERIC', dt.
Decimal(18, 0)), ('NUMERIC(10,5)', dt.Decimal(10, 5)), ('NUMERIC(14,3)',
dt.Decimal(14, 3)), ('SMALLINT', dt.int16), ('SMALLMONEY', dt.int32), (
'TINYINT', dt.int8), ('REAL', dt.float32), ('FLOAT', dt.float64), (
'FLOAT(3)', dt.float32), ('FLOAT(25)', dt.float64), ('DATE', dt.date),
('TIME', dt.time), ('DATETIME2', dt.timestamp(scale=7)), (
'DATETIMEOFFSET', dt.timestamp(scale=7, timezone='UTC')), (
'SMALLDATETIME', dt.timestamp), ('DATETIME', dt.timestamp), ('CHAR', dt
.string), ('TEXT', dt.string), ('VARCHAR', dt.string), ('NCHAR', dt.
string), ('NTEXT', dt.string), ('NVARCHAR', dt.string), ('BINARY', dt.
binary), ('VARBINARY', dt.binary), ('IMAGE', dt.binary), (
'UNIQUEIDENTIFIER', dt.uuid), ('TIMESTAMP', dt.binary(nullable=False))]
skipif_no_geospatial_deps = pytest.mark.skipif(not geospatial_supported,
reason='geospatial dependencies not installed')
broken_sqlalchemy_autoload = pytest.mark.xfail(reason=
'scale not inferred by sqlalchemy autoload')
@pytest.mark.parametrize(('server_type', 'expected_type'), DB_TYPES + [
param('GEOMETRY', dt.geometry, marks=[skipif_no_geospatial_deps]),
param('GEOGRAPHY', dt.geography, marks=[skipif_no_geospatial_deps])] +
[param('DATETIME2(4)', dt.timestamp(scale=4), marks=[
broken_sqlalchemy_autoload]), param('DATETIMEOFFSET(5)', dt.timestamp(
scale=5, timezone='UTC'), marks=[broken_sqlalchemy_autoload])], ids=str)
def test_get_schema_from_query(con, server_type, expected_type, temp_table):
expected_schema = ibis.schema(dict(x=expected_type))
with con.begin() as c:
c.exec_driver_sql(f'CREATE TABLE [{temp_table}] (x {server_type})')
expected_schema = ibis.schema(dict(x=expected_type))
result_schema = con._get_schema_using_query(f'SELECT * FROM [{temp_table}]'
)
assert result_schema == expected_schema
t = con.table(temp_table)
assert t.schema() == expected_schema
<|reserved_special_token_1|>
from __future__ import annotations
import pytest
from pytest import param
import ibis
import ibis.expr.datatypes as dt
from ibis.backends.base.sql.alchemy.geospatial import geospatial_supported
DB_TYPES = [
# Exact numbers
("BIGINT", dt.int64),
("BIT", dt.boolean),
("DECIMAL", dt.Decimal(precision=18, scale=0)),
("DECIMAL(5, 2)", dt.Decimal(precision=5, scale=2)),
("INT", dt.int32),
("MONEY", dt.int64),
("NUMERIC", dt.Decimal(18, 0)),
("NUMERIC(10,5)", dt.Decimal(10, 5)),
("NUMERIC(14,3)", dt.Decimal(14, 3)),
("SMALLINT", dt.int16),
("SMALLMONEY", dt.int32),
("TINYINT", dt.int8),
# Approximate numerics
("REAL", dt.float32),
("FLOAT", dt.float64),
("FLOAT(3)", dt.float32),
("FLOAT(25)", dt.float64),
# Date and time
("DATE", dt.date),
("TIME", dt.time),
("DATETIME2", dt.timestamp(scale=7)),
("DATETIMEOFFSET", dt.timestamp(scale=7, timezone="UTC")),
("SMALLDATETIME", dt.timestamp),
("DATETIME", dt.timestamp),
# Characters strings
("CHAR", dt.string),
("TEXT", dt.string),
("VARCHAR", dt.string),
# Unicode character strings
("NCHAR", dt.string),
("NTEXT", dt.string),
("NVARCHAR", dt.string),
# Binary strings
("BINARY", dt.binary),
("VARBINARY", dt.binary),
("IMAGE", dt.binary),
# Other data types
("UNIQUEIDENTIFIER", dt.uuid),
("TIMESTAMP", dt.binary(nullable=False)),
]
skipif_no_geospatial_deps = pytest.mark.skipif(
not geospatial_supported, reason="geospatial dependencies not installed"
)
broken_sqlalchemy_autoload = pytest.mark.xfail(
reason="scale not inferred by sqlalchemy autoload"
)
@pytest.mark.parametrize(
("server_type", "expected_type"),
DB_TYPES
+ [
param("GEOMETRY", dt.geometry, marks=[skipif_no_geospatial_deps]),
param("GEOGRAPHY", dt.geography, marks=[skipif_no_geospatial_deps]),
]
+ [
param(
"DATETIME2(4)", dt.timestamp(scale=4), marks=[broken_sqlalchemy_autoload]
),
param(
"DATETIMEOFFSET(5)",
dt.timestamp(scale=5, timezone="UTC"),
marks=[broken_sqlalchemy_autoload],
),
],
ids=str,
)
def test_get_schema_from_query(con, server_type, expected_type, temp_table):
expected_schema = ibis.schema(dict(x=expected_type))
with con.begin() as c:
c.exec_driver_sql(f"CREATE TABLE [{temp_table}] (x {server_type})")
expected_schema = ibis.schema(dict(x=expected_type))
result_schema = con._get_schema_using_query(f"SELECT * FROM [{temp_table}]")
assert result_schema == expected_schema
t = con.table(temp_table)
assert t.schema() == expected_schema
|
flexible
|
{
"blob_id": "00e9872136e5753364117adbf60793e660c8bef0",
"index": 485,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.mark.parametrize(('server_type', 'expected_type'), DB_TYPES + [\n param('GEOMETRY', dt.geometry, marks=[skipif_no_geospatial_deps]),\n param('GEOGRAPHY', dt.geography, marks=[skipif_no_geospatial_deps])] +\n [param('DATETIME2(4)', dt.timestamp(scale=4), marks=[\n broken_sqlalchemy_autoload]), param('DATETIMEOFFSET(5)', dt.timestamp(\n scale=5, timezone='UTC'), marks=[broken_sqlalchemy_autoload])], ids=str)\ndef test_get_schema_from_query(con, server_type, expected_type, temp_table):\n expected_schema = ibis.schema(dict(x=expected_type))\n with con.begin() as c:\n c.exec_driver_sql(f'CREATE TABLE [{temp_table}] (x {server_type})')\n expected_schema = ibis.schema(dict(x=expected_type))\n result_schema = con._get_schema_using_query(f'SELECT * FROM [{temp_table}]'\n )\n assert result_schema == expected_schema\n t = con.table(temp_table)\n assert t.schema() == expected_schema\n",
"step-3": "<mask token>\nDB_TYPES = [('BIGINT', dt.int64), ('BIT', dt.boolean), ('DECIMAL', dt.\n Decimal(precision=18, scale=0)), ('DECIMAL(5, 2)', dt.Decimal(precision\n =5, scale=2)), ('INT', dt.int32), ('MONEY', dt.int64), ('NUMERIC', dt.\n Decimal(18, 0)), ('NUMERIC(10,5)', dt.Decimal(10, 5)), ('NUMERIC(14,3)',\n dt.Decimal(14, 3)), ('SMALLINT', dt.int16), ('SMALLMONEY', dt.int32), (\n 'TINYINT', dt.int8), ('REAL', dt.float32), ('FLOAT', dt.float64), (\n 'FLOAT(3)', dt.float32), ('FLOAT(25)', dt.float64), ('DATE', dt.date),\n ('TIME', dt.time), ('DATETIME2', dt.timestamp(scale=7)), (\n 'DATETIMEOFFSET', dt.timestamp(scale=7, timezone='UTC')), (\n 'SMALLDATETIME', dt.timestamp), ('DATETIME', dt.timestamp), ('CHAR', dt\n .string), ('TEXT', dt.string), ('VARCHAR', dt.string), ('NCHAR', dt.\n string), ('NTEXT', dt.string), ('NVARCHAR', dt.string), ('BINARY', dt.\n binary), ('VARBINARY', dt.binary), ('IMAGE', dt.binary), (\n 'UNIQUEIDENTIFIER', dt.uuid), ('TIMESTAMP', dt.binary(nullable=False))]\nskipif_no_geospatial_deps = pytest.mark.skipif(not geospatial_supported,\n reason='geospatial dependencies not installed')\nbroken_sqlalchemy_autoload = pytest.mark.xfail(reason=\n 'scale not inferred by sqlalchemy autoload')\n\n\n@pytest.mark.parametrize(('server_type', 'expected_type'), DB_TYPES + [\n param('GEOMETRY', dt.geometry, marks=[skipif_no_geospatial_deps]),\n param('GEOGRAPHY', dt.geography, marks=[skipif_no_geospatial_deps])] +\n [param('DATETIME2(4)', dt.timestamp(scale=4), marks=[\n broken_sqlalchemy_autoload]), param('DATETIMEOFFSET(5)', dt.timestamp(\n scale=5, timezone='UTC'), marks=[broken_sqlalchemy_autoload])], ids=str)\ndef test_get_schema_from_query(con, server_type, expected_type, temp_table):\n expected_schema = ibis.schema(dict(x=expected_type))\n with con.begin() as c:\n c.exec_driver_sql(f'CREATE TABLE [{temp_table}] (x {server_type})')\n expected_schema = ibis.schema(dict(x=expected_type))\n result_schema = con._get_schema_using_query(f'SELECT 
* FROM [{temp_table}]'\n )\n assert result_schema == expected_schema\n t = con.table(temp_table)\n assert t.schema() == expected_schema\n",
"step-4": "from __future__ import annotations\nimport pytest\nfrom pytest import param\nimport ibis\nimport ibis.expr.datatypes as dt\nfrom ibis.backends.base.sql.alchemy.geospatial import geospatial_supported\nDB_TYPES = [('BIGINT', dt.int64), ('BIT', dt.boolean), ('DECIMAL', dt.\n Decimal(precision=18, scale=0)), ('DECIMAL(5, 2)', dt.Decimal(precision\n =5, scale=2)), ('INT', dt.int32), ('MONEY', dt.int64), ('NUMERIC', dt.\n Decimal(18, 0)), ('NUMERIC(10,5)', dt.Decimal(10, 5)), ('NUMERIC(14,3)',\n dt.Decimal(14, 3)), ('SMALLINT', dt.int16), ('SMALLMONEY', dt.int32), (\n 'TINYINT', dt.int8), ('REAL', dt.float32), ('FLOAT', dt.float64), (\n 'FLOAT(3)', dt.float32), ('FLOAT(25)', dt.float64), ('DATE', dt.date),\n ('TIME', dt.time), ('DATETIME2', dt.timestamp(scale=7)), (\n 'DATETIMEOFFSET', dt.timestamp(scale=7, timezone='UTC')), (\n 'SMALLDATETIME', dt.timestamp), ('DATETIME', dt.timestamp), ('CHAR', dt\n .string), ('TEXT', dt.string), ('VARCHAR', dt.string), ('NCHAR', dt.\n string), ('NTEXT', dt.string), ('NVARCHAR', dt.string), ('BINARY', dt.\n binary), ('VARBINARY', dt.binary), ('IMAGE', dt.binary), (\n 'UNIQUEIDENTIFIER', dt.uuid), ('TIMESTAMP', dt.binary(nullable=False))]\nskipif_no_geospatial_deps = pytest.mark.skipif(not geospatial_supported,\n reason='geospatial dependencies not installed')\nbroken_sqlalchemy_autoload = pytest.mark.xfail(reason=\n 'scale not inferred by sqlalchemy autoload')\n\n\n@pytest.mark.parametrize(('server_type', 'expected_type'), DB_TYPES + [\n param('GEOMETRY', dt.geometry, marks=[skipif_no_geospatial_deps]),\n param('GEOGRAPHY', dt.geography, marks=[skipif_no_geospatial_deps])] +\n [param('DATETIME2(4)', dt.timestamp(scale=4), marks=[\n broken_sqlalchemy_autoload]), param('DATETIMEOFFSET(5)', dt.timestamp(\n scale=5, timezone='UTC'), marks=[broken_sqlalchemy_autoload])], ids=str)\ndef test_get_schema_from_query(con, server_type, expected_type, temp_table):\n expected_schema = ibis.schema(dict(x=expected_type))\n with con.begin() 
as c:\n c.exec_driver_sql(f'CREATE TABLE [{temp_table}] (x {server_type})')\n expected_schema = ibis.schema(dict(x=expected_type))\n result_schema = con._get_schema_using_query(f'SELECT * FROM [{temp_table}]'\n )\n assert result_schema == expected_schema\n t = con.table(temp_table)\n assert t.schema() == expected_schema\n",
"step-5": "from __future__ import annotations\n\nimport pytest\nfrom pytest import param\n\nimport ibis\nimport ibis.expr.datatypes as dt\nfrom ibis.backends.base.sql.alchemy.geospatial import geospatial_supported\n\nDB_TYPES = [\n # Exact numbers\n (\"BIGINT\", dt.int64),\n (\"BIT\", dt.boolean),\n (\"DECIMAL\", dt.Decimal(precision=18, scale=0)),\n (\"DECIMAL(5, 2)\", dt.Decimal(precision=5, scale=2)),\n (\"INT\", dt.int32),\n (\"MONEY\", dt.int64),\n (\"NUMERIC\", dt.Decimal(18, 0)),\n (\"NUMERIC(10,5)\", dt.Decimal(10, 5)),\n (\"NUMERIC(14,3)\", dt.Decimal(14, 3)),\n (\"SMALLINT\", dt.int16),\n (\"SMALLMONEY\", dt.int32),\n (\"TINYINT\", dt.int8),\n # Approximate numerics\n (\"REAL\", dt.float32),\n (\"FLOAT\", dt.float64),\n (\"FLOAT(3)\", dt.float32),\n (\"FLOAT(25)\", dt.float64),\n # Date and time\n (\"DATE\", dt.date),\n (\"TIME\", dt.time),\n (\"DATETIME2\", dt.timestamp(scale=7)),\n (\"DATETIMEOFFSET\", dt.timestamp(scale=7, timezone=\"UTC\")),\n (\"SMALLDATETIME\", dt.timestamp),\n (\"DATETIME\", dt.timestamp),\n # Characters strings\n (\"CHAR\", dt.string),\n (\"TEXT\", dt.string),\n (\"VARCHAR\", dt.string),\n # Unicode character strings\n (\"NCHAR\", dt.string),\n (\"NTEXT\", dt.string),\n (\"NVARCHAR\", dt.string),\n # Binary strings\n (\"BINARY\", dt.binary),\n (\"VARBINARY\", dt.binary),\n (\"IMAGE\", dt.binary),\n # Other data types\n (\"UNIQUEIDENTIFIER\", dt.uuid),\n (\"TIMESTAMP\", dt.binary(nullable=False)),\n]\n\n\nskipif_no_geospatial_deps = pytest.mark.skipif(\n not geospatial_supported, reason=\"geospatial dependencies not installed\"\n)\n\nbroken_sqlalchemy_autoload = pytest.mark.xfail(\n reason=\"scale not inferred by sqlalchemy autoload\"\n)\n\n\n@pytest.mark.parametrize(\n (\"server_type\", \"expected_type\"),\n DB_TYPES\n + [\n param(\"GEOMETRY\", dt.geometry, marks=[skipif_no_geospatial_deps]),\n param(\"GEOGRAPHY\", dt.geography, marks=[skipif_no_geospatial_deps]),\n ]\n + [\n param(\n \"DATETIME2(4)\", dt.timestamp(scale=4), 
marks=[broken_sqlalchemy_autoload]\n ),\n param(\n \"DATETIMEOFFSET(5)\",\n dt.timestamp(scale=5, timezone=\"UTC\"),\n marks=[broken_sqlalchemy_autoload],\n ),\n ],\n ids=str,\n)\ndef test_get_schema_from_query(con, server_type, expected_type, temp_table):\n expected_schema = ibis.schema(dict(x=expected_type))\n with con.begin() as c:\n c.exec_driver_sql(f\"CREATE TABLE [{temp_table}] (x {server_type})\")\n expected_schema = ibis.schema(dict(x=expected_type))\n result_schema = con._get_schema_using_query(f\"SELECT * FROM [{temp_table}]\")\n assert result_schema == expected_schema\n t = con.table(temp_table)\n assert t.schema() == expected_schema\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='DiscounInfo', fields=[('id',
models.BigIntegerField(primary_key=True, serialize=False)), (
'title', models.CharField(max_length=500)), ('lkurl', models.
CharField(max_length=500)), ('imgurl', models.CharField(max_length=
500))])]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='DiscounInfo', fields=[('id',
models.BigIntegerField(primary_key=True, serialize=False)), (
'title', models.CharField(max_length=500)), ('lkurl', models.
CharField(max_length=500)), ('imgurl', models.CharField(max_length=
500))])]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-26 05:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DiscounInfo',
fields=[
('id', models.BigIntegerField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=500)),
('lkurl', models.CharField(max_length=500)),
('imgurl', models.CharField(max_length=500)),
],
),
]
|
flexible
|
{
"blob_id": "957db647500433fd73723fdeb3933037ba0641b1",
"index": 1527,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='DiscounInfo', fields=[('id',\n models.BigIntegerField(primary_key=True, serialize=False)), (\n 'title', models.CharField(max_length=500)), ('lkurl', models.\n CharField(max_length=500)), ('imgurl', models.CharField(max_length=\n 500))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='DiscounInfo', fields=[('id',\n models.BigIntegerField(primary_key=True, serialize=False)), (\n 'title', models.CharField(max_length=500)), ('lkurl', models.\n CharField(max_length=500)), ('imgurl', models.CharField(max_length=\n 500))])]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.7 on 2018-01-26 05:04\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='DiscounInfo',\n fields=[\n ('id', models.BigIntegerField(primary_key=True, serialize=False)),\n ('title', models.CharField(max_length=500)),\n ('lkurl', models.CharField(max_length=500)),\n ('imgurl', models.CharField(max_length=500)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Imports
import numpy as np
from ctf.functions2d.function2d import Function2D
# Problem
class StyblinskiTang(Function2D):
""" Styblinski-Tang Function. """
def __init__(self):
""" Constructor. """
# Information
self.min = np.array([-2.903534, -2.903534])
self.value = -39.16599*2.0
self.domain = np.array([[-5.0, 5.0], [-5.0, 5.0]])
self.n = 2
self.smooth = True
self.info = [True, True, True]
# Description
self.latex_name = "Styblinski-Tang Function"
self.latex_type = "Other"
self.latex_cost = r'\[ f(\mathbf{x}) = \frac{1}{2} \sum_{i=0}^{d-1} (x_i^4 - 16 x_i^2 + 5 x_i) \]'
self.latex_desc = "The local minima are separated by a local maximum. There is only a single global minimum."
def cost(self, x):
""" Cost function. """
# Cost
c = np.zeros(x.shape[1:])
# Calculate Cost
c = 0.5*(x[0]**4.0 - 16*x[0]**2.0 + 5.0*x[0] + x[1]**4.0 - 16*x[1]**2.0 + 5.0*x[1])
# Return Cost
return c
def grad(self, x):
""" Grad function. """
# Grad
g = np.zeros(x.shape)
# Calculate Grads
g[0] = -16.0*x[0]**1.0 + 2.0*x[0]**3.0 + 2.5
g[1] = -16.0*x[1]**1.0 + 2.0*x[1]**3.0 + 2.5
# Return Grad
return g
def hess(self, x):
""" Hess function. """
# Hess
h = np.zeros((2, 2) + x.shape[1:])
# Calculate Hess
h[0][0] = 6.0*x[0]**2.0 - 16.0
h[0][1] = 0.0
h[1][0] = h[0][1]
h[1][1] = 6.0*x[1]**2.0 - 16.0
# Return Hess
return h
|
normal
|
{
"blob_id": "5d8715dd02feff4e13919858051abeb5b6828011",
"index": 6798,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass StyblinskiTang(Function2D):\n <mask token>\n <mask token>\n <mask token>\n\n def grad(self, x):\n \"\"\" Grad function. \"\"\"\n g = np.zeros(x.shape)\n g[0] = -16.0 * x[0] ** 1.0 + 2.0 * x[0] ** 3.0 + 2.5\n g[1] = -16.0 * x[1] ** 1.0 + 2.0 * x[1] ** 3.0 + 2.5\n return g\n\n def hess(self, x):\n \"\"\" Hess function. \"\"\"\n h = np.zeros((2, 2) + x.shape[1:])\n h[0][0] = 6.0 * x[0] ** 2.0 - 16.0\n h[0][1] = 0.0\n h[1][0] = h[0][1]\n h[1][1] = 6.0 * x[1] ** 2.0 - 16.0\n return h\n",
"step-3": "<mask token>\n\n\nclass StyblinskiTang(Function2D):\n \"\"\" Styblinski-Tang Function. \"\"\"\n\n def __init__(self):\n \"\"\" Constructor. \"\"\"\n self.min = np.array([-2.903534, -2.903534])\n self.value = -39.16599 * 2.0\n self.domain = np.array([[-5.0, 5.0], [-5.0, 5.0]])\n self.n = 2\n self.smooth = True\n self.info = [True, True, True]\n self.latex_name = 'Styblinski-Tang Function'\n self.latex_type = 'Other'\n self.latex_cost = (\n '\\\\[ f(\\\\mathbf{x}) = \\\\frac{1}{2} \\\\sum_{i=0}^{d-1} (x_i^4 - 16 x_i^2 + 5 x_i) \\\\]'\n )\n self.latex_desc = (\n 'The local minima are separated by a local maximum. There is only a single global minimum.'\n )\n\n def cost(self, x):\n \"\"\" Cost function. \"\"\"\n c = np.zeros(x.shape[1:])\n c = 0.5 * (x[0] ** 4.0 - 16 * x[0] ** 2.0 + 5.0 * x[0] + x[1] ** \n 4.0 - 16 * x[1] ** 2.0 + 5.0 * x[1])\n return c\n\n def grad(self, x):\n \"\"\" Grad function. \"\"\"\n g = np.zeros(x.shape)\n g[0] = -16.0 * x[0] ** 1.0 + 2.0 * x[0] ** 3.0 + 2.5\n g[1] = -16.0 * x[1] ** 1.0 + 2.0 * x[1] ** 3.0 + 2.5\n return g\n\n def hess(self, x):\n \"\"\" Hess function. \"\"\"\n h = np.zeros((2, 2) + x.shape[1:])\n h[0][0] = 6.0 * x[0] ** 2.0 - 16.0\n h[0][1] = 0.0\n h[1][0] = h[0][1]\n h[1][1] = 6.0 * x[1] ** 2.0 - 16.0\n return h\n",
"step-4": "import numpy as np\nfrom ctf.functions2d.function2d import Function2D\n\n\nclass StyblinskiTang(Function2D):\n \"\"\" Styblinski-Tang Function. \"\"\"\n\n def __init__(self):\n \"\"\" Constructor. \"\"\"\n self.min = np.array([-2.903534, -2.903534])\n self.value = -39.16599 * 2.0\n self.domain = np.array([[-5.0, 5.0], [-5.0, 5.0]])\n self.n = 2\n self.smooth = True\n self.info = [True, True, True]\n self.latex_name = 'Styblinski-Tang Function'\n self.latex_type = 'Other'\n self.latex_cost = (\n '\\\\[ f(\\\\mathbf{x}) = \\\\frac{1}{2} \\\\sum_{i=0}^{d-1} (x_i^4 - 16 x_i^2 + 5 x_i) \\\\]'\n )\n self.latex_desc = (\n 'The local minima are separated by a local maximum. There is only a single global minimum.'\n )\n\n def cost(self, x):\n \"\"\" Cost function. \"\"\"\n c = np.zeros(x.shape[1:])\n c = 0.5 * (x[0] ** 4.0 - 16 * x[0] ** 2.0 + 5.0 * x[0] + x[1] ** \n 4.0 - 16 * x[1] ** 2.0 + 5.0 * x[1])\n return c\n\n def grad(self, x):\n \"\"\" Grad function. \"\"\"\n g = np.zeros(x.shape)\n g[0] = -16.0 * x[0] ** 1.0 + 2.0 * x[0] ** 3.0 + 2.5\n g[1] = -16.0 * x[1] ** 1.0 + 2.0 * x[1] ** 3.0 + 2.5\n return g\n\n def hess(self, x):\n \"\"\" Hess function. \"\"\"\n h = np.zeros((2, 2) + x.shape[1:])\n h[0][0] = 6.0 * x[0] ** 2.0 - 16.0\n h[0][1] = 0.0\n h[1][0] = h[0][1]\n h[1][1] = 6.0 * x[1] ** 2.0 - 16.0\n return h\n",
"step-5": "# Imports\nimport numpy as np\n\nfrom ctf.functions2d.function2d import Function2D\n\n\n\n# Problem\nclass StyblinskiTang(Function2D):\n \"\"\" Styblinski-Tang Function. \"\"\"\n\n def __init__(self):\n \"\"\" Constructor. \"\"\"\n # Information\n self.min = np.array([-2.903534, -2.903534])\n self.value = -39.16599*2.0\n self.domain = np.array([[-5.0, 5.0], [-5.0, 5.0]])\n self.n = 2\n self.smooth = True\n self.info = [True, True, True]\n # Description\n self.latex_name = \"Styblinski-Tang Function\"\n self.latex_type = \"Other\"\n self.latex_cost = r'\\[ f(\\mathbf{x}) = \\frac{1}{2} \\sum_{i=0}^{d-1} (x_i^4 - 16 x_i^2 + 5 x_i) \\]'\n self.latex_desc = \"The local minima are separated by a local maximum. There is only a single global minimum.\"\n\n def cost(self, x):\n \"\"\" Cost function. \"\"\"\n # Cost\n c = np.zeros(x.shape[1:])\n # Calculate Cost\n c = 0.5*(x[0]**4.0 - 16*x[0]**2.0 + 5.0*x[0] + x[1]**4.0 - 16*x[1]**2.0 + 5.0*x[1])\n # Return Cost\n return c\n\n def grad(self, x):\n \"\"\" Grad function. \"\"\"\n # Grad\n g = np.zeros(x.shape)\n # Calculate Grads\n g[0] = -16.0*x[0]**1.0 + 2.0*x[0]**3.0 + 2.5\n g[1] = -16.0*x[1]**1.0 + 2.0*x[1]**3.0 + 2.5\n # Return Grad\n return g\n\n def hess(self, x):\n \"\"\" Hess function. \"\"\"\n # Hess\n h = np.zeros((2, 2) + x.shape[1:])\n # Calculate Hess\n h[0][0] = 6.0*x[0]**2.0 - 16.0\n h[0][1] = 0.0\n h[1][0] = h[0][1]\n h[1][1] = 6.0*x[1]**2.0 - 16.0\n # Return Hess\n return h",
"step-ids": [
0,
3,
6,
7,
8
]
}
|
[
0,
3,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if 0 < cal < 2400:
print('Tuesday')
elif cal < 0:
print('Monday')
else:
print('Wednesday')
<|reserved_special_token_1|>
offset = input()
cal = 1030 + int(offset) * 100
if 0 < cal < 2400:
print('Tuesday')
elif cal < 0:
print('Monday')
else:
print('Wednesday')
|
flexible
|
{
"blob_id": "aefb49410e077180a660d17c4c646265a75969a7",
"index": 7509,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif 0 < cal < 2400:\n print('Tuesday')\nelif cal < 0:\n print('Monday')\nelse:\n print('Wednesday')\n",
"step-3": "offset = input()\ncal = 1030 + int(offset) * 100\nif 0 < cal < 2400:\n print('Tuesday')\nelif cal < 0:\n print('Monday')\nelse:\n print('Wednesday')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
@login_manager.user_loader
def load_user(id):
user = Student.query.get(int(id))
if user is not None:
return user
else:
return Instructor.query.get(int(id))
<|reserved_special_token_0|>
@app.route(base_url + 'instructorProfile', methods=['GET'])
def instructorhome():
return render_template('Instructor_Profile.html')
@app.route(base_url + 'Register', methods=['POST', 'GET'])
def createAccount():
if request.method == 'POST':
if request.form['options'] == 'STUDENT':
new_user = Student(request.form['first-name'], request.form[
'last-name'], request.form['email'], request.form['pwd'])
db.session.add(new_user)
db.session.commit()
db.session.refresh(new_user)
while Instructor.query.filter_by(id=new_user.id).first(
) is not None:
new_user.id = new_user.id + 1
db.session.commit()
db.session.refresh(new_user)
login_user(new_user)
return redirect(url_for('studenthome'))
elif request.form['options'] == 'INSTRUCTOR':
new_user = Instructor(request.form['first-name'], request.form[
'last-name'], request.form['email'], request.form['pwd'])
db.session.add(new_user)
db.session.commit()
db.session.refresh(new_user)
while Student.query.filter_by(id=new_user.id).first() is not None:
new_user.id = new_user.id + 1
db.session.commit()
db.session.refresh(new_user)
login_user(new_user)
return redirect(url_for('instructorhome'))
return redirect(url_for('login'))
@app.route(base_url + 'instructors', methods=['POST'])
def createInstructor():
instructor = Instructor(**request.json)
db.session.add(instructor)
db.session.commit()
db.session.refresh(instructor)
return jsonify({'status': 1, 'instructor': instructor_to_obj(instructor)}
), 200
@app.route(base_url + 'post', methods=['POST', 'GET'])
def post():
if request.method == 'POST':
new_job = Jobs(request.form['position'], request.form['Semester'],
request.form['pay'], request.form['gpa_required'])
db.session.add(new_job)
db.session.commit()
db.session.refresh(new_job)
return render_template('instructorPortal.html', applicates=
Job_Application.query.all())
@app.route(base_url + 'apply', methods=['POST', 'GET'])
@login_required
def apply():
if request.method == 'POST':
new_app = Job_Application(grade_recieved=request.form['Grade'],
Avalialability=request.form['Avalialability'], bio=request.form
['bio'], gpa_overall=request.form['gpa_overall'], job_status=
request.form['job_status'], owner=current_user)
new_app.job_status = 'Submited'
db.session.add(new_app)
db.session.commit()
db.session.refresh(new_app)
flash('Job Application successfully Submited')
return render_template('studenPortal.html', Jobs=Jobs.query.all(),
Appliedjobs=Job_Application.query.filter_by(id=current_user.id))
<|reserved_special_token_0|>
@app.route(base_url + 'updateApplication', methods=['POST'])
@login_required
def update_application(applicate):
if request.method == 'POST':
student = Student.query.filter_by(id=applicate.owner_id)
student.Job_Application.job_status = 'Rejected'
db, session.add(student)
db.session.commit()
db.session.refresh(student)
return render_template('instructorPortal.html', applicates=
Job_Application.query.all())
<|reserved_special_token_0|>
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('login'))
def main():
db.create_all()
app.run(debug=True)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@login_manager.user_loader
def load_user(id):
user = Student.query.get(int(id))
if user is not None:
return user
else:
return Instructor.query.get(int(id))
<|reserved_special_token_0|>
@app.route(base_url + 'instructorProfile', methods=['GET'])
def instructorhome():
return render_template('Instructor_Profile.html')
@app.route(base_url + 'Register', methods=['POST', 'GET'])
def createAccount():
if request.method == 'POST':
if request.form['options'] == 'STUDENT':
new_user = Student(request.form['first-name'], request.form[
'last-name'], request.form['email'], request.form['pwd'])
db.session.add(new_user)
db.session.commit()
db.session.refresh(new_user)
while Instructor.query.filter_by(id=new_user.id).first(
) is not None:
new_user.id = new_user.id + 1
db.session.commit()
db.session.refresh(new_user)
login_user(new_user)
return redirect(url_for('studenthome'))
elif request.form['options'] == 'INSTRUCTOR':
new_user = Instructor(request.form['first-name'], request.form[
'last-name'], request.form['email'], request.form['pwd'])
db.session.add(new_user)
db.session.commit()
db.session.refresh(new_user)
while Student.query.filter_by(id=new_user.id).first() is not None:
new_user.id = new_user.id + 1
db.session.commit()
db.session.refresh(new_user)
login_user(new_user)
return redirect(url_for('instructorhome'))
return redirect(url_for('login'))
@app.route(base_url + 'instructors', methods=['POST'])
def createInstructor():
instructor = Instructor(**request.json)
db.session.add(instructor)
db.session.commit()
db.session.refresh(instructor)
return jsonify({'status': 1, 'instructor': instructor_to_obj(instructor)}
), 200
@app.route(base_url + 'post', methods=['POST', 'GET'])
def post():
if request.method == 'POST':
new_job = Jobs(request.form['position'], request.form['Semester'],
request.form['pay'], request.form['gpa_required'])
db.session.add(new_job)
db.session.commit()
db.session.refresh(new_job)
return render_template('instructorPortal.html', applicates=
Job_Application.query.all())
@app.route(base_url + 'apply', methods=['POST', 'GET'])
@login_required
def apply():
if request.method == 'POST':
new_app = Job_Application(grade_recieved=request.form['Grade'],
Avalialability=request.form['Avalialability'], bio=request.form
['bio'], gpa_overall=request.form['gpa_overall'], job_status=
request.form['job_status'], owner=current_user)
new_app.job_status = 'Submited'
db.session.add(new_app)
db.session.commit()
db.session.refresh(new_app)
flash('Job Application successfully Submited')
return render_template('studenPortal.html', Jobs=Jobs.query.all(),
Appliedjobs=Job_Application.query.filter_by(id=current_user.id))
<|reserved_special_token_0|>
@app.route(base_url + 'updateApplication', methods=['POST'])
@login_required
def update_application(applicate):
if request.method == 'POST':
student = Student.query.filter_by(id=applicate.owner_id)
student.Job_Application.job_status = 'Rejected'
db, session.add(student)
db.session.commit()
db.session.refresh(student)
return render_template('instructorPortal.html', applicates=
Job_Application.query.all())
@app.route(base_url + 'cancel_Application', methods=['DELETE'])
@login_required
def cancel_application():
if request.method == 'DELETE':
job_position = request.form['job_name']
job_pos = current_user.jobs.filter_by(position=job_position)
db.session.delete(job_pos)
db.session.commit()
db.session.refresh()
return render_template('studenPortal.html', Jobs=Jobs.query.all(),
Appliedjobs=Job_Application.query.filter_by(id=current_user.id),
applied=Jobs.query.filter_by())
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('login'))
def main():
db.create_all()
app.run(debug=True)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@login_manager.user_loader
def load_user(id):
user = Student.query.get(int(id))
if user is not None:
return user
else:
return Instructor.query.get(int(id))
@app.route(base_url, methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('studenthome'))
form = LoginForm()
if form.validate_on_submit():
user = Student.query.filter_by(email=form.email.data).first()
if user is not None and user.check_password(form.password.data):
login_user(user, remember=form.remember_me.data)
return redirect(url_for('apply'))
user = Instructor.query.filter_by(email=form.email.data).first()
if user is not None and user.check_password(form.password.data):
login_user(user, remember=form.remember_me.data)
return redirect(url_for('post'))
flash('Invalid username or password')
return redirect(url_for('login'))
return render_template('mainpage.html', title='Sign In', form=form)
@app.route(base_url + 'studentProfile', methods=['GET'])
def studenthome():
return render_template('student_Profile.html')
@app.route(base_url + 'instructorProfile', methods=['GET'])
def instructorhome():
return render_template('Instructor_Profile.html')
@app.route(base_url + 'Register', methods=['POST', 'GET'])
def createAccount():
if request.method == 'POST':
if request.form['options'] == 'STUDENT':
new_user = Student(request.form['first-name'], request.form[
'last-name'], request.form['email'], request.form['pwd'])
db.session.add(new_user)
db.session.commit()
db.session.refresh(new_user)
while Instructor.query.filter_by(id=new_user.id).first(
) is not None:
new_user.id = new_user.id + 1
db.session.commit()
db.session.refresh(new_user)
login_user(new_user)
return redirect(url_for('studenthome'))
elif request.form['options'] == 'INSTRUCTOR':
new_user = Instructor(request.form['first-name'], request.form[
'last-name'], request.form['email'], request.form['pwd'])
db.session.add(new_user)
db.session.commit()
db.session.refresh(new_user)
while Student.query.filter_by(id=new_user.id).first() is not None:
new_user.id = new_user.id + 1
db.session.commit()
db.session.refresh(new_user)
login_user(new_user)
return redirect(url_for('instructorhome'))
return redirect(url_for('login'))
@app.route(base_url + 'instructors', methods=['POST'])
def createInstructor():
instructor = Instructor(**request.json)
db.session.add(instructor)
db.session.commit()
db.session.refresh(instructor)
return jsonify({'status': 1, 'instructor': instructor_to_obj(instructor)}
), 200
@app.route(base_url + 'post', methods=['POST', 'GET'])
def post():
if request.method == 'POST':
new_job = Jobs(request.form['position'], request.form['Semester'],
request.form['pay'], request.form['gpa_required'])
db.session.add(new_job)
db.session.commit()
db.session.refresh(new_job)
return render_template('instructorPortal.html', applicates=
Job_Application.query.all())
@app.route(base_url + 'apply', methods=['POST', 'GET'])
@login_required
def apply():
if request.method == 'POST':
new_app = Job_Application(grade_recieved=request.form['Grade'],
Avalialability=request.form['Avalialability'], bio=request.form
['bio'], gpa_overall=request.form['gpa_overall'], job_status=
request.form['job_status'], owner=current_user)
new_app.job_status = 'Submited'
db.session.add(new_app)
db.session.commit()
db.session.refresh(new_app)
flash('Job Application successfully Submited')
return render_template('studenPortal.html', Jobs=Jobs.query.all(),
Appliedjobs=Job_Application.query.filter_by(id=current_user.id))
@app.route(base_url + 'students_edit', methods=['GET', 'POST'])
@login_required
def editStudent():
if request.method == 'POST':
current_user.gpa = request.form['editGpa']
current_user.major = request.form['editMajor']
db.session.add(current_user)
db.session.commit()
db.session.refresh(current_user)
return render_template('student_Profile.html', current_user=
current_user)
return render_template('student_Profile.html', current_user=current_user)
<|reserved_special_token_0|>
@app.route(base_url + 'updateApplication', methods=['POST'])
@login_required
def update_application(applicate):
if request.method == 'POST':
student = Student.query.filter_by(id=applicate.owner_id)
student.Job_Application.job_status = 'Rejected'
db, session.add(student)
db.session.commit()
db.session.refresh(student)
return render_template('instructorPortal.html', applicates=
Job_Application.query.all())
@app.route(base_url + 'cancel_Application', methods=['DELETE'])
@login_required
def cancel_application():
if request.method == 'DELETE':
job_position = request.form['job_name']
job_pos = current_user.jobs.filter_by(position=job_position)
db.session.delete(job_pos)
db.session.commit()
db.session.refresh()
return render_template('studenPortal.html', Jobs=Jobs.query.all(),
Appliedjobs=Job_Application.query.filter_by(id=current_user.id),
applied=Jobs.query.filter_by())
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('login'))
def main():
db.create_all()
app.run(debug=True)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import os
import flask_sqlalchemy as sqlalchemy
from flask import Flask, jsonify, request, render_template, redirect, url_for, json, flash
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_cors import CORS
import datetime
from flask_bootstrap import Bootstrap
from flask_login import LoginManager, current_user, login_user, logout_user, login_required
from flask_login import UserMixin
from hashlib import md5
from database.models import *
app = Flask(__name__, static_url_path='/static')
app.debug = True
CORS(app)
login_manager = LoginManager()
login_manager.init_app(app)
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///sqlalchemy-demo.db'
app.config['SECRET_KEY'] = 'Thisissupposedtobesecret!'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
bootstrap = Bootstrap(app)
app.config.update(DEBUG=True)
db = sqlalchemy.SQLAlchemy(app)
base_url = '/api/'
@login_manager.user_loader
def load_user(id):
user = Student.query.get(int(id))
if user is not None:
return user
else:
return Instructor.query.get(int(id))
@app.route(base_url, methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('studenthome'))
form = LoginForm()
if form.validate_on_submit():
user = Student.query.filter_by(email=form.email.data).first()
if user is not None and user.check_password(form.password.data):
login_user(user, remember=form.remember_me.data)
return redirect(url_for('apply'))
user = Instructor.query.filter_by(email=form.email.data).first()
if user is not None and user.check_password(form.password.data):
login_user(user, remember=form.remember_me.data)
return redirect(url_for('post'))
flash('Invalid username or password')
return redirect(url_for('login'))
return render_template('mainpage.html', title='Sign In', form=form)
@app.route(base_url + 'studentProfile', methods=['GET'])
def studenthome():
return render_template('student_Profile.html')
@app.route(base_url + 'instructorProfile', methods=['GET'])
def instructorhome():
return render_template('Instructor_Profile.html')
@app.route(base_url + 'Register', methods=['POST', 'GET'])
def createAccount():
if request.method == 'POST':
if request.form['options'] == 'STUDENT':
new_user = Student(request.form['first-name'], request.form[
'last-name'], request.form['email'], request.form['pwd'])
db.session.add(new_user)
db.session.commit()
db.session.refresh(new_user)
while Instructor.query.filter_by(id=new_user.id).first(
) is not None:
new_user.id = new_user.id + 1
db.session.commit()
db.session.refresh(new_user)
login_user(new_user)
return redirect(url_for('studenthome'))
elif request.form['options'] == 'INSTRUCTOR':
new_user = Instructor(request.form['first-name'], request.form[
'last-name'], request.form['email'], request.form['pwd'])
db.session.add(new_user)
db.session.commit()
db.session.refresh(new_user)
while Student.query.filter_by(id=new_user.id).first() is not None:
new_user.id = new_user.id + 1
db.session.commit()
db.session.refresh(new_user)
login_user(new_user)
return redirect(url_for('instructorhome'))
return redirect(url_for('login'))
@app.route(base_url + 'instructors', methods=['POST'])
def createInstructor():
instructor = Instructor(**request.json)
db.session.add(instructor)
db.session.commit()
db.session.refresh(instructor)
return jsonify({'status': 1, 'instructor': instructor_to_obj(instructor)}
), 200
@app.route(base_url + 'post', methods=['POST', 'GET'])
def post():
if request.method == 'POST':
new_job = Jobs(request.form['position'], request.form['Semester'],
request.form['pay'], request.form['gpa_required'])
db.session.add(new_job)
db.session.commit()
db.session.refresh(new_job)
return render_template('instructorPortal.html', applicates=
Job_Application.query.all())
@app.route(base_url + 'apply', methods=['POST', 'GET'])
@login_required
def apply():
if request.method == 'POST':
new_app = Job_Application(grade_recieved=request.form['Grade'],
Avalialability=request.form['Avalialability'], bio=request.form
['bio'], gpa_overall=request.form['gpa_overall'], job_status=
request.form['job_status'], owner=current_user)
new_app.job_status = 'Submited'
db.session.add(new_app)
db.session.commit()
db.session.refresh(new_app)
flash('Job Application successfully Submited')
return render_template('studenPortal.html', Jobs=Jobs.query.all(),
Appliedjobs=Job_Application.query.filter_by(id=current_user.id))
@app.route(base_url + 'students_edit', methods=['GET', 'POST'])
@login_required
def editStudent():
if request.method == 'POST':
current_user.gpa = request.form['editGpa']
current_user.major = request.form['editMajor']
db.session.add(current_user)
db.session.commit()
db.session.refresh(current_user)
return render_template('student_Profile.html', current_user=
current_user)
return render_template('student_Profile.html', current_user=current_user)
@app.route(base_url + 'instructors_edit', methods=['GET', 'POST'])
@login_required
def editInstructor():
if request.method == 'POST':
current_user.email = request.form['editEmail']
current_user.phone = request.form['editPhone']
current_user.office = request.form['editOffice']
db.session.add(current_user)
db.session.commit()
db.session.refresh(current_user)
return render_template('Instructor_Profile.html', current_user=
current_user)
return render_template('Instructor_Profile.html', current_user=current_user
)
@app.route(base_url + 'updateApplication', methods=['POST'])
@login_required
def update_application(applicate):
if request.method == 'POST':
student = Student.query.filter_by(id=applicate.owner_id)
student.Job_Application.job_status = 'Rejected'
db, session.add(student)
db.session.commit()
db.session.refresh(student)
return render_template('instructorPortal.html', applicates=
Job_Application.query.all())
@app.route(base_url + 'cancel_Application', methods=['DELETE'])
@login_required
def cancel_application():
if request.method == 'DELETE':
job_position = request.form['job_name']
job_pos = current_user.jobs.filter_by(position=job_position)
db.session.delete(job_pos)
db.session.commit()
db.session.refresh()
return render_template('studenPortal.html', Jobs=Jobs.query.all(),
Appliedjobs=Job_Application.query.filter_by(id=current_user.id),
applied=Jobs.query.filter_by())
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('login'))
def main():
db.create_all()
app.run(debug=True)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import os
import flask_sqlalchemy as sqlalchemy
from flask import Flask, jsonify, request,render_template,redirect,url_for,json,flash
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_cors import CORS
import datetime
from flask_bootstrap import Bootstrap
from flask_login import LoginManager,current_user, login_user,logout_user, login_required
from flask_login import UserMixin
from hashlib import md5
from database.models import *
#from sqlalchemy_imageattach.entity import Image, image_attachment
app = Flask(__name__,static_url_path='/static')
app.debug = True
CORS(app)
login_manager = LoginManager()
login_manager.init_app(app)
#UPLOAD_FOLDER = '../static/templates'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///sqlalchemy-demo.db'
app.config['SECRET_KEY'] = 'Thisissupposedtobesecret!'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
bootstrap = Bootstrap(app)
app.config.update(DEBUG=True)
db = sqlalchemy.SQLAlchemy(app)
base_url = '/api/'
@login_manager.user_loader
def load_user(id):
user = Student.query.get(int(id))
if user is not None:
return user
else:
return Instructor.query.get(int(id))
@app.route(base_url, methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('studenthome'))
form = LoginForm()
if form.validate_on_submit():
user = Student.query.filter_by(email=form.email.data).first()
# Login Student
if user is not None and user.check_password(form.password.data):
login_user(user, remember=form.remember_me.data)
return redirect(url_for('apply'))
user = Instructor.query.filter_by(email=form.email.data).first()
# Login Instructor
if user is not None and user.check_password(form.password.data):
login_user(user, remember=form.remember_me.data)
return redirect(url_for('post'))
# Login failed
flash('Invalid username or password')
return redirect(url_for('login'))
return render_template('mainpage.html', title='Sign In', form=form)
# Route to student Profile
@app.route(base_url + 'studentProfile', methods=['GET'])
def studenthome():
return render_template('student_Profile.html')
# Route to Instructor Profile
@app.route(base_url + 'instructorProfile', methods=['GET'])
def instructorhome():
return render_template('Instructor_Profile.html')
# Route to create a student account and main page
@app.route(base_url + 'Register', methods=['POST','GET'])
def createAccount():
if request.method == 'POST':
# Student option is checked
if request.form['options'] == 'STUDENT':
new_user = Student(request.form['first-name'],request.form['last-name'],request.form['email'],request.form['pwd'])
db.session.add(new_user)
db.session.commit()
db.session.refresh(new_user)
# Make sure id is unique
while Instructor.query.filter_by(id=new_user.id).first() is not None:
new_user.id = new_user.id + 1
db.session.commit()
db.session.refresh(new_user)
login_user(new_user)
return redirect(url_for('studenthome'))
# Instructor option is checked
elif request.form['options'] == 'INSTRUCTOR':
new_user = Instructor(request.form['first-name'],request.form['last-name'],request.form['email'],request.form['pwd'])
db.session.add(new_user)
db.session.commit()
db.session.refresh(new_user)
# Make sure id is unique
while Student.query.filter_by(id=new_user.id).first() is not None:
new_user.id = new_user.id + 1
db.session.commit()
db.session.refresh(new_user)
login_user(new_user)
return redirect(url_for('instructorhome'))
return redirect(url_for('login'))
#return render_template('studenPortal.html', Jobs = Jobs.query.all())
# Route to create a instructor account
@app.route(base_url + 'instructors', methods=['POST'])
def createInstructor():
instructor = Instructor(**request.json)
db.session.add(instructor)
db.session.commit()
db.session.refresh(instructor)
return jsonify({"status": 1, "instructor": instructor_to_obj(instructor)}), 200
# Route to post a job for Instructors
@app.route(base_url + 'post', methods=['POST','GET'])
#@login_required
def post():
if request.method == 'POST':
new_job = Jobs(request.form['position'],request.form['Semester'],request.form['pay'],request.form['gpa_required'])
db.session.add(new_job)
db.session.commit()
db.session.refresh(new_job)
#,applicates = Job_Application.query.all()
return render_template('instructorPortal.html',applicates = Job_Application.query.all())
# Route to Display jobs for students
@app.route(base_url + 'apply', methods=['POST','GET'])
@login_required
def apply():
if request.method == 'POST':
#temp_student = Student(first_name=current_user.first_name,last_name=current_user.last_name,email=current_user.email,password=current_user.password)
#db.session.add(temp_student)
#db.session.commit()
new_app = Job_Application(grade_recieved=request.form['Grade'],Avalialability=request.form['Avalialability'],bio=request.form['bio'],gpa_overall=request.form['gpa_overall'],job_status=request.form['job_status'],owner=current_user)
new_app.job_status = "Submited"
#new_app = Job_Application(owner=temp_student)
db.session.add(new_app)
db.session.commit()
db.session.refresh(new_app)
flash("Job Application successfully Submited")
return render_template('studenPortal.html', Jobs = Jobs.query.all(),Appliedjobs = Job_Application.query.filter_by(id=current_user.id))
# Route to edit info in a student account
# Edit ONLY major, gpa and grad_date
@app.route(base_url + 'students_edit', methods=['GET', 'POST'])
@login_required
def editStudent():
if request.method == 'POST':
current_user.gpa = request.form['editGpa']
current_user.major = request.form['editMajor']
db.session.add(current_user)
db.session.commit()
db.session.refresh(current_user)
return render_template('student_Profile.html',current_user=current_user)
return render_template('student_Profile.html',current_user=current_user)
# Route to edit info in an Instructor account
# Edit ONLY email, office, and phone
@app.route(base_url + 'instructors_edit', methods=['GET', 'POST'])
@login_required
def editInstructor():
if request.method == 'POST':
current_user.email = request.form['editEmail']
current_user.phone = request.form['editPhone']
current_user.office = request.form['editOffice']
db.session.add(current_user)
db.session.commit()
db.session.refresh(current_user)
return render_template('Instructor_Profile.html',current_user=current_user)
return render_template('Instructor_Profile.html',current_user=current_user)
# Route to update Student Application
@app.route(base_url + 'updateApplication', methods=['POST'])
@login_required
def update_application(applicate):
if request.method == 'POST':
student = Student.query.filter_by(id =applicate.owner_id)
student.Job_Application.job_status = "Rejected"
db,session.add(student)
db.session.commit()
db.session.refresh(student)
return render_template('instructorPortal.html',applicates = Job_Application.query.all())
# Route to Delete student Application
@app.route(base_url + 'cancel_Application', methods=['DELETE'])
@login_required
def cancel_application():
if request.method == 'DELETE':
job_position = request.form['job_name']
job_pos = current_user.jobs.filter_by(position=job_position)
db.session.delete(job_pos)
db.session.commit()
db.session.refresh()
return render_template('studenPortal.html', Jobs = Jobs.query.all(),Appliedjobs = Job_Application.query.filter_by(id=current_user.id),applied=Jobs.query.filter_by())
# Route to Login out User
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('login'))
def main():
    """Create any missing database tables, then start the Flask dev server."""
    db.create_all() # creates the tables you've provided
    app.run(debug=True) # runs the Flask application (debug server; not for production)
# Script entry point: only start the server when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "5dc17db0aca109720d1ba62d65b86d9b81714063",
"index": 6622,
"step-1": "<mask token>\n\n\n@login_manager.user_loader\ndef load_user(id):\n user = Student.query.get(int(id))\n if user is not None:\n return user\n else:\n return Instructor.query.get(int(id))\n\n\n<mask token>\n\n\n@app.route(base_url + 'instructorProfile', methods=['GET'])\ndef instructorhome():\n return render_template('Instructor_Profile.html')\n\n\n@app.route(base_url + 'Register', methods=['POST', 'GET'])\ndef createAccount():\n if request.method == 'POST':\n if request.form['options'] == 'STUDENT':\n new_user = Student(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Instructor.query.filter_by(id=new_user.id).first(\n ) is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('studenthome'))\n elif request.form['options'] == 'INSTRUCTOR':\n new_user = Instructor(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Student.query.filter_by(id=new_user.id).first() is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('instructorhome'))\n return redirect(url_for('login'))\n\n\n@app.route(base_url + 'instructors', methods=['POST'])\ndef createInstructor():\n instructor = Instructor(**request.json)\n db.session.add(instructor)\n db.session.commit()\n db.session.refresh(instructor)\n return jsonify({'status': 1, 'instructor': instructor_to_obj(instructor)}\n ), 200\n\n\n@app.route(base_url + 'post', methods=['POST', 'GET'])\ndef post():\n if request.method == 'POST':\n new_job = Jobs(request.form['position'], request.form['Semester'],\n request.form['pay'], request.form['gpa_required'])\n 
db.session.add(new_job)\n db.session.commit()\n db.session.refresh(new_job)\n return render_template('instructorPortal.html', applicates=\n Job_Application.query.all())\n\n\n@app.route(base_url + 'apply', methods=['POST', 'GET'])\n@login_required\ndef apply():\n if request.method == 'POST':\n new_app = Job_Application(grade_recieved=request.form['Grade'],\n Avalialability=request.form['Avalialability'], bio=request.form\n ['bio'], gpa_overall=request.form['gpa_overall'], job_status=\n request.form['job_status'], owner=current_user)\n new_app.job_status = 'Submited'\n db.session.add(new_app)\n db.session.commit()\n db.session.refresh(new_app)\n flash('Job Application successfully Submited')\n return render_template('studenPortal.html', Jobs=Jobs.query.all(),\n Appliedjobs=Job_Application.query.filter_by(id=current_user.id))\n\n\n<mask token>\n\n\n@app.route(base_url + 'updateApplication', methods=['POST'])\n@login_required\ndef update_application(applicate):\n if request.method == 'POST':\n student = Student.query.filter_by(id=applicate.owner_id)\n student.Job_Application.job_status = 'Rejected'\n db, session.add(student)\n db.session.commit()\n db.session.refresh(student)\n return render_template('instructorPortal.html', applicates=\n Job_Application.query.all())\n\n\n<mask token>\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('login'))\n\n\ndef main():\n db.create_all()\n app.run(debug=True)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@login_manager.user_loader\ndef load_user(id):\n user = Student.query.get(int(id))\n if user is not None:\n return user\n else:\n return Instructor.query.get(int(id))\n\n\n<mask token>\n\n\n@app.route(base_url + 'instructorProfile', methods=['GET'])\ndef instructorhome():\n return render_template('Instructor_Profile.html')\n\n\n@app.route(base_url + 'Register', methods=['POST', 'GET'])\ndef createAccount():\n if request.method == 'POST':\n if request.form['options'] == 'STUDENT':\n new_user = Student(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Instructor.query.filter_by(id=new_user.id).first(\n ) is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('studenthome'))\n elif request.form['options'] == 'INSTRUCTOR':\n new_user = Instructor(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Student.query.filter_by(id=new_user.id).first() is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('instructorhome'))\n return redirect(url_for('login'))\n\n\n@app.route(base_url + 'instructors', methods=['POST'])\ndef createInstructor():\n instructor = Instructor(**request.json)\n db.session.add(instructor)\n db.session.commit()\n db.session.refresh(instructor)\n return jsonify({'status': 1, 'instructor': instructor_to_obj(instructor)}\n ), 200\n\n\n@app.route(base_url + 'post', methods=['POST', 'GET'])\ndef post():\n if request.method == 'POST':\n new_job = Jobs(request.form['position'], request.form['Semester'],\n request.form['pay'], request.form['gpa_required'])\n 
db.session.add(new_job)\n db.session.commit()\n db.session.refresh(new_job)\n return render_template('instructorPortal.html', applicates=\n Job_Application.query.all())\n\n\n@app.route(base_url + 'apply', methods=['POST', 'GET'])\n@login_required\ndef apply():\n if request.method == 'POST':\n new_app = Job_Application(grade_recieved=request.form['Grade'],\n Avalialability=request.form['Avalialability'], bio=request.form\n ['bio'], gpa_overall=request.form['gpa_overall'], job_status=\n request.form['job_status'], owner=current_user)\n new_app.job_status = 'Submited'\n db.session.add(new_app)\n db.session.commit()\n db.session.refresh(new_app)\n flash('Job Application successfully Submited')\n return render_template('studenPortal.html', Jobs=Jobs.query.all(),\n Appliedjobs=Job_Application.query.filter_by(id=current_user.id))\n\n\n<mask token>\n\n\n@app.route(base_url + 'updateApplication', methods=['POST'])\n@login_required\ndef update_application(applicate):\n if request.method == 'POST':\n student = Student.query.filter_by(id=applicate.owner_id)\n student.Job_Application.job_status = 'Rejected'\n db, session.add(student)\n db.session.commit()\n db.session.refresh(student)\n return render_template('instructorPortal.html', applicates=\n Job_Application.query.all())\n\n\n@app.route(base_url + 'cancel_Application', methods=['DELETE'])\n@login_required\ndef cancel_application():\n if request.method == 'DELETE':\n job_position = request.form['job_name']\n job_pos = current_user.jobs.filter_by(position=job_position)\n db.session.delete(job_pos)\n db.session.commit()\n db.session.refresh()\n return render_template('studenPortal.html', Jobs=Jobs.query.all(),\n Appliedjobs=Job_Application.query.filter_by(id=current_user.id),\n applied=Jobs.query.filter_by())\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('login'))\n\n\ndef main():\n db.create_all()\n app.run(debug=True)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@login_manager.user_loader\ndef load_user(id):\n user = Student.query.get(int(id))\n if user is not None:\n return user\n else:\n return Instructor.query.get(int(id))\n\n\n@app.route(base_url, methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('studenthome'))\n form = LoginForm()\n if form.validate_on_submit():\n user = Student.query.filter_by(email=form.email.data).first()\n if user is not None and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect(url_for('apply'))\n user = Instructor.query.filter_by(email=form.email.data).first()\n if user is not None and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect(url_for('post'))\n flash('Invalid username or password')\n return redirect(url_for('login'))\n return render_template('mainpage.html', title='Sign In', form=form)\n\n\n@app.route(base_url + 'studentProfile', methods=['GET'])\ndef studenthome():\n return render_template('student_Profile.html')\n\n\n@app.route(base_url + 'instructorProfile', methods=['GET'])\ndef instructorhome():\n return render_template('Instructor_Profile.html')\n\n\n@app.route(base_url + 'Register', methods=['POST', 'GET'])\ndef createAccount():\n if request.method == 'POST':\n if request.form['options'] == 'STUDENT':\n new_user = Student(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Instructor.query.filter_by(id=new_user.id).first(\n ) is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('studenthome'))\n elif request.form['options'] == 'INSTRUCTOR':\n new_user = Instructor(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], 
request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Student.query.filter_by(id=new_user.id).first() is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('instructorhome'))\n return redirect(url_for('login'))\n\n\n@app.route(base_url + 'instructors', methods=['POST'])\ndef createInstructor():\n instructor = Instructor(**request.json)\n db.session.add(instructor)\n db.session.commit()\n db.session.refresh(instructor)\n return jsonify({'status': 1, 'instructor': instructor_to_obj(instructor)}\n ), 200\n\n\n@app.route(base_url + 'post', methods=['POST', 'GET'])\ndef post():\n if request.method == 'POST':\n new_job = Jobs(request.form['position'], request.form['Semester'],\n request.form['pay'], request.form['gpa_required'])\n db.session.add(new_job)\n db.session.commit()\n db.session.refresh(new_job)\n return render_template('instructorPortal.html', applicates=\n Job_Application.query.all())\n\n\n@app.route(base_url + 'apply', methods=['POST', 'GET'])\n@login_required\ndef apply():\n if request.method == 'POST':\n new_app = Job_Application(grade_recieved=request.form['Grade'],\n Avalialability=request.form['Avalialability'], bio=request.form\n ['bio'], gpa_overall=request.form['gpa_overall'], job_status=\n request.form['job_status'], owner=current_user)\n new_app.job_status = 'Submited'\n db.session.add(new_app)\n db.session.commit()\n db.session.refresh(new_app)\n flash('Job Application successfully Submited')\n return render_template('studenPortal.html', Jobs=Jobs.query.all(),\n Appliedjobs=Job_Application.query.filter_by(id=current_user.id))\n\n\n@app.route(base_url + 'students_edit', methods=['GET', 'POST'])\n@login_required\ndef editStudent():\n if request.method == 'POST':\n current_user.gpa = request.form['editGpa']\n current_user.major = request.form['editMajor']\n db.session.add(current_user)\n 
db.session.commit()\n db.session.refresh(current_user)\n return render_template('student_Profile.html', current_user=\n current_user)\n return render_template('student_Profile.html', current_user=current_user)\n\n\n<mask token>\n\n\n@app.route(base_url + 'updateApplication', methods=['POST'])\n@login_required\ndef update_application(applicate):\n if request.method == 'POST':\n student = Student.query.filter_by(id=applicate.owner_id)\n student.Job_Application.job_status = 'Rejected'\n db, session.add(student)\n db.session.commit()\n db.session.refresh(student)\n return render_template('instructorPortal.html', applicates=\n Job_Application.query.all())\n\n\n@app.route(base_url + 'cancel_Application', methods=['DELETE'])\n@login_required\ndef cancel_application():\n if request.method == 'DELETE':\n job_position = request.form['job_name']\n job_pos = current_user.jobs.filter_by(position=job_position)\n db.session.delete(job_pos)\n db.session.commit()\n db.session.refresh()\n return render_template('studenPortal.html', Jobs=Jobs.query.all(),\n Appliedjobs=Job_Application.query.filter_by(id=current_user.id),\n applied=Jobs.query.filter_by())\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('login'))\n\n\ndef main():\n db.create_all()\n app.run(debug=True)\n\n\n<mask token>\n",
"step-4": "import os\nimport flask_sqlalchemy as sqlalchemy\nfrom flask import Flask, jsonify, request, render_template, redirect, url_for, json, flash\nfrom flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class\nfrom flask_cors import CORS\nimport datetime\nfrom flask_bootstrap import Bootstrap\nfrom flask_login import LoginManager, current_user, login_user, logout_user, login_required\nfrom flask_login import UserMixin\nfrom hashlib import md5\nfrom database.models import *\napp = Flask(__name__, static_url_path='/static')\napp.debug = True\nCORS(app)\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///sqlalchemy-demo.db'\napp.config['SECRET_KEY'] = 'Thisissupposedtobesecret!'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\nbootstrap = Bootstrap(app)\napp.config.update(DEBUG=True)\ndb = sqlalchemy.SQLAlchemy(app)\nbase_url = '/api/'\n\n\n@login_manager.user_loader\ndef load_user(id):\n user = Student.query.get(int(id))\n if user is not None:\n return user\n else:\n return Instructor.query.get(int(id))\n\n\n@app.route(base_url, methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('studenthome'))\n form = LoginForm()\n if form.validate_on_submit():\n user = Student.query.filter_by(email=form.email.data).first()\n if user is not None and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect(url_for('apply'))\n user = Instructor.query.filter_by(email=form.email.data).first()\n if user is not None and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect(url_for('post'))\n flash('Invalid username or password')\n return redirect(url_for('login'))\n return render_template('mainpage.html', title='Sign In', form=form)\n\n\n@app.route(base_url + 
'studentProfile', methods=['GET'])\ndef studenthome():\n return render_template('student_Profile.html')\n\n\n@app.route(base_url + 'instructorProfile', methods=['GET'])\ndef instructorhome():\n return render_template('Instructor_Profile.html')\n\n\n@app.route(base_url + 'Register', methods=['POST', 'GET'])\ndef createAccount():\n if request.method == 'POST':\n if request.form['options'] == 'STUDENT':\n new_user = Student(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Instructor.query.filter_by(id=new_user.id).first(\n ) is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('studenthome'))\n elif request.form['options'] == 'INSTRUCTOR':\n new_user = Instructor(request.form['first-name'], request.form[\n 'last-name'], request.form['email'], request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n while Student.query.filter_by(id=new_user.id).first() is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n login_user(new_user)\n return redirect(url_for('instructorhome'))\n return redirect(url_for('login'))\n\n\n@app.route(base_url + 'instructors', methods=['POST'])\ndef createInstructor():\n instructor = Instructor(**request.json)\n db.session.add(instructor)\n db.session.commit()\n db.session.refresh(instructor)\n return jsonify({'status': 1, 'instructor': instructor_to_obj(instructor)}\n ), 200\n\n\n@app.route(base_url + 'post', methods=['POST', 'GET'])\ndef post():\n if request.method == 'POST':\n new_job = Jobs(request.form['position'], request.form['Semester'],\n request.form['pay'], request.form['gpa_required'])\n db.session.add(new_job)\n db.session.commit()\n db.session.refresh(new_job)\n return render_template('instructorPortal.html', 
applicates=\n Job_Application.query.all())\n\n\n@app.route(base_url + 'apply', methods=['POST', 'GET'])\n@login_required\ndef apply():\n if request.method == 'POST':\n new_app = Job_Application(grade_recieved=request.form['Grade'],\n Avalialability=request.form['Avalialability'], bio=request.form\n ['bio'], gpa_overall=request.form['gpa_overall'], job_status=\n request.form['job_status'], owner=current_user)\n new_app.job_status = 'Submited'\n db.session.add(new_app)\n db.session.commit()\n db.session.refresh(new_app)\n flash('Job Application successfully Submited')\n return render_template('studenPortal.html', Jobs=Jobs.query.all(),\n Appliedjobs=Job_Application.query.filter_by(id=current_user.id))\n\n\n@app.route(base_url + 'students_edit', methods=['GET', 'POST'])\n@login_required\ndef editStudent():\n if request.method == 'POST':\n current_user.gpa = request.form['editGpa']\n current_user.major = request.form['editMajor']\n db.session.add(current_user)\n db.session.commit()\n db.session.refresh(current_user)\n return render_template('student_Profile.html', current_user=\n current_user)\n return render_template('student_Profile.html', current_user=current_user)\n\n\n@app.route(base_url + 'instructors_edit', methods=['GET', 'POST'])\n@login_required\ndef editInstructor():\n if request.method == 'POST':\n current_user.email = request.form['editEmail']\n current_user.phone = request.form['editPhone']\n current_user.office = request.form['editOffice']\n db.session.add(current_user)\n db.session.commit()\n db.session.refresh(current_user)\n return render_template('Instructor_Profile.html', current_user=\n current_user)\n return render_template('Instructor_Profile.html', current_user=current_user\n )\n\n\n@app.route(base_url + 'updateApplication', methods=['POST'])\n@login_required\ndef update_application(applicate):\n if request.method == 'POST':\n student = Student.query.filter_by(id=applicate.owner_id)\n student.Job_Application.job_status = 'Rejected'\n db, 
session.add(student)\n db.session.commit()\n db.session.refresh(student)\n return render_template('instructorPortal.html', applicates=\n Job_Application.query.all())\n\n\n@app.route(base_url + 'cancel_Application', methods=['DELETE'])\n@login_required\ndef cancel_application():\n if request.method == 'DELETE':\n job_position = request.form['job_name']\n job_pos = current_user.jobs.filter_by(position=job_position)\n db.session.delete(job_pos)\n db.session.commit()\n db.session.refresh()\n return render_template('studenPortal.html', Jobs=Jobs.query.all(),\n Appliedjobs=Job_Application.query.filter_by(id=current_user.id),\n applied=Jobs.query.filter_by())\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('login'))\n\n\ndef main():\n db.create_all()\n app.run(debug=True)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import os\n\nimport flask_sqlalchemy as sqlalchemy\nfrom flask import Flask, jsonify, request,render_template,redirect,url_for,json,flash\nfrom flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class\nfrom flask_cors import CORS\nimport datetime\nfrom flask_bootstrap import Bootstrap\n\nfrom flask_login import LoginManager,current_user, login_user,logout_user, login_required\nfrom flask_login import UserMixin\n\nfrom hashlib import md5\n\n\nfrom database.models import *\n\n\n\n#from sqlalchemy_imageattach.entity import Image, image_attachment\n\n\napp = Flask(__name__,static_url_path='/static')\napp.debug = True\nCORS(app)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n#UPLOAD_FOLDER = '../static/templates'\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///sqlalchemy-demo.db'\napp.config['SECRET_KEY'] = 'Thisissupposedtobesecret!'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nbootstrap = Bootstrap(app)\napp.config.update(DEBUG=True)\ndb = sqlalchemy.SQLAlchemy(app)\n\n\n\nbase_url = '/api/'\n\n@login_manager.user_loader\ndef load_user(id):\n user = Student.query.get(int(id))\n if user is not None:\n return user\n else:\n return Instructor.query.get(int(id))\n\n \n \n \n@app.route(base_url, methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('studenthome'))\n\n form = LoginForm()\n if form.validate_on_submit():\n\n user = Student.query.filter_by(email=form.email.data).first()\n # Login Student\n if user is not None and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect(url_for('apply'))\n\n user = Instructor.query.filter_by(email=form.email.data).first()\n # Login Instructor\n if user is not None and user.check_password(form.password.data):\n login_user(user, 
remember=form.remember_me.data)\n return redirect(url_for('post'))\n\n # Login failed\n flash('Invalid username or password')\n return redirect(url_for('login'))\n \n return render_template('mainpage.html', title='Sign In', form=form)\n\n\n\n# Route to student Profile\n@app.route(base_url + 'studentProfile', methods=['GET'])\ndef studenthome():\n return render_template('student_Profile.html')\n\n\n# Route to Instructor Profile\n@app.route(base_url + 'instructorProfile', methods=['GET'])\ndef instructorhome():\n return render_template('Instructor_Profile.html')\n\n# Route to create a student account and main page\n@app.route(base_url + 'Register', methods=['POST','GET'])\ndef createAccount():\n\n if request.method == 'POST':\n # Student option is checked\n if request.form['options'] == 'STUDENT':\n new_user = Student(request.form['first-name'],request.form['last-name'],request.form['email'],request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n\n # Make sure id is unique\n while Instructor.query.filter_by(id=new_user.id).first() is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n\n login_user(new_user)\n return redirect(url_for('studenthome'))\n\n # Instructor option is checked\n elif request.form['options'] == 'INSTRUCTOR':\n new_user = Instructor(request.form['first-name'],request.form['last-name'],request.form['email'],request.form['pwd'])\n db.session.add(new_user)\n db.session.commit()\n db.session.refresh(new_user)\n\n # Make sure id is unique\n while Student.query.filter_by(id=new_user.id).first() is not None:\n new_user.id = new_user.id + 1\n db.session.commit()\n db.session.refresh(new_user)\n\n login_user(new_user)\n return redirect(url_for('instructorhome'))\n\n return redirect(url_for('login'))\n #return render_template('studenPortal.html', Jobs = Jobs.query.all())\n\n\n\n# Route to create a instructor account\n@app.route(base_url + 'instructors', 
methods=['POST'])\ndef createInstructor():\n instructor = Instructor(**request.json)\n \n db.session.add(instructor)\n db.session.commit()\n db.session.refresh(instructor)\n return jsonify({\"status\": 1, \"instructor\": instructor_to_obj(instructor)}), 200\n\n# Route to post a job for Instructors\n@app.route(base_url + 'post', methods=['POST','GET'])\n#@login_required\ndef post():\n\n if request.method == 'POST':\n new_job = Jobs(request.form['position'],request.form['Semester'],request.form['pay'],request.form['gpa_required'])\n db.session.add(new_job)\n db.session.commit()\n db.session.refresh(new_job)\n #,applicates = Job_Application.query.all()\n return render_template('instructorPortal.html',applicates = Job_Application.query.all())\n\n# Route to Display jobs for students\n@app.route(base_url + 'apply', methods=['POST','GET'])\n@login_required\ndef apply():\n\n \n if request.method == 'POST':\n #temp_student = Student(first_name=current_user.first_name,last_name=current_user.last_name,email=current_user.email,password=current_user.password)\n #db.session.add(temp_student)\n #db.session.commit()\n new_app = Job_Application(grade_recieved=request.form['Grade'],Avalialability=request.form['Avalialability'],bio=request.form['bio'],gpa_overall=request.form['gpa_overall'],job_status=request.form['job_status'],owner=current_user)\n new_app.job_status = \"Submited\" \n \n #new_app = Job_Application(owner=temp_student)\n db.session.add(new_app)\n db.session.commit()\n db.session.refresh(new_app)\n flash(\"Job Application successfully Submited\")\n\n \n\n\n return render_template('studenPortal.html', Jobs = Jobs.query.all(),Appliedjobs = Job_Application.query.filter_by(id=current_user.id))\n\n\n# Route to edit info in a student account\n# Edit ONLY major, gpa and grad_date\n@app.route(base_url + 'students_edit', methods=['GET', 'POST'])\n@login_required\ndef editStudent():\n\n if request.method == 'POST':\n\n current_user.gpa = request.form['editGpa']\n 
current_user.major = request.form['editMajor']\n\n db.session.add(current_user)\n db.session.commit()\n db.session.refresh(current_user)\n\n return render_template('student_Profile.html',current_user=current_user)\n\n return render_template('student_Profile.html',current_user=current_user)\n\n \n\n\n# Route to edit info in an Instructor account\n# Edit ONLY email, office, and phone\n@app.route(base_url + 'instructors_edit', methods=['GET', 'POST'])\n@login_required\ndef editInstructor():\n\n if request.method == 'POST':\n\n current_user.email = request.form['editEmail']\n current_user.phone = request.form['editPhone']\n current_user.office = request.form['editOffice']\n\n db.session.add(current_user)\n db.session.commit()\n db.session.refresh(current_user)\n\n return render_template('Instructor_Profile.html',current_user=current_user)\n\n return render_template('Instructor_Profile.html',current_user=current_user)\n \n# Route to update Student Application \n@app.route(base_url + 'updateApplication', methods=['POST'])\n@login_required\ndef update_application(applicate):\n \n if request.method == 'POST':\n student = Student.query.filter_by(id =applicate.owner_id)\n student.Job_Application.job_status = \"Rejected\"\n db,session.add(student)\n db.session.commit()\n db.session.refresh(student)\n return render_template('instructorPortal.html',applicates = Job_Application.query.all())\n\n\n\n\n# Route to Delete student Application\n@app.route(base_url + 'cancel_Application', methods=['DELETE'])\n@login_required\ndef cancel_application():\n\n if request.method == 'DELETE':\n job_position = request.form['job_name']\n\n job_pos = current_user.jobs.filter_by(position=job_position)\n db.session.delete(job_pos)\n db.session.commit()\n db.session.refresh()\n return render_template('studenPortal.html', Jobs = Jobs.query.all(),Appliedjobs = Job_Application.query.filter_by(id=current_user.id),applied=Jobs.query.filter_by())\n\n\n# Route to Login out User\n@app.route('/logout')\ndef 
logout():\n logout_user()\n return redirect(url_for('login'))\n\n\ndef main():\n db.create_all() # creates the tables you've provided\n app.run(debug=True) # runs the Flask application \n \nif __name__ == '__main__':\n main()\n",
"step-ids": [
9,
10,
13,
17,
18
]
}
|
[
9,
10,
13,
17,
18
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('movies.csv', 'w') as file:
csv_writer = writer(file)
csv_writer.writerow(['Name', 'Year'])
csv_writer.writerow(['Ratchasan', 2018])
csv_writer.writerow(['Vadachennai', 2018])
csv_writer.writerow(['Naran', 2007])
<|reserved_special_token_1|>
from csv import writer
with open('movies.csv', 'w') as file:
csv_writer = writer(file)
csv_writer.writerow(['Name', 'Year'])
csv_writer.writerow(['Ratchasan', 2018])
csv_writer.writerow(['Vadachennai', 2018])
csv_writer.writerow(['Naran', 2007])
<|reserved_special_token_1|>
from csv import writer

# Write a small movie catalogue to movies.csv.
# newline='' is required by the csv module documentation: the writer emits
# its own '\r\n' row terminator, and without newline='' that terminator is
# translated again on Windows, producing a blank line between every record.
with open("movies.csv", "w", newline="") as file:
    csv_writer = writer(file)
    csv_writer.writerow(['Name', 'Year'])  # header row
    csv_writer.writerow(['Ratchasan', 2018])
    csv_writer.writerow(['Vadachennai', 2018])
    csv_writer.writerow(['Naran', 2007])
|
flexible
|
{
"blob_id": "83e231480c618d290089340c642313bbba4f1070",
"index": 2035,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('movies.csv', 'w') as file:\n csv_writer = writer(file)\n csv_writer.writerow(['Name', 'Year'])\n csv_writer.writerow(['Ratchasan', 2018])\n csv_writer.writerow(['Vadachennai', 2018])\n csv_writer.writerow(['Naran', 2007])\n",
"step-3": "from csv import writer\nwith open('movies.csv', 'w') as file:\n csv_writer = writer(file)\n csv_writer.writerow(['Name', 'Year'])\n csv_writer.writerow(['Ratchasan', 2018])\n csv_writer.writerow(['Vadachennai', 2018])\n csv_writer.writerow(['Naran', 2007])\n",
"step-4": "from csv import writer\nwith open(\"movies.csv\",\"w\") as file:\n csv_writer=writer(file)\n csv_writer.writerow(['Name','Year'])\n csv_writer.writerow(['Ratchasan',2018])\n csv_writer.writerow(['Vadachennai',2018])\n csv_writer.writerow(['Naran',2007])\n \n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
ds0Bundle.setOrganization('IETF Trunk MIB Working Group')
if mibBuilder.loadTexts:
ds0Bundle.setContactInfo(
""" David Fowler
Postal: Newbridge Networks Corporation
600 March Road
Kanata, Ontario, Canada K2K 2E6
Tel: +1 613 591 3600
Fax: +1 613 599 3619
E-mail: davef@newbridge.com"""
)
if mibBuilder.loadTexts:
ds0Bundle.setDescription(
'The MIB module to describe\nDS0 Bundle interfaces objects.')
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
dsx0BondingTable.setDescription('The DS0 Bonding table.')
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
dsx0BondingEntry.setDescription(
"""An entry in the DS0 Bonding table. There is a
row in this table for each DS0Bundle interface."""
)
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
dsx0BondMode.setDescription(
"""This object indicates which BONDing mode is used,
if any, for a ds0Bundle. Mode0 provides parameter
and number exchange with no synchronization. Mode
1 provides parameter and number exchange. Mode 1
also provides synchronization during
initialization but does not include inband
monitoring. Mode 2 provides all of the above plus
inband monitoring. Mode 2 also steals 1/64th of
the bandwidth of each channel (thus not supporting
n x 56/64 kbit/s data channels for most values of
n). Mode 3 provides all of the above, but also
provides n x 56/64 kbit/s data channels. Most
common implementations of Mode 3 add an extra
channel to support the inband monitoring overhead.
ModeNone should be used when the interface is not
performing bandwidth-on-demand."""
)
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
dsx0BondStatus.setDescription(
"""This object indicates the current status of the
bonding call using this ds0Bundle. idle(1) should
be used when the bonding mode is set to none(1)."""
)
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
dsx0BondRowStatus.setDescription(
"""This object is used to create new rows in this
table, modify existing rows, and to delete
existing rows."""
)
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
dsx0BundleNextIndex.setDescription(
"""This object is used to assist the manager in
selecting a value for dsx0BundleIndex. Because
this object is of syntax TestAndIncr (see the
SNMPv2-TC document, RFC 1903) it can also be used
to avoid race conditions with multiple managers
trying to create rows in the table.
If the result of the SET for dsx0BundleNextIndex
is not success, this means the value has been
changed from index (i.e. another manager used the
value), so a new value is required.
The algorithm is:
done = false
while done == false
index = GET (dsx0BundleNextIndex.0)
SET (dsx0BundleNextIndex.0=index)
if (set failed)
done = false
else
SET(dsx0BundleRowStatus.index=createAndGo)
if (set failed)
done = false
else
done = true
other error handling"""
)
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
dsx0BundleTable.setDescription(
"""There is an row in this table for each ds0Bundle
in the system. This table can be used to
(indirectly) create rows in the ifTable with
ifType = 'ds0Bundle(82)'."""
)
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
dsx0BundleEntry.setDescription(
"""There is a row in entry in this table for each
ds0Bundle interface."""
)
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
dsx0BundleIndex.setDescription(
"""A unique identifier for a ds0Bundle. This is not
the same value as ifIndex. This table is not
indexed by ifIndex because the manager has to
choose the index in a createable row and the agent
must be allowed to select ifIndex values."""
)
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
dsx0BundleIfIndex.setDescription(
"""The ifIndex value the agent selected for the
(new) ds0Bundle interface."""
)
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
dsx0BundleCircuitIdentifier.setDescription(
"""This variable contains the transmission vendor's
circuit identifier, for the purpose of
facilitating troubleshooting."""
)
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
dsx0BundleRowStatus.setDescription(
"""This object is used to create and delete rows in
this table.""")
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
ds0BondingGroup.setDescription(
"""A collection of objects providing
configuration information applicable
to all DS0 interfaces."""
)
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
ds0BundleConfigGroup.setDescription(
"""A collection of objects providing the ability to
create a new ds0Bundle in the ifTable as well as
configuration information about the ds0Bundle."""
)
<|reserved_special_token_0|>
if mibBuilder.loadTexts:
ds0BundleCompliance.setDescription(
'The compliance statement for DS0Bundle\ninterfaces.')
mibBuilder.exportSymbols('DS0BUNDLE-MIB', PYSNMP_MODULE_ID=ds0Bundle)
mibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0Bundle=ds0Bundle,
dsx0BondingTable=dsx0BondingTable, dsx0BondingEntry=dsx0BondingEntry,
dsx0BondMode=dsx0BondMode, dsx0BondStatus=dsx0BondStatus,
dsx0BondRowStatus=dsx0BondRowStatus, dsx0BundleNextIndex=
dsx0BundleNextIndex, dsx0BundleTable=dsx0BundleTable, dsx0BundleEntry=
dsx0BundleEntry, dsx0BundleIndex=dsx0BundleIndex, dsx0BundleIfIndex=
dsx0BundleIfIndex, dsx0BundleCircuitIdentifier=
dsx0BundleCircuitIdentifier, dsx0BundleRowStatus=dsx0BundleRowStatus,
ds0BundleConformance=ds0BundleConformance, ds0BundleGroups=
ds0BundleGroups, ds0BundleCompliances=ds0BundleCompliances)
mibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0BondingGroup=ds0BondingGroup,
ds0BundleConfigGroup=ds0BundleConfigGroup)
mibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0BundleCompliance=
ds0BundleCompliance)
<|reserved_special_token_1|>
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols('ASN1',
'Integer', 'ObjectIdentifier', 'OctetString')
NamedValues, = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint,
ValueRangeConstraint, ValueSizeConstraint) = (mibBuilder.importSymbols(
'ASN1-REFINEMENT', 'ConstraintsIntersection', 'ConstraintsUnion',
'SingleValueConstraint', 'ValueRangeConstraint', 'ValueSizeConstraint'))
InterfaceIndex, ifIndex = mibBuilder.importSymbols('IF-MIB',
'InterfaceIndex', 'ifIndex')
ModuleCompliance, ObjectGroup = mibBuilder.importSymbols('SNMPv2-CONF',
'ModuleCompliance', 'ObjectGroup')
(Bits, Integer32, ModuleIdentity, MibIdentifier, MibScalar, MibTable,
MibTableRow, MibTableColumn, TimeTicks, transmission) = (mibBuilder.
importSymbols('SNMPv2-SMI', 'Bits', 'Integer32', 'ModuleIdentity',
'MibIdentifier', 'MibScalar', 'MibTable', 'MibTableRow',
'MibTableColumn', 'TimeTicks', 'transmission'))
DisplayString, RowStatus, TestAndIncr = mibBuilder.importSymbols('SNMPv2-TC',
'DisplayString', 'RowStatus', 'TestAndIncr')
ds0Bundle = ModuleIdentity((1, 3, 6, 1, 2, 1, 10, 82)).setRevisions((
'1998-07-16 16:30', '1998-05-24 20:10'))
if mibBuilder.loadTexts:
ds0Bundle.setOrganization('IETF Trunk MIB Working Group')
if mibBuilder.loadTexts:
ds0Bundle.setContactInfo(
""" David Fowler
Postal: Newbridge Networks Corporation
600 March Road
Kanata, Ontario, Canada K2K 2E6
Tel: +1 613 591 3600
Fax: +1 613 599 3619
E-mail: davef@newbridge.com"""
)
if mibBuilder.loadTexts:
ds0Bundle.setDescription(
'The MIB module to describe\nDS0 Bundle interfaces objects.')
dsx0BondingTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 1))
if mibBuilder.loadTexts:
dsx0BondingTable.setDescription('The DS0 Bonding table.')
dsx0BondingEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 1, 1)).setIndexNames(
(0, 'IF-MIB', 'ifIndex'))
if mibBuilder.loadTexts:
dsx0BondingEntry.setDescription(
"""An entry in the DS0 Bonding table. There is a
row in this table for each DS0Bundle interface."""
)
dsx0BondMode = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 1), Integer(
).subtype(subtypeSpec=SingleValueConstraint(1, 5, 6, 3, 4, 2)).subtype(
namedValues=NamedValues(('none', 1), ('other', 2), ('mode0', 3), (
'mode1', 4), ('mode2', 5), ('mode3', 6)))).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
dsx0BondMode.setDescription(
"""This object indicates which BONDing mode is used,
if any, for a ds0Bundle. Mode0 provides parameter
and number exchange with no synchronization. Mode
1 provides parameter and number exchange. Mode 1
also provides synchronization during
initialization but does not include inband
monitoring. Mode 2 provides all of the above plus
inband monitoring. Mode 2 also steals 1/64th of
the bandwidth of each channel (thus not supporting
n x 56/64 kbit/s data channels for most values of
n). Mode 3 provides all of the above, but also
provides n x 56/64 kbit/s data channels. Most
common implementations of Mode 3 add an extra
channel to support the inband monitoring overhead.
ModeNone should be used when the interface is not
performing bandwidth-on-demand."""
)
dsx0BondStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 2),
Integer().subtype(subtypeSpec=SingleValueConstraint(1, 3, 2)).subtype(
namedValues=NamedValues(('idle', 1), ('callSetup', 2), ('dataTransfer',
3)))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
dsx0BondStatus.setDescription(
"""This object indicates the current status of the
bonding call using this ds0Bundle. idle(1) should
be used when the bonding mode is set to none(1)."""
)
dsx0BondRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 3),
RowStatus()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
dsx0BondRowStatus.setDescription(
"""This object is used to create new rows in this
table, modify existing rows, and to delete
existing rows."""
)
dsx0BundleNextIndex = MibScalar((1, 3, 6, 1, 2, 1, 10, 82, 2), TestAndIncr()
).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
dsx0BundleNextIndex.setDescription(
"""This object is used to assist the manager in
selecting a value for dsx0BundleIndex. Because
this object is of syntax TestAndIncr (see the
SNMPv2-TC document, RFC 1903) it can also be used
to avoid race conditions with multiple managers
trying to create rows in the table.
If the result of the SET for dsx0BundleNextIndex
is not success, this means the value has been
changed from index (i.e. another manager used the
value), so a new value is required.
The algorithm is:
done = false
while done == false
index = GET (dsx0BundleNextIndex.0)
SET (dsx0BundleNextIndex.0=index)
if (set failed)
done = false
else
SET(dsx0BundleRowStatus.index=createAndGo)
if (set failed)
done = false
else
done = true
other error handling"""
)
dsx0BundleTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 3))
if mibBuilder.loadTexts:
dsx0BundleTable.setDescription(
"""There is an row in this table for each ds0Bundle
in the system. This table can be used to
(indirectly) create rows in the ifTable with
ifType = 'ds0Bundle(82)'."""
)
dsx0BundleEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 3, 1)).setIndexNames((
0, 'DS0BUNDLE-MIB', 'dsx0BundleIndex'))
if mibBuilder.loadTexts:
dsx0BundleEntry.setDescription(
"""There is a row in entry in this table for each
ds0Bundle interface."""
)
dsx0BundleIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 1),
Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))
).setMaxAccess('noaccess')
if mibBuilder.loadTexts:
dsx0BundleIndex.setDescription(
"""A unique identifier for a ds0Bundle. This is not
the same value as ifIndex. This table is not
indexed by ifIndex because the manager has to
choose the index in a createable row and the agent
must be allowed to select ifIndex values."""
)
dsx0BundleIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 2),
InterfaceIndex()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
dsx0BundleIfIndex.setDescription(
"""The ifIndex value the agent selected for the
(new) ds0Bundle interface."""
)
dsx0BundleCircuitIdentifier = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3,
1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))
).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
dsx0BundleCircuitIdentifier.setDescription(
"""This variable contains the transmission vendor's
circuit identifier, for the purpose of
facilitating troubleshooting."""
)
dsx0BundleRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 4),
RowStatus()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
dsx0BundleRowStatus.setDescription(
"""This object is used to create and delete rows in
this table.""")
ds0BundleConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4))
ds0BundleGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 1))
ds0BundleCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 2))
ds0BondingGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 82, 4, 1, 1)).setObjects(*
(('DS0BUNDLE-MIB', 'dsx0BondMode'), ('DS0BUNDLE-MIB', 'dsx0BondStatus'),
('DS0BUNDLE-MIB', 'dsx0BondRowStatus')))
if mibBuilder.loadTexts:
ds0BondingGroup.setDescription(
"""A collection of objects providing
configuration information applicable
to all DS0 interfaces."""
)
ds0BundleConfigGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 82, 4, 1, 2)
).setObjects(*(('DS0BUNDLE-MIB', 'dsx0BundleIfIndex'), ('DS0BUNDLE-MIB',
'dsx0BundleRowStatus'), ('DS0BUNDLE-MIB', 'dsx0BundleCircuitIdentifier'
), ('DS0BUNDLE-MIB', 'dsx0BundleNextIndex')))
if mibBuilder.loadTexts:
ds0BundleConfigGroup.setDescription(
"""A collection of objects providing the ability to
create a new ds0Bundle in the ifTable as well as
configuration information about the ds0Bundle."""
)
ds0BundleCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 10, 82, 4, 2, 1)
).setObjects(*(('DS0BUNDLE-MIB', 'ds0BundleConfigGroup'), (
'DS0BUNDLE-MIB', 'ds0BondingGroup')))
if mibBuilder.loadTexts:
ds0BundleCompliance.setDescription(
'The compliance statement for DS0Bundle\ninterfaces.')
mibBuilder.exportSymbols('DS0BUNDLE-MIB', PYSNMP_MODULE_ID=ds0Bundle)
mibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0Bundle=ds0Bundle,
dsx0BondingTable=dsx0BondingTable, dsx0BondingEntry=dsx0BondingEntry,
dsx0BondMode=dsx0BondMode, dsx0BondStatus=dsx0BondStatus,
dsx0BondRowStatus=dsx0BondRowStatus, dsx0BundleNextIndex=
dsx0BundleNextIndex, dsx0BundleTable=dsx0BundleTable, dsx0BundleEntry=
dsx0BundleEntry, dsx0BundleIndex=dsx0BundleIndex, dsx0BundleIfIndex=
dsx0BundleIfIndex, dsx0BundleCircuitIdentifier=
dsx0BundleCircuitIdentifier, dsx0BundleRowStatus=dsx0BundleRowStatus,
ds0BundleConformance=ds0BundleConformance, ds0BundleGroups=
ds0BundleGroups, ds0BundleCompliances=ds0BundleCompliances)
mibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0BondingGroup=ds0BondingGroup,
ds0BundleConfigGroup=ds0BundleConfigGroup)
mibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0BundleCompliance=
ds0BundleCompliance)
<|reserved_special_token_1|>
# PySNMP SMI module. Autogenerated from smidump -f python DS0BUNDLE-MIB
# by libsmi2pysnmp-0.1.3 at Thu May 22 11:57:37 2014,
# Python version sys.version_info(major=2, minor=7, micro=2, releaselevel='final', serial=0)
# Imports
( Integer, ObjectIdentifier, OctetString, ) = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
( InterfaceIndex, ifIndex, ) = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "ifIndex")
( ModuleCompliance, ObjectGroup, ) = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup")
( Bits, Integer32, ModuleIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, transmission, ) = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Integer32", "ModuleIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "transmission")
( DisplayString, RowStatus, TestAndIncr, ) = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TestAndIncr")
# Objects
ds0Bundle = ModuleIdentity((1, 3, 6, 1, 2, 1, 10, 82)).setRevisions(("1998-07-16 16:30","1998-05-24 20:10",))
if mibBuilder.loadTexts: ds0Bundle.setOrganization("IETF Trunk MIB Working Group")
if mibBuilder.loadTexts: ds0Bundle.setContactInfo(" David Fowler\n\nPostal: Newbridge Networks Corporation\n 600 March Road\n Kanata, Ontario, Canada K2K 2E6\n\n Tel: +1 613 591 3600\n Fax: +1 613 599 3619\n\nE-mail: davef@newbridge.com")
if mibBuilder.loadTexts: ds0Bundle.setDescription("The MIB module to describe\nDS0 Bundle interfaces objects.")
dsx0BondingTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 1))
if mibBuilder.loadTexts: dsx0BondingTable.setDescription("The DS0 Bonding table.")
dsx0BondingEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 1, 1)).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: dsx0BondingEntry.setDescription("An entry in the DS0 Bonding table. There is a\nrow in this table for each DS0Bundle interface.")
dsx0BondMode = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 1), Integer().subtype(subtypeSpec=SingleValueConstraint(1,5,6,3,4,2,)).subtype(namedValues=NamedValues(("none", 1), ("other", 2), ("mode0", 3), ("mode1", 4), ("mode2", 5), ("mode3", 6), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dsx0BondMode.setDescription("This object indicates which BONDing mode is used,\nif any, for a ds0Bundle. Mode0 provides parameter\nand number exchange with no synchronization. Mode\n1 provides parameter and number exchange. Mode 1\nalso provides synchronization during\ninitialization but does not include inband\nmonitoring. Mode 2 provides all of the above plus\ninband monitoring. Mode 2 also steals 1/64th of\nthe bandwidth of each channel (thus not supporting\nn x 56/64 kbit/s data channels for most values of\nn). Mode 3 provides all of the above, but also\nprovides n x 56/64 kbit/s data channels. Most\ncommon implementations of Mode 3 add an extra\nchannel to support the inband monitoring overhead.\nModeNone should be used when the interface is not\nperforming bandwidth-on-demand.")
dsx0BondStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 2), Integer().subtype(subtypeSpec=SingleValueConstraint(1,3,2,)).subtype(namedValues=NamedValues(("idle", 1), ("callSetup", 2), ("dataTransfer", 3), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx0BondStatus.setDescription("This object indicates the current status of the\nbonding call using this ds0Bundle. idle(1) should\nbe used when the bonding mode is set to none(1).")
dsx0BondRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dsx0BondRowStatus.setDescription("This object is used to create new rows in this\ntable, modify existing rows, and to delete\nexisting rows.")
dsx0BundleNextIndex = MibScalar((1, 3, 6, 1, 2, 1, 10, 82, 2), TestAndIncr()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx0BundleNextIndex.setDescription("This object is used to assist the manager in\nselecting a value for dsx0BundleIndex. Because\nthis object is of syntax TestAndIncr (see the\nSNMPv2-TC document, RFC 1903) it can also be used\nto avoid race conditions with multiple managers\ntrying to create rows in the table.\n\nIf the result of the SET for dsx0BundleNextIndex\nis not success, this means the value has been\nchanged from index (i.e. another manager used the\nvalue), so a new value is required.\n\nThe algorithm is:\ndone = false\nwhile done == false\n index = GET (dsx0BundleNextIndex.0)\n SET (dsx0BundleNextIndex.0=index)\n if (set failed)\n done = false\n else\n SET(dsx0BundleRowStatus.index=createAndGo)\n if (set failed)\n done = false\n else\n done = true\n other error handling")
dsx0BundleTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 3))
if mibBuilder.loadTexts: dsx0BundleTable.setDescription("There is an row in this table for each ds0Bundle\nin the system. This table can be used to\n(indirectly) create rows in the ifTable with\nifType = 'ds0Bundle(82)'.")
dsx0BundleEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 3, 1)).setIndexNames((0, "DS0BUNDLE-MIB", "dsx0BundleIndex"))
if mibBuilder.loadTexts: dsx0BundleEntry.setDescription("There is a row in entry in this table for each\nds0Bundle interface.")
dsx0BundleIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: dsx0BundleIndex.setDescription("A unique identifier for a ds0Bundle. This is not\nthe same value as ifIndex. This table is not\nindexed by ifIndex because the manager has to\nchoose the index in a createable row and the agent\nmust be allowed to select ifIndex values.")
dsx0BundleIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 2), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx0BundleIfIndex.setDescription("The ifIndex value the agent selected for the\n(new) ds0Bundle interface.")
dsx0BundleCircuitIdentifier = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dsx0BundleCircuitIdentifier.setDescription("This variable contains the transmission vendor's\ncircuit identifier, for the purpose of\nfacilitating troubleshooting.")
dsx0BundleRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dsx0BundleRowStatus.setDescription("This object is used to create and delete rows in\nthis table.")
ds0BundleConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4))
ds0BundleGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 1))
ds0BundleCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 2))
# Augmentions
# Groups
ds0BondingGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 82, 4, 1, 1)).setObjects(*(("DS0BUNDLE-MIB", "dsx0BondMode"), ("DS0BUNDLE-MIB", "dsx0BondStatus"), ("DS0BUNDLE-MIB", "dsx0BondRowStatus"), ) )
if mibBuilder.loadTexts: ds0BondingGroup.setDescription("A collection of objects providing\nconfiguration information applicable\nto all DS0 interfaces.")
ds0BundleConfigGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 82, 4, 1, 2)).setObjects(*(("DS0BUNDLE-MIB", "dsx0BundleIfIndex"), ("DS0BUNDLE-MIB", "dsx0BundleRowStatus"), ("DS0BUNDLE-MIB", "dsx0BundleCircuitIdentifier"), ("DS0BUNDLE-MIB", "dsx0BundleNextIndex"), ) )
if mibBuilder.loadTexts: ds0BundleConfigGroup.setDescription("A collection of objects providing the ability to\ncreate a new ds0Bundle in the ifTable as well as\nconfiguration information about the ds0Bundle.")
# Compliances
ds0BundleCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 10, 82, 4, 2, 1)).setObjects(*(("DS0BUNDLE-MIB", "ds0BundleConfigGroup"), ("DS0BUNDLE-MIB", "ds0BondingGroup"), ) )
if mibBuilder.loadTexts: ds0BundleCompliance.setDescription("The compliance statement for DS0Bundle\ninterfaces.")
# Exports
# Module identity
mibBuilder.exportSymbols("DS0BUNDLE-MIB", PYSNMP_MODULE_ID=ds0Bundle)
# Objects
mibBuilder.exportSymbols("DS0BUNDLE-MIB", ds0Bundle=ds0Bundle, dsx0BondingTable=dsx0BondingTable, dsx0BondingEntry=dsx0BondingEntry, dsx0BondMode=dsx0BondMode, dsx0BondStatus=dsx0BondStatus, dsx0BondRowStatus=dsx0BondRowStatus, dsx0BundleNextIndex=dsx0BundleNextIndex, dsx0BundleTable=dsx0BundleTable, dsx0BundleEntry=dsx0BundleEntry, dsx0BundleIndex=dsx0BundleIndex, dsx0BundleIfIndex=dsx0BundleIfIndex, dsx0BundleCircuitIdentifier=dsx0BundleCircuitIdentifier, dsx0BundleRowStatus=dsx0BundleRowStatus, ds0BundleConformance=ds0BundleConformance, ds0BundleGroups=ds0BundleGroups, ds0BundleCompliances=ds0BundleCompliances)
# Groups
mibBuilder.exportSymbols("DS0BUNDLE-MIB", ds0BondingGroup=ds0BondingGroup, ds0BundleConfigGroup=ds0BundleConfigGroup)
# Compliances
mibBuilder.exportSymbols("DS0BUNDLE-MIB", ds0BundleCompliance=ds0BundleCompliance)
|
flexible
|
{
"blob_id": "fab15d34d29301e53a26577725cdd66dca7507bc",
"index": 2330,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif mibBuilder.loadTexts:\n ds0Bundle.setOrganization('IETF Trunk MIB Working Group')\nif mibBuilder.loadTexts:\n ds0Bundle.setContactInfo(\n \"\"\" David Fowler\n\nPostal: Newbridge Networks Corporation\n 600 March Road\n Kanata, Ontario, Canada K2K 2E6\n\n Tel: +1 613 591 3600\n Fax: +1 613 599 3619\n\nE-mail: davef@newbridge.com\"\"\"\n )\nif mibBuilder.loadTexts:\n ds0Bundle.setDescription(\n 'The MIB module to describe\\nDS0 Bundle interfaces objects.')\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BondingTable.setDescription('The DS0 Bonding table.')\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BondingEntry.setDescription(\n \"\"\"An entry in the DS0 Bonding table. There is a\nrow in this table for each DS0Bundle interface.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BondMode.setDescription(\n \"\"\"This object indicates which BONDing mode is used,\nif any, for a ds0Bundle. Mode0 provides parameter\nand number exchange with no synchronization. Mode\n1 provides parameter and number exchange. Mode 1\nalso provides synchronization during\ninitialization but does not include inband\nmonitoring. Mode 2 provides all of the above plus\ninband monitoring. Mode 2 also steals 1/64th of\nthe bandwidth of each channel (thus not supporting\nn x 56/64 kbit/s data channels for most values of\nn). Mode 3 provides all of the above, but also\nprovides n x 56/64 kbit/s data channels. Most\ncommon implementations of Mode 3 add an extra\nchannel to support the inband monitoring overhead.\nModeNone should be used when the interface is not\nperforming bandwidth-on-demand.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BondStatus.setDescription(\n \"\"\"This object indicates the current status of the\nbonding call using this ds0Bundle. 
idle(1) should\nbe used when the bonding mode is set to none(1).\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BondRowStatus.setDescription(\n \"\"\"This object is used to create new rows in this\ntable, modify existing rows, and to delete\nexisting rows.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BundleNextIndex.setDescription(\n \"\"\"This object is used to assist the manager in\nselecting a value for dsx0BundleIndex. Because\nthis object is of syntax TestAndIncr (see the\nSNMPv2-TC document, RFC 1903) it can also be used\nto avoid race conditions with multiple managers\ntrying to create rows in the table.\n\nIf the result of the SET for dsx0BundleNextIndex\nis not success, this means the value has been\nchanged from index (i.e. another manager used the\nvalue), so a new value is required.\n\nThe algorithm is:\ndone = false\nwhile done == false\n index = GET (dsx0BundleNextIndex.0)\n SET (dsx0BundleNextIndex.0=index)\n if (set failed)\n done = false\n else\n SET(dsx0BundleRowStatus.index=createAndGo)\n if (set failed)\n done = false\n else\n done = true\n other error handling\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BundleTable.setDescription(\n \"\"\"There is an row in this table for each ds0Bundle\nin the system. This table can be used to\n(indirectly) create rows in the ifTable with\nifType = 'ds0Bundle(82)'.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BundleEntry.setDescription(\n \"\"\"There is a row in entry in this table for each\nds0Bundle interface.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BundleIndex.setDescription(\n \"\"\"A unique identifier for a ds0Bundle. This is not\nthe same value as ifIndex. 
This table is not\nindexed by ifIndex because the manager has to\nchoose the index in a createable row and the agent\nmust be allowed to select ifIndex values.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BundleIfIndex.setDescription(\n \"\"\"The ifIndex value the agent selected for the\n(new) ds0Bundle interface.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BundleCircuitIdentifier.setDescription(\n \"\"\"This variable contains the transmission vendor's\ncircuit identifier, for the purpose of\nfacilitating troubleshooting.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n dsx0BundleRowStatus.setDescription(\n \"\"\"This object is used to create and delete rows in\nthis table.\"\"\")\n<mask token>\nif mibBuilder.loadTexts:\n ds0BondingGroup.setDescription(\n \"\"\"A collection of objects providing\nconfiguration information applicable\nto all DS0 interfaces.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n ds0BundleConfigGroup.setDescription(\n \"\"\"A collection of objects providing the ability to\ncreate a new ds0Bundle in the ifTable as well as\nconfiguration information about the ds0Bundle.\"\"\"\n )\n<mask token>\nif mibBuilder.loadTexts:\n ds0BundleCompliance.setDescription(\n 'The compliance statement for DS0Bundle\\ninterfaces.')\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', PYSNMP_MODULE_ID=ds0Bundle)\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0Bundle=ds0Bundle,\n dsx0BondingTable=dsx0BondingTable, dsx0BondingEntry=dsx0BondingEntry,\n dsx0BondMode=dsx0BondMode, dsx0BondStatus=dsx0BondStatus,\n dsx0BondRowStatus=dsx0BondRowStatus, dsx0BundleNextIndex=\n dsx0BundleNextIndex, dsx0BundleTable=dsx0BundleTable, dsx0BundleEntry=\n dsx0BundleEntry, dsx0BundleIndex=dsx0BundleIndex, dsx0BundleIfIndex=\n dsx0BundleIfIndex, dsx0BundleCircuitIdentifier=\n dsx0BundleCircuitIdentifier, dsx0BundleRowStatus=dsx0BundleRowStatus,\n ds0BundleConformance=ds0BundleConformance, ds0BundleGroups=\n ds0BundleGroups, 
ds0BundleCompliances=ds0BundleCompliances)\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0BondingGroup=ds0BondingGroup,\n ds0BundleConfigGroup=ds0BundleConfigGroup)\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0BundleCompliance=\n ds0BundleCompliance)\n",
"step-3": "Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols('ASN1',\n 'Integer', 'ObjectIdentifier', 'OctetString')\nNamedValues, = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')\n(ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint,\n ValueRangeConstraint, ValueSizeConstraint) = (mibBuilder.importSymbols(\n 'ASN1-REFINEMENT', 'ConstraintsIntersection', 'ConstraintsUnion',\n 'SingleValueConstraint', 'ValueRangeConstraint', 'ValueSizeConstraint'))\nInterfaceIndex, ifIndex = mibBuilder.importSymbols('IF-MIB',\n 'InterfaceIndex', 'ifIndex')\nModuleCompliance, ObjectGroup = mibBuilder.importSymbols('SNMPv2-CONF',\n 'ModuleCompliance', 'ObjectGroup')\n(Bits, Integer32, ModuleIdentity, MibIdentifier, MibScalar, MibTable,\n MibTableRow, MibTableColumn, TimeTicks, transmission) = (mibBuilder.\n importSymbols('SNMPv2-SMI', 'Bits', 'Integer32', 'ModuleIdentity',\n 'MibIdentifier', 'MibScalar', 'MibTable', 'MibTableRow',\n 'MibTableColumn', 'TimeTicks', 'transmission'))\nDisplayString, RowStatus, TestAndIncr = mibBuilder.importSymbols('SNMPv2-TC',\n 'DisplayString', 'RowStatus', 'TestAndIncr')\nds0Bundle = ModuleIdentity((1, 3, 6, 1, 2, 1, 10, 82)).setRevisions((\n '1998-07-16 16:30', '1998-05-24 20:10'))\nif mibBuilder.loadTexts:\n ds0Bundle.setOrganization('IETF Trunk MIB Working Group')\nif mibBuilder.loadTexts:\n ds0Bundle.setContactInfo(\n \"\"\" David Fowler\n\nPostal: Newbridge Networks Corporation\n 600 March Road\n Kanata, Ontario, Canada K2K 2E6\n\n Tel: +1 613 591 3600\n Fax: +1 613 599 3619\n\nE-mail: davef@newbridge.com\"\"\"\n )\nif mibBuilder.loadTexts:\n ds0Bundle.setDescription(\n 'The MIB module to describe\\nDS0 Bundle interfaces objects.')\ndsx0BondingTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 1))\nif mibBuilder.loadTexts:\n dsx0BondingTable.setDescription('The DS0 Bonding table.')\ndsx0BondingEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 1, 1)).setIndexNames(\n (0, 'IF-MIB', 'ifIndex'))\nif 
mibBuilder.loadTexts:\n dsx0BondingEntry.setDescription(\n \"\"\"An entry in the DS0 Bonding table. There is a\nrow in this table for each DS0Bundle interface.\"\"\"\n )\ndsx0BondMode = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 1), Integer(\n ).subtype(subtypeSpec=SingleValueConstraint(1, 5, 6, 3, 4, 2)).subtype(\n namedValues=NamedValues(('none', 1), ('other', 2), ('mode0', 3), (\n 'mode1', 4), ('mode2', 5), ('mode3', 6)))).setMaxAccess('readcreate')\nif mibBuilder.loadTexts:\n dsx0BondMode.setDescription(\n \"\"\"This object indicates which BONDing mode is used,\nif any, for a ds0Bundle. Mode0 provides parameter\nand number exchange with no synchronization. Mode\n1 provides parameter and number exchange. Mode 1\nalso provides synchronization during\ninitialization but does not include inband\nmonitoring. Mode 2 provides all of the above plus\ninband monitoring. Mode 2 also steals 1/64th of\nthe bandwidth of each channel (thus not supporting\nn x 56/64 kbit/s data channels for most values of\nn). Mode 3 provides all of the above, but also\nprovides n x 56/64 kbit/s data channels. Most\ncommon implementations of Mode 3 add an extra\nchannel to support the inband monitoring overhead.\nModeNone should be used when the interface is not\nperforming bandwidth-on-demand.\"\"\"\n )\ndsx0BondStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 2),\n Integer().subtype(subtypeSpec=SingleValueConstraint(1, 3, 2)).subtype(\n namedValues=NamedValues(('idle', 1), ('callSetup', 2), ('dataTransfer',\n 3)))).setMaxAccess('readonly')\nif mibBuilder.loadTexts:\n dsx0BondStatus.setDescription(\n \"\"\"This object indicates the current status of the\nbonding call using this ds0Bundle. 
idle(1) should\nbe used when the bonding mode is set to none(1).\"\"\"\n )\ndsx0BondRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 3),\n RowStatus()).setMaxAccess('readcreate')\nif mibBuilder.loadTexts:\n dsx0BondRowStatus.setDescription(\n \"\"\"This object is used to create new rows in this\ntable, modify existing rows, and to delete\nexisting rows.\"\"\"\n )\ndsx0BundleNextIndex = MibScalar((1, 3, 6, 1, 2, 1, 10, 82, 2), TestAndIncr()\n ).setMaxAccess('readwrite')\nif mibBuilder.loadTexts:\n dsx0BundleNextIndex.setDescription(\n \"\"\"This object is used to assist the manager in\nselecting a value for dsx0BundleIndex. Because\nthis object is of syntax TestAndIncr (see the\nSNMPv2-TC document, RFC 1903) it can also be used\nto avoid race conditions with multiple managers\ntrying to create rows in the table.\n\nIf the result of the SET for dsx0BundleNextIndex\nis not success, this means the value has been\nchanged from index (i.e. another manager used the\nvalue), so a new value is required.\n\nThe algorithm is:\ndone = false\nwhile done == false\n index = GET (dsx0BundleNextIndex.0)\n SET (dsx0BundleNextIndex.0=index)\n if (set failed)\n done = false\n else\n SET(dsx0BundleRowStatus.index=createAndGo)\n if (set failed)\n done = false\n else\n done = true\n other error handling\"\"\"\n )\ndsx0BundleTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 3))\nif mibBuilder.loadTexts:\n dsx0BundleTable.setDescription(\n \"\"\"There is an row in this table for each ds0Bundle\nin the system. 
This table can be used to\n(indirectly) create rows in the ifTable with\nifType = 'ds0Bundle(82)'.\"\"\"\n )\ndsx0BundleEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 3, 1)).setIndexNames((\n 0, 'DS0BUNDLE-MIB', 'dsx0BundleIndex'))\nif mibBuilder.loadTexts:\n dsx0BundleEntry.setDescription(\n \"\"\"There is a row in entry in this table for each\nds0Bundle interface.\"\"\"\n )\ndsx0BundleIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 1),\n Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))\n ).setMaxAccess('noaccess')\nif mibBuilder.loadTexts:\n dsx0BundleIndex.setDescription(\n \"\"\"A unique identifier for a ds0Bundle. This is not\nthe same value as ifIndex. This table is not\nindexed by ifIndex because the manager has to\nchoose the index in a createable row and the agent\nmust be allowed to select ifIndex values.\"\"\"\n )\ndsx0BundleIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 2),\n InterfaceIndex()).setMaxAccess('readonly')\nif mibBuilder.loadTexts:\n dsx0BundleIfIndex.setDescription(\n \"\"\"The ifIndex value the agent selected for the\n(new) ds0Bundle interface.\"\"\"\n )\ndsx0BundleCircuitIdentifier = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, \n 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))\n ).setMaxAccess('readcreate')\nif mibBuilder.loadTexts:\n dsx0BundleCircuitIdentifier.setDescription(\n \"\"\"This variable contains the transmission vendor's\ncircuit identifier, for the purpose of\nfacilitating troubleshooting.\"\"\"\n )\ndsx0BundleRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 4),\n RowStatus()).setMaxAccess('readcreate')\nif mibBuilder.loadTexts:\n dsx0BundleRowStatus.setDescription(\n \"\"\"This object is used to create and delete rows in\nthis table.\"\"\")\nds0BundleConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4))\nds0BundleGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 1))\nds0BundleCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 
2))\nds0BondingGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 82, 4, 1, 1)).setObjects(*\n (('DS0BUNDLE-MIB', 'dsx0BondMode'), ('DS0BUNDLE-MIB', 'dsx0BondStatus'),\n ('DS0BUNDLE-MIB', 'dsx0BondRowStatus')))\nif mibBuilder.loadTexts:\n ds0BondingGroup.setDescription(\n \"\"\"A collection of objects providing\nconfiguration information applicable\nto all DS0 interfaces.\"\"\"\n )\nds0BundleConfigGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 82, 4, 1, 2)\n ).setObjects(*(('DS0BUNDLE-MIB', 'dsx0BundleIfIndex'), ('DS0BUNDLE-MIB',\n 'dsx0BundleRowStatus'), ('DS0BUNDLE-MIB', 'dsx0BundleCircuitIdentifier'\n ), ('DS0BUNDLE-MIB', 'dsx0BundleNextIndex')))\nif mibBuilder.loadTexts:\n ds0BundleConfigGroup.setDescription(\n \"\"\"A collection of objects providing the ability to\ncreate a new ds0Bundle in the ifTable as well as\nconfiguration information about the ds0Bundle.\"\"\"\n )\nds0BundleCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 10, 82, 4, 2, 1)\n ).setObjects(*(('DS0BUNDLE-MIB', 'ds0BundleConfigGroup'), (\n 'DS0BUNDLE-MIB', 'ds0BondingGroup')))\nif mibBuilder.loadTexts:\n ds0BundleCompliance.setDescription(\n 'The compliance statement for DS0Bundle\\ninterfaces.')\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', PYSNMP_MODULE_ID=ds0Bundle)\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0Bundle=ds0Bundle,\n dsx0BondingTable=dsx0BondingTable, dsx0BondingEntry=dsx0BondingEntry,\n dsx0BondMode=dsx0BondMode, dsx0BondStatus=dsx0BondStatus,\n dsx0BondRowStatus=dsx0BondRowStatus, dsx0BundleNextIndex=\n dsx0BundleNextIndex, dsx0BundleTable=dsx0BundleTable, dsx0BundleEntry=\n dsx0BundleEntry, dsx0BundleIndex=dsx0BundleIndex, dsx0BundleIfIndex=\n dsx0BundleIfIndex, dsx0BundleCircuitIdentifier=\n dsx0BundleCircuitIdentifier, dsx0BundleRowStatus=dsx0BundleRowStatus,\n ds0BundleConformance=ds0BundleConformance, ds0BundleGroups=\n ds0BundleGroups, ds0BundleCompliances=ds0BundleCompliances)\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0BondingGroup=ds0BondingGroup,\n 
ds0BundleConfigGroup=ds0BundleConfigGroup)\nmibBuilder.exportSymbols('DS0BUNDLE-MIB', ds0BundleCompliance=\n ds0BundleCompliance)\n",
"step-4": "# PySNMP SMI module. Autogenerated from smidump -f python DS0BUNDLE-MIB\n# by libsmi2pysnmp-0.1.3 at Thu May 22 11:57:37 2014,\n# Python version sys.version_info(major=2, minor=7, micro=2, releaselevel='final', serial=0)\n\n# Imports\n\n( Integer, ObjectIdentifier, OctetString, ) = mibBuilder.importSymbols(\"ASN1\", \"Integer\", \"ObjectIdentifier\", \"OctetString\")\n( NamedValues, ) = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\")\n( ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ) = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ConstraintsIntersection\", \"ConstraintsUnion\", \"SingleValueConstraint\", \"ValueRangeConstraint\", \"ValueSizeConstraint\")\n( InterfaceIndex, ifIndex, ) = mibBuilder.importSymbols(\"IF-MIB\", \"InterfaceIndex\", \"ifIndex\")\n( ModuleCompliance, ObjectGroup, ) = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"ModuleCompliance\", \"ObjectGroup\")\n( Bits, Integer32, ModuleIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, transmission, ) = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"Bits\", \"Integer32\", \"ModuleIdentity\", \"MibIdentifier\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"TimeTicks\", \"transmission\")\n( DisplayString, RowStatus, TestAndIncr, ) = mibBuilder.importSymbols(\"SNMPv2-TC\", \"DisplayString\", \"RowStatus\", \"TestAndIncr\")\n\n# Objects\n\nds0Bundle = ModuleIdentity((1, 3, 6, 1, 2, 1, 10, 82)).setRevisions((\"1998-07-16 16:30\",\"1998-05-24 20:10\",))\nif mibBuilder.loadTexts: ds0Bundle.setOrganization(\"IETF Trunk MIB Working Group\")\nif mibBuilder.loadTexts: ds0Bundle.setContactInfo(\" David Fowler\\n\\nPostal: Newbridge Networks Corporation\\n 600 March Road\\n Kanata, Ontario, Canada K2K 2E6\\n\\n Tel: +1 613 591 3600\\n Fax: +1 613 599 3619\\n\\nE-mail: davef@newbridge.com\")\nif mibBuilder.loadTexts: ds0Bundle.setDescription(\"The MIB module to 
describe\\nDS0 Bundle interfaces objects.\")\ndsx0BondingTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 1))\nif mibBuilder.loadTexts: dsx0BondingTable.setDescription(\"The DS0 Bonding table.\")\ndsx0BondingEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 1, 1)).setIndexNames((0, \"IF-MIB\", \"ifIndex\"))\nif mibBuilder.loadTexts: dsx0BondingEntry.setDescription(\"An entry in the DS0 Bonding table. There is a\\nrow in this table for each DS0Bundle interface.\")\ndsx0BondMode = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 1), Integer().subtype(subtypeSpec=SingleValueConstraint(1,5,6,3,4,2,)).subtype(namedValues=NamedValues((\"none\", 1), (\"other\", 2), (\"mode0\", 3), (\"mode1\", 4), (\"mode2\", 5), (\"mode3\", 6), ))).setMaxAccess(\"readcreate\")\nif mibBuilder.loadTexts: dsx0BondMode.setDescription(\"This object indicates which BONDing mode is used,\\nif any, for a ds0Bundle. Mode0 provides parameter\\nand number exchange with no synchronization. Mode\\n1 provides parameter and number exchange. Mode 1\\nalso provides synchronization during\\ninitialization but does not include inband\\nmonitoring. Mode 2 provides all of the above plus\\ninband monitoring. Mode 2 also steals 1/64th of\\nthe bandwidth of each channel (thus not supporting\\nn x 56/64 kbit/s data channels for most values of\\nn). Mode 3 provides all of the above, but also\\nprovides n x 56/64 kbit/s data channels. Most\\ncommon implementations of Mode 3 add an extra\\nchannel to support the inband monitoring overhead.\\nModeNone should be used when the interface is not\\nperforming bandwidth-on-demand.\")\ndsx0BondStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 2), Integer().subtype(subtypeSpec=SingleValueConstraint(1,3,2,)).subtype(namedValues=NamedValues((\"idle\", 1), (\"callSetup\", 2), (\"dataTransfer\", 3), ))).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: dsx0BondStatus.setDescription(\"This object indicates the current status of the\\nbonding call using this ds0Bundle. 
idle(1) should\\nbe used when the bonding mode is set to none(1).\")\ndsx0BondRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 3), RowStatus()).setMaxAccess(\"readcreate\")\nif mibBuilder.loadTexts: dsx0BondRowStatus.setDescription(\"This object is used to create new rows in this\\ntable, modify existing rows, and to delete\\nexisting rows.\")\ndsx0BundleNextIndex = MibScalar((1, 3, 6, 1, 2, 1, 10, 82, 2), TestAndIncr()).setMaxAccess(\"readwrite\")\nif mibBuilder.loadTexts: dsx0BundleNextIndex.setDescription(\"This object is used to assist the manager in\\nselecting a value for dsx0BundleIndex. Because\\nthis object is of syntax TestAndIncr (see the\\nSNMPv2-TC document, RFC 1903) it can also be used\\nto avoid race conditions with multiple managers\\ntrying to create rows in the table.\\n\\nIf the result of the SET for dsx0BundleNextIndex\\nis not success, this means the value has been\\nchanged from index (i.e. another manager used the\\nvalue), so a new value is required.\\n\\nThe algorithm is:\\ndone = false\\nwhile done == false\\n index = GET (dsx0BundleNextIndex.0)\\n SET (dsx0BundleNextIndex.0=index)\\n if (set failed)\\n done = false\\n else\\n SET(dsx0BundleRowStatus.index=createAndGo)\\n if (set failed)\\n done = false\\n else\\n done = true\\n other error handling\")\ndsx0BundleTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 3))\nif mibBuilder.loadTexts: dsx0BundleTable.setDescription(\"There is an row in this table for each ds0Bundle\\nin the system. 
This table can be used to\\n(indirectly) create rows in the ifTable with\\nifType = 'ds0Bundle(82)'.\")\ndsx0BundleEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 3, 1)).setIndexNames((0, \"DS0BUNDLE-MIB\", \"dsx0BundleIndex\"))\nif mibBuilder.loadTexts: dsx0BundleEntry.setDescription(\"There is a row in entry in this table for each\\nds0Bundle interface.\")\ndsx0BundleIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess(\"noaccess\")\nif mibBuilder.loadTexts: dsx0BundleIndex.setDescription(\"A unique identifier for a ds0Bundle. This is not\\nthe same value as ifIndex. This table is not\\nindexed by ifIndex because the manager has to\\nchoose the index in a createable row and the agent\\nmust be allowed to select ifIndex values.\")\ndsx0BundleIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 2), InterfaceIndex()).setMaxAccess(\"readonly\")\nif mibBuilder.loadTexts: dsx0BundleIfIndex.setDescription(\"The ifIndex value the agent selected for the\\n(new) ds0Bundle interface.\")\ndsx0BundleCircuitIdentifier = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess(\"readcreate\")\nif mibBuilder.loadTexts: dsx0BundleCircuitIdentifier.setDescription(\"This variable contains the transmission vendor's\\ncircuit identifier, for the purpose of\\nfacilitating troubleshooting.\")\ndsx0BundleRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 4), RowStatus()).setMaxAccess(\"readcreate\")\nif mibBuilder.loadTexts: dsx0BundleRowStatus.setDescription(\"This object is used to create and delete rows in\\nthis table.\")\nds0BundleConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4))\nds0BundleGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 1))\nds0BundleCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 2))\n\n# Augmentions\n\n# Groups\n\nds0BondingGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 
82, 4, 1, 1)).setObjects(*((\"DS0BUNDLE-MIB\", \"dsx0BondMode\"), (\"DS0BUNDLE-MIB\", \"dsx0BondStatus\"), (\"DS0BUNDLE-MIB\", \"dsx0BondRowStatus\"), ) )\nif mibBuilder.loadTexts: ds0BondingGroup.setDescription(\"A collection of objects providing\\nconfiguration information applicable\\nto all DS0 interfaces.\")\nds0BundleConfigGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 82, 4, 1, 2)).setObjects(*((\"DS0BUNDLE-MIB\", \"dsx0BundleIfIndex\"), (\"DS0BUNDLE-MIB\", \"dsx0BundleRowStatus\"), (\"DS0BUNDLE-MIB\", \"dsx0BundleCircuitIdentifier\"), (\"DS0BUNDLE-MIB\", \"dsx0BundleNextIndex\"), ) )\nif mibBuilder.loadTexts: ds0BundleConfigGroup.setDescription(\"A collection of objects providing the ability to\\ncreate a new ds0Bundle in the ifTable as well as\\nconfiguration information about the ds0Bundle.\")\n\n# Compliances\n\nds0BundleCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 10, 82, 4, 2, 1)).setObjects(*((\"DS0BUNDLE-MIB\", \"ds0BundleConfigGroup\"), (\"DS0BUNDLE-MIB\", \"ds0BondingGroup\"), ) )\nif mibBuilder.loadTexts: ds0BundleCompliance.setDescription(\"The compliance statement for DS0Bundle\\ninterfaces.\")\n\n# Exports\n\n# Module identity\nmibBuilder.exportSymbols(\"DS0BUNDLE-MIB\", PYSNMP_MODULE_ID=ds0Bundle)\n\n# Objects\nmibBuilder.exportSymbols(\"DS0BUNDLE-MIB\", ds0Bundle=ds0Bundle, dsx0BondingTable=dsx0BondingTable, dsx0BondingEntry=dsx0BondingEntry, dsx0BondMode=dsx0BondMode, dsx0BondStatus=dsx0BondStatus, dsx0BondRowStatus=dsx0BondRowStatus, dsx0BundleNextIndex=dsx0BundleNextIndex, dsx0BundleTable=dsx0BundleTable, dsx0BundleEntry=dsx0BundleEntry, dsx0BundleIndex=dsx0BundleIndex, dsx0BundleIfIndex=dsx0BundleIfIndex, dsx0BundleCircuitIdentifier=dsx0BundleCircuitIdentifier, dsx0BundleRowStatus=dsx0BundleRowStatus, ds0BundleConformance=ds0BundleConformance, ds0BundleGroups=ds0BundleGroups, ds0BundleCompliances=ds0BundleCompliances)\n\n# Groups\nmibBuilder.exportSymbols(\"DS0BUNDLE-MIB\", ds0BondingGroup=ds0BondingGroup, 
ds0BundleConfigGroup=ds0BundleConfigGroup)\n\n# Compliances\nmibBuilder.exportSymbols(\"DS0BUNDLE-MIB\", ds0BundleCompliance=ds0BundleCompliance)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from tkinter import *
from PIL import ImageTk, Image
import sys, os

# Demo: display an image inside a Tk window, with a button to quit.
window = Tk()
window.title("Expanding GUI")

# NOTE(review): earlier iconbitmap/iconphoto attempts failed on Ubuntu and
# were left disabled; the window keeps the default icon.

# Keep a module-level reference to the PhotoImage so Tk's image is not
# garbage-collected while the window is showing.
photo = ImageTk.PhotoImage(Image.open("googledrive.png"))
image_label = Label(image=photo)
image_label.pack()

exit_button = Button(window, text="Exit program", command=window.quit)
exit_button.pack()

window.mainloop()
|
normal
|
{
"blob_id": "2da10163a40c9720ca9deecd9afb0e39aa885546",
"index": 5523,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nroot.title('Expanding GUI')\n<mask token>\nmy_label.pack()\n<mask token>\nbuttonquit.pack()\nroot.mainloop()\n",
"step-3": "<mask token>\nroot = Tk()\nroot.title('Expanding GUI')\nmy_img = ImageTk.PhotoImage(Image.open('googledrive.png'))\nmy_label = Label(image=my_img)\nmy_label.pack()\nbuttonquit = Button(root, text='Exit program', command=root.quit)\nbuttonquit.pack()\nroot.mainloop()\n",
"step-4": "from tkinter import *\nfrom PIL import ImageTk, Image\nimport sys, os\nroot = Tk()\nroot.title('Expanding GUI')\nmy_img = ImageTk.PhotoImage(Image.open('googledrive.png'))\nmy_label = Label(image=my_img)\nmy_label.pack()\nbuttonquit = Button(root, text='Exit program', command=root.quit)\nbuttonquit.pack()\nroot.mainloop()\n",
"step-5": "from tkinter import *\nfrom PIL import ImageTk,Image\n\nimport sys, os\n\n# This will display images and icon\n\nroot = Tk()\nroot.title(\"Expanding GUI\")\n\n# With ubuntu, it did not work the icon part\n#root.iconbitmap('@/home/gxgarciat/Documents/Tkinter/gdrive.ico')\n#root.iconphoto(True, PhotoImage(file=\"@/home/gxgarciat/Documents/Tkinter/gdrive.ico\"))\n#root.iconbitmap(os.path.join(sys.path[0], \"/home/gxgarciat/Documents/Tkinter/gdrive.ico\"))\n#root.iconbitmap('~home/gxgarciat/Documents/Tkinter/gdrive.ico')\n#root.iconphoto(False, Tk.PhotoImage(file='/home/gxgarciat/Documents/Tkinter/gdrive.ico'))\n\n\n# Importing images is a 3 step process here.\nmy_img = ImageTk.PhotoImage(Image.open(\"googledrive.png\"))\nmy_label = Label(image=my_img)\nmy_label.pack()\n\n# Adding a quit button\nbuttonquit = Button(root,text=\"Exit program\",command=root.quit)\nbuttonquit.pack()\n\nroot.mainloop()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def apply_timezone_datetime(_local_tz: str, _time: datetime.time):
"""
set time zone + merge now().date() with time()
:param _local_tz:
:param _time:
:return:
"""
return pytz.timezone(_local_tz).localize(datetime.datetime.combine(
datetime.datetime.now().date(), _time))
<|reserved_special_token_1|>
import pytz
import datetime
def apply_timezone_datetime(_local_tz: str, _time: datetime.time):
    """Anchor *_time* on today's date and attach the given time zone.

    :param _local_tz: time-zone name accepted by ``pytz.timezone``
        (e.g. ``"Europe/Berlin"``).
    :param _time: naive time-of-day to combine with today's date.
    :return: a timezone-aware ``datetime`` for today at ``_time``.
    """
    today = datetime.datetime.now().date()
    naive_moment = datetime.datetime.combine(today, _time)
    return pytz.timezone(_local_tz).localize(naive_moment)
|
flexible
|
{
"blob_id": "347627df4b08eca6e2137161472b4d31534cf81b",
"index": 1238,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef apply_timezone_datetime(_local_tz: str, _time: datetime.time):\n \"\"\"\n set time zone + merge now().date() with time()\n :param _local_tz:\n :param _time:\n :return:\n \"\"\"\n return pytz.timezone(_local_tz).localize(datetime.datetime.combine(\n datetime.datetime.now().date(), _time))\n",
"step-3": "import pytz\nimport datetime\n\n\ndef apply_timezone_datetime(_local_tz: str, _time: datetime.time):\n \"\"\"\n set time zone + merge now().date() with time()\n :param _local_tz:\n :param _time:\n :return:\n \"\"\"\n return pytz.timezone(_local_tz).localize(datetime.datetime.combine(\n datetime.datetime.now().date(), _time))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Standard Library Imports
# Third Party Imports
from kivy.clock import Clock
from kivy.properties import StringProperty
from kivy.uix.screenmanager import Screen, ScreenManager
from kivy.uix.widget import Widget
# Local Imports
from client.source.ui.kv_widgets import ModalPopupButton, SubmissionPopup, FailedSubmissionPopup, ServerShutdownPopup
# ====================================
# CONSTANTS
# ====================================
# TCP port for the chat server.
# NOTE(review): not referenced anywhere in this module — confirm it is
# consumed by the networking layer (client_protocol) or remove it.
PORT = 1776

# ====================================
# PARAMETERS
# ====================================
# Presumably the unit for timeout/interval values — TODO confirm; also
# unreferenced in this module.
TIME_UNIT = 'MINUTES'
class RootScreen(ScreenManager):
    """Top-level screen manager for the chat client.

    Stores the network protocol object so child screens can reach it as
    ``self.parent.client_protocol`` (both screens below rely on this).
    """

    def __init__(self, client_protocol, **kwargs):
        super().__init__(**kwargs)
        # Network protocol shared with StartScreen / ChatRoomScreen.
        self.client_protocol = client_protocol
class StartScreen(Screen):
    """Login screen.

    Collects the server address and credentials, starts the connection
    attempt, and polls the protocol once per second until the attempt
    succeeds, times out, or is rejected.
    """

    def attempt_to_connect(self, server_ip, username, password):
        """Start a connection attempt and begin polling for its outcome.

        Opens a modal "connecting" popup and schedules
        :meth:`wait_for_server_response` once per second.
        """
        self.parent.client_protocol.start_connection(server_ip, username, password)
        self.open_connecting_popup()
        self.timeout = 0
        # Poll once per second; the event is cancelled on any definitive outcome.
        self.wait_for_server_response_event = Clock.schedule_interval(
            self.wait_for_server_response, 1)

    def wait_for_server_response(self, *args):
        """Clock callback: inspect protocol state and react.

        Outcomes, checked in priority order:
        - login succeeded  -> switch to the chat-room screen
        - 5 polls elapsed  -> generic connection-failure popup
        - bad credentials  -> invalid-login popup
        - otherwise        -> keep waiting.
        """
        if self.parent.client_protocol.login_success:
            self.popup.dismiss()
            self.wait_for_server_response_event.cancel()
            self.parent.current = 'ChatRoomScreen'
        elif self.timeout == 5:
            self.failed_to_connect(message='Failed to connect to server. Please try again or check your network connection.')
        elif self.parent.client_protocol.invalid_credentials:
            # Reset the flag so a subsequent attempt starts clean.
            self.parent.client_protocol.invalid_credentials = False
            self.failed_to_connect(message='Invalid username/password combination. Please try again.')
        else:
            self.timeout += 1

    def failed_to_connect(self, message):
        """Dismiss the 'connecting' popup, stop polling, and show *message*."""
        self.popup.dismiss()
        self.open_failed_popup(message=message)
        self.wait_for_server_response_event.cancel()

    def open_connecting_popup(self):
        """Show the modal 'connecting…' popup (kept on ``self.popup``)."""
        self.popup = SubmissionPopup()
        self.popup.open()

    def open_failed_popup(self, message):
        """Show a modal failure popup containing *message*."""
        self.popup = FailedSubmissionPopup(message=message)
        self.popup.open()
class ChatRoomScreen(Screen):
    """Main chat UI.

    Mirrors the protocol's chat history and user list into Kivy
    properties once per second and reacts to a server shutdown.
    """

    # Bound into the kv layout; refreshed from the protocol by
    # update_display_info().
    chat_history = StringProperty('')
    user_list = StringProperty('')

    def on_enter(self):
        """Screen lifecycle hook: start the periodic display refresh."""
        Clock.schedule_once(self.schedule_update_display_info)

    def schedule_update_display_info(self, *args):
        """Begin polling the protocol state once per second."""
        Clock.schedule_interval(self.update_display_info, 1)

    def update_user_list_buttons(self):
        """Rebuild the per-user button column from ``user_list``."""
        self.clear_user_list_display()
        # Guard: ''.split('\n') yields [''] and would create one empty button.
        if self.user_list:
            for user in self.user_list.split("\n"):
                self.ids.user_list.add_widget(ModalPopupButton(text=user))
        # NOTE(review): trailing empty Widget appears to act as a spacer
        # that pushes the buttons to one end — confirm against the kv layout.
        self.ids.user_list.add_widget(Widget())

    def clear_user_list_display(self):
        """Remove every widget from the user-list container."""
        self.ids.user_list.clear_widgets()

    def update_display_info(self, *args):
        """Clock callback: pull protocol state into the bound properties."""
        protocol = self.parent.client_protocol
        if self.chat_history != protocol.chat_history.history_string:
            self.chat_history = protocol.chat_history.history_string
        if self.user_list != protocol.user_list:
            self.user_list = protocol.user_list
            self.update_user_list_buttons()
        if protocol.server_shutdown:
            self.server_shutdown()

    def next_message_private(self, user):
        """Prefix the message input with ``@<user>, `` to address them."""
        current_text = self.ids.message.text
        # Cleared first, presumably to force a text-change event even when
        # the resulting string is identical — TODO confirm.
        self.ids.message.text = ''
        self.ids.message.text = "@{}, ".format(user) + current_text

    def server_shutdown(self):
        """Tell the user the server has gone away."""
        self.popup = ServerShutdownPopup()
        self.popup.open()

    def schedule_clear_input_box(self):
        """Clear the input shortly after submit (lets the event settle)."""
        Clock.schedule_once(self.clear_input_box, 0.25)

    def clear_input_box(self, *args):
        """Empty the message input widget."""
        self.ids.message.text = ''
|
normal
|
{
"blob_id": "327e9dcba49419b8a8c320940e333765c1d9b980",
"index": 5997,
"step-1": "<mask token>\n\n\nclass ChatRoomScreen(Screen):\n <mask token>\n <mask token>\n <mask token>\n\n def schedule_update_display_info(self, *args):\n Clock.schedule_interval(self.update_display_info, 1)\n <mask token>\n <mask token>\n\n def update_display_info(self, *args):\n if (self.chat_history != self.parent.client_protocol.chat_history.\n history_string):\n self.chat_history = (self.parent.client_protocol.chat_history.\n history_string)\n if self.user_list != self.parent.client_protocol.user_list:\n print('User List mismatch')\n self.user_list = self.parent.client_protocol.user_list\n self.update_user_list_buttons()\n if self.parent.client_protocol.server_shutdown:\n self.server_shutdown()\n\n def next_message_private(self, user):\n current_text = self.ids.message.text\n self.ids.message.text = ''\n current_text = '@{}, '.format(user) + current_text\n self.ids.message.text = current_text\n\n def server_shutdown(self):\n print('SERVER SHUTDOWN')\n self.popup = ServerShutdownPopup()\n self.popup.open()\n\n def schedule_clear_input_box(self):\n Clock.schedule_once(self.clear_input_box, 0.25)\n\n def clear_input_box(self, *args):\n self.ids.message.text = ''\n",
"step-2": "<mask token>\n\n\nclass StartScreen(Screen):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ChatRoomScreen(Screen):\n chat_history = StringProperty('')\n user_list = StringProperty('')\n\n def on_enter(self):\n Clock.schedule_once(self.schedule_update_display_info)\n\n def schedule_update_display_info(self, *args):\n Clock.schedule_interval(self.update_display_info, 1)\n\n def update_user_list_buttons(self):\n self.clear_user_list_display()\n for user in self.user_list.split('\\n'):\n button = ModalPopupButton(text=user)\n self.ids.user_list.add_widget(button)\n self.ids.user_list.add_widget(Widget())\n\n def clear_user_list_display(self):\n self.ids.user_list.clear_widgets()\n\n def update_display_info(self, *args):\n if (self.chat_history != self.parent.client_protocol.chat_history.\n history_string):\n self.chat_history = (self.parent.client_protocol.chat_history.\n history_string)\n if self.user_list != self.parent.client_protocol.user_list:\n print('User List mismatch')\n self.user_list = self.parent.client_protocol.user_list\n self.update_user_list_buttons()\n if self.parent.client_protocol.server_shutdown:\n self.server_shutdown()\n\n def next_message_private(self, user):\n current_text = self.ids.message.text\n self.ids.message.text = ''\n current_text = '@{}, '.format(user) + current_text\n self.ids.message.text = current_text\n\n def server_shutdown(self):\n print('SERVER SHUTDOWN')\n self.popup = ServerShutdownPopup()\n self.popup.open()\n\n def schedule_clear_input_box(self):\n Clock.schedule_once(self.clear_input_box, 0.25)\n\n def clear_input_box(self, *args):\n self.ids.message.text = ''\n",
"step-3": "<mask token>\n\n\nclass StartScreen(Screen):\n\n def attempt_to_connect(self, server_ip, username, password):\n self.parent.client_protocol.start_connection(server_ip, username,\n password)\n self.open_connecting_popup()\n self.timeout = 0\n self.wait_For_server_response_event = Clock.schedule_interval(self.\n wait_for_server_response, 1)\n\n def wait_for_server_response(self, *args):\n print(self.timeout)\n if self.parent.client_protocol.login_success:\n self.popup.dismiss()\n self.wait_For_server_response_event.cancel()\n self.parent.current = 'ChatRoomScreen'\n elif self.timeout == 5:\n self.failed_to_connect(message=\n 'Failed to connect to server. Please try again or check your network connection.'\n )\n elif self.parent.client_protocol.invalid_credentials:\n self.parent.client_protocol.invalid_credentials = False\n self.failed_to_connect(message=\n 'Invalid username/password combination. Please try again.')\n else:\n self.timeout += 1\n\n def failed_to_connect(self, message):\n print('FAILED TO CONNECT')\n self.popup.dismiss()\n self.open_failed_popup(message=message)\n self.wait_For_server_response_event.cancel()\n\n def open_connecting_popup(self):\n self.popup = SubmissionPopup()\n self.popup.open()\n\n def open_failed_popup(self, message):\n self.popup = FailedSubmissionPopup(message=message)\n self.popup.open()\n\n\nclass ChatRoomScreen(Screen):\n chat_history = StringProperty('')\n user_list = StringProperty('')\n\n def on_enter(self):\n Clock.schedule_once(self.schedule_update_display_info)\n\n def schedule_update_display_info(self, *args):\n Clock.schedule_interval(self.update_display_info, 1)\n\n def update_user_list_buttons(self):\n self.clear_user_list_display()\n for user in self.user_list.split('\\n'):\n button = ModalPopupButton(text=user)\n self.ids.user_list.add_widget(button)\n self.ids.user_list.add_widget(Widget())\n\n def clear_user_list_display(self):\n self.ids.user_list.clear_widgets()\n\n def update_display_info(self, 
*args):\n if (self.chat_history != self.parent.client_protocol.chat_history.\n history_string):\n self.chat_history = (self.parent.client_protocol.chat_history.\n history_string)\n if self.user_list != self.parent.client_protocol.user_list:\n print('User List mismatch')\n self.user_list = self.parent.client_protocol.user_list\n self.update_user_list_buttons()\n if self.parent.client_protocol.server_shutdown:\n self.server_shutdown()\n\n def next_message_private(self, user):\n current_text = self.ids.message.text\n self.ids.message.text = ''\n current_text = '@{}, '.format(user) + current_text\n self.ids.message.text = current_text\n\n def server_shutdown(self):\n print('SERVER SHUTDOWN')\n self.popup = ServerShutdownPopup()\n self.popup.open()\n\n def schedule_clear_input_box(self):\n Clock.schedule_once(self.clear_input_box, 0.25)\n\n def clear_input_box(self, *args):\n self.ids.message.text = ''\n",
"step-4": "<mask token>\nPORT = 1776\nTIME_UNIT = 'MINUTES'\n\n\nclass RootScreen(ScreenManager):\n\n def __init__(self, client_protocol, **kwargs):\n super().__init__(**kwargs)\n self.client_protocol = client_protocol\n\n\nclass StartScreen(Screen):\n\n def attempt_to_connect(self, server_ip, username, password):\n self.parent.client_protocol.start_connection(server_ip, username,\n password)\n self.open_connecting_popup()\n self.timeout = 0\n self.wait_For_server_response_event = Clock.schedule_interval(self.\n wait_for_server_response, 1)\n\n def wait_for_server_response(self, *args):\n print(self.timeout)\n if self.parent.client_protocol.login_success:\n self.popup.dismiss()\n self.wait_For_server_response_event.cancel()\n self.parent.current = 'ChatRoomScreen'\n elif self.timeout == 5:\n self.failed_to_connect(message=\n 'Failed to connect to server. Please try again or check your network connection.'\n )\n elif self.parent.client_protocol.invalid_credentials:\n self.parent.client_protocol.invalid_credentials = False\n self.failed_to_connect(message=\n 'Invalid username/password combination. 
Please try again.')\n else:\n self.timeout += 1\n\n def failed_to_connect(self, message):\n print('FAILED TO CONNECT')\n self.popup.dismiss()\n self.open_failed_popup(message=message)\n self.wait_For_server_response_event.cancel()\n\n def open_connecting_popup(self):\n self.popup = SubmissionPopup()\n self.popup.open()\n\n def open_failed_popup(self, message):\n self.popup = FailedSubmissionPopup(message=message)\n self.popup.open()\n\n\nclass ChatRoomScreen(Screen):\n chat_history = StringProperty('')\n user_list = StringProperty('')\n\n def on_enter(self):\n Clock.schedule_once(self.schedule_update_display_info)\n\n def schedule_update_display_info(self, *args):\n Clock.schedule_interval(self.update_display_info, 1)\n\n def update_user_list_buttons(self):\n self.clear_user_list_display()\n for user in self.user_list.split('\\n'):\n button = ModalPopupButton(text=user)\n self.ids.user_list.add_widget(button)\n self.ids.user_list.add_widget(Widget())\n\n def clear_user_list_display(self):\n self.ids.user_list.clear_widgets()\n\n def update_display_info(self, *args):\n if (self.chat_history != self.parent.client_protocol.chat_history.\n history_string):\n self.chat_history = (self.parent.client_protocol.chat_history.\n history_string)\n if self.user_list != self.parent.client_protocol.user_list:\n print('User List mismatch')\n self.user_list = self.parent.client_protocol.user_list\n self.update_user_list_buttons()\n if self.parent.client_protocol.server_shutdown:\n self.server_shutdown()\n\n def next_message_private(self, user):\n current_text = self.ids.message.text\n self.ids.message.text = ''\n current_text = '@{}, '.format(user) + current_text\n self.ids.message.text = current_text\n\n def server_shutdown(self):\n print('SERVER SHUTDOWN')\n self.popup = ServerShutdownPopup()\n self.popup.open()\n\n def schedule_clear_input_box(self):\n Clock.schedule_once(self.clear_input_box, 0.25)\n\n def clear_input_box(self, *args):\n self.ids.message.text = ''\n",
"step-5": "# Standard Library Imports\n\n# Third Party Imports\nfrom kivy.clock import Clock\nfrom kivy.properties import StringProperty\nfrom kivy.uix.screenmanager import Screen, ScreenManager\nfrom kivy.uix.widget import Widget\n\n# Local Imports\nfrom client.source.ui.kv_widgets import ModalPopupButton, SubmissionPopup, FailedSubmissionPopup, ServerShutdownPopup\n\n# ====================================\n# CONSTANTS\n# ====================================\nPORT = 1776\n\n# ====================================\n# PARAMETERS\n# ====================================\nTIME_UNIT = 'MINUTES'\n\n\nclass RootScreen(ScreenManager):\n def __init__(self, client_protocol, **kwargs):\n super().__init__(**kwargs)\n self.client_protocol = client_protocol\n\n\nclass StartScreen(Screen):\n\n def attempt_to_connect(self, server_ip, username, password):\n self.parent.client_protocol.start_connection(server_ip, username, password)\n self.open_connecting_popup()\n self.timeout = 0\n self.wait_For_server_response_event = Clock.schedule_interval(self.wait_for_server_response, 1)\n\n def wait_for_server_response(self, *args):\n print(self.timeout)\n # Login success\n if self.parent.client_protocol.login_success:\n self.popup.dismiss()\n self.wait_For_server_response_event.cancel()\n self.parent.current = 'ChatRoomScreen'\n # Timeout\n elif self.timeout == 5:\n self.failed_to_connect(message='Failed to connect to server. Please try again or check your network connection.')\n # Invalid credentials\n elif self.parent.client_protocol.invalid_credentials:\n self.parent.client_protocol.invalid_credentials = False\n self.failed_to_connect(message='Invalid username/password combination. 
Please try again.')\n else:\n self.timeout += 1\n\n def failed_to_connect(self, message):\n print(\"FAILED TO CONNECT\")\n self.popup.dismiss()\n self.open_failed_popup(message=message)\n self.wait_For_server_response_event.cancel()\n\n def open_connecting_popup(self):\n self.popup = SubmissionPopup()\n self.popup.open()\n\n def open_failed_popup(self, message):\n self.popup = FailedSubmissionPopup(message=message)\n self.popup.open()\n\n\nclass ChatRoomScreen(Screen):\n chat_history = StringProperty('')\n user_list = StringProperty('')\n\n def on_enter(self):\n Clock.schedule_once(self.schedule_update_display_info)\n\n def schedule_update_display_info(self, *args):\n Clock.schedule_interval(self.update_display_info, 1)\n\n def update_user_list_buttons(self):\n self.clear_user_list_display()\n for user in self.user_list.split(\"\\n\"):\n button = ModalPopupButton(text=user)\n self.ids.user_list.add_widget(button)\n self.ids.user_list.add_widget(Widget())\n\n def clear_user_list_display(self):\n self.ids.user_list.clear_widgets()\n\n def update_display_info(self, *args):\n if self.chat_history != self.parent.client_protocol.chat_history.history_string:\n self.chat_history = self.parent.client_protocol.chat_history.history_string\n\n if self.user_list != self.parent.client_protocol.user_list:\n print(\"User List mismatch\")\n self.user_list = self.parent.client_protocol.user_list\n self.update_user_list_buttons()\n\n if self.parent.client_protocol.server_shutdown:\n self.server_shutdown()\n\n def next_message_private(self, user):\n current_text = self.ids.message.text\n self.ids.message.text = ''\n current_text = \"@{}, \".format(user) + current_text\n self.ids.message.text = current_text\n\n def server_shutdown(self):\n print(\"SERVER SHUTDOWN\")\n self.popup = ServerShutdownPopup()\n self.popup.open()\n\n def schedule_clear_input_box(self):\n Clock.schedule_once(self.clear_input_box, 0.25)\n\n def clear_input_box(self, *args):\n self.ids.message.text = ''\n",
"step-ids": [
7,
12,
17,
20,
22
]
}
|
[
7,
12,
17,
20,
22
] |
<|reserved_special_token_0|>
def convertImage(imgData):
getI420FromBase64(imgData)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(os.path.abspath('./models'))
<|reserved_special_token_0|>
def getI420FromBase64(codec):
base64_data = re.sub('^data:image/.+;base64,', '', codec)
byte_data = base64.b64decode(base64_data)
image_data = BytesIO(byte_data)
img = Image.open(image_data)
img.save(OUTPUT)
def convertImage(imgData):
getI420FromBase64(imgData)
@csrf_exempt
def predict(request):
imgData = request.POST.get('img')
convertImage(imgData)
x = Image.open(OUTPUT)
x = x.convert('L')
x = x.resize((32, 32))
x.save(OUTPUT)
x = np.array(x)
x = x.reshape(1, 32, 32, 1)
model, graph = init()
out = model.predict(x)
response = np.array(np.argmax(out, axis=1))
return JsonResponse({'output': str(response[0])})
def index(request):
return render(request, 'index.html', {'imagestr':
'static/hindi_characters/1.png'})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(os.path.abspath('./models'))
OUTPUT = os.path.join(os.path.dirname(__file__), 'output.png')
<|reserved_special_token_0|>
def getI420FromBase64(codec):
base64_data = re.sub('^data:image/.+;base64,', '', codec)
byte_data = base64.b64decode(base64_data)
image_data = BytesIO(byte_data)
img = Image.open(image_data)
img.save(OUTPUT)
def convertImage(imgData):
getI420FromBase64(imgData)
@csrf_exempt
def predict(request):
imgData = request.POST.get('img')
convertImage(imgData)
x = Image.open(OUTPUT)
x = x.convert('L')
x = x.resize((32, 32))
x.save(OUTPUT)
x = np.array(x)
x = x.reshape(1, 32, 32, 1)
model, graph = init()
out = model.predict(x)
response = np.array(np.argmax(out, axis=1))
return JsonResponse({'output': str(response[0])})
def index(request):
return render(request, 'index.html', {'imagestr':
'static/hindi_characters/1.png'})
<|reserved_special_token_1|>
from django.shortcuts import render
from PIL import Image
from django.views.decorators import csrf
import numpy as np
import re
import sys
import os
from .utils import *
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import base64
sys.path.append(os.path.abspath('./models'))
OUTPUT = os.path.join(os.path.dirname(__file__), 'output.png')
from PIL import Image
from io import BytesIO
def getI420FromBase64(codec):
base64_data = re.sub('^data:image/.+;base64,', '', codec)
byte_data = base64.b64decode(base64_data)
image_data = BytesIO(byte_data)
img = Image.open(image_data)
img.save(OUTPUT)
def convertImage(imgData):
getI420FromBase64(imgData)
@csrf_exempt
def predict(request):
imgData = request.POST.get('img')
convertImage(imgData)
x = Image.open(OUTPUT)
x = x.convert('L')
x = x.resize((32, 32))
x.save(OUTPUT)
x = np.array(x)
x = x.reshape(1, 32, 32, 1)
model, graph = init()
out = model.predict(x)
response = np.array(np.argmax(out, axis=1))
return JsonResponse({'output': str(response[0])})
def index(request):
return render(request, 'index.html', {'imagestr':
'static/hindi_characters/1.png'})
<|reserved_special_token_1|>
from django.shortcuts import render
from PIL import Image
from django.views.decorators import csrf
import numpy as np
import re
import sys
import os
from .utils import *
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import base64
sys.path.append(os.path.abspath("./models"))
OUTPUT = os.path.join(os.path.dirname(__file__), 'output.png')
from PIL import Image
from io import BytesIO
def getI420FromBase64(codec):
base64_data = re.sub('^data:image/.+;base64,', '', codec)
byte_data = base64.b64decode(base64_data)
image_data = BytesIO(byte_data)
img = Image.open(image_data)
img.save(OUTPUT)
def convertImage(imgData):
getI420FromBase64(imgData)
@csrf_exempt
def predict(request):
imgData = request.POST.get('img')
convertImage(imgData)
x = Image.open(OUTPUT)
x = x.convert('L')
x = x.resize((32,32))
x.save(OUTPUT)
x = np.array(x)
x = x.reshape(1,32,32,1)
model, graph = init()
out = model.predict(x)
response = np.array(np.argmax(out, axis=1))
return JsonResponse({"output": str(response[0]) })
def index(request):
return render(request, 'index.html', { "imagestr" : "static/hindi_characters/1.png"})
|
flexible
|
{
"blob_id": "b84b3206e87176feee2c39fc0866ada994c9ac7a",
"index": 8655,
"step-1": "<mask token>\n\n\ndef convertImage(imgData):\n getI420FromBase64(imgData)\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append(os.path.abspath('./models'))\n<mask token>\n\n\ndef getI420FromBase64(codec):\n base64_data = re.sub('^data:image/.+;base64,', '', codec)\n byte_data = base64.b64decode(base64_data)\n image_data = BytesIO(byte_data)\n img = Image.open(image_data)\n img.save(OUTPUT)\n\n\ndef convertImage(imgData):\n getI420FromBase64(imgData)\n\n\n@csrf_exempt\ndef predict(request):\n imgData = request.POST.get('img')\n convertImage(imgData)\n x = Image.open(OUTPUT)\n x = x.convert('L')\n x = x.resize((32, 32))\n x.save(OUTPUT)\n x = np.array(x)\n x = x.reshape(1, 32, 32, 1)\n model, graph = init()\n out = model.predict(x)\n response = np.array(np.argmax(out, axis=1))\n return JsonResponse({'output': str(response[0])})\n\n\ndef index(request):\n return render(request, 'index.html', {'imagestr':\n 'static/hindi_characters/1.png'})\n",
"step-3": "<mask token>\nsys.path.append(os.path.abspath('./models'))\nOUTPUT = os.path.join(os.path.dirname(__file__), 'output.png')\n<mask token>\n\n\ndef getI420FromBase64(codec):\n base64_data = re.sub('^data:image/.+;base64,', '', codec)\n byte_data = base64.b64decode(base64_data)\n image_data = BytesIO(byte_data)\n img = Image.open(image_data)\n img.save(OUTPUT)\n\n\ndef convertImage(imgData):\n getI420FromBase64(imgData)\n\n\n@csrf_exempt\ndef predict(request):\n imgData = request.POST.get('img')\n convertImage(imgData)\n x = Image.open(OUTPUT)\n x = x.convert('L')\n x = x.resize((32, 32))\n x.save(OUTPUT)\n x = np.array(x)\n x = x.reshape(1, 32, 32, 1)\n model, graph = init()\n out = model.predict(x)\n response = np.array(np.argmax(out, axis=1))\n return JsonResponse({'output': str(response[0])})\n\n\ndef index(request):\n return render(request, 'index.html', {'imagestr':\n 'static/hindi_characters/1.png'})\n",
"step-4": "from django.shortcuts import render\nfrom PIL import Image\nfrom django.views.decorators import csrf\nimport numpy as np\nimport re\nimport sys\nimport os\nfrom .utils import *\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport base64\nsys.path.append(os.path.abspath('./models'))\nOUTPUT = os.path.join(os.path.dirname(__file__), 'output.png')\nfrom PIL import Image\nfrom io import BytesIO\n\n\ndef getI420FromBase64(codec):\n base64_data = re.sub('^data:image/.+;base64,', '', codec)\n byte_data = base64.b64decode(base64_data)\n image_data = BytesIO(byte_data)\n img = Image.open(image_data)\n img.save(OUTPUT)\n\n\ndef convertImage(imgData):\n getI420FromBase64(imgData)\n\n\n@csrf_exempt\ndef predict(request):\n imgData = request.POST.get('img')\n convertImage(imgData)\n x = Image.open(OUTPUT)\n x = x.convert('L')\n x = x.resize((32, 32))\n x.save(OUTPUT)\n x = np.array(x)\n x = x.reshape(1, 32, 32, 1)\n model, graph = init()\n out = model.predict(x)\n response = np.array(np.argmax(out, axis=1))\n return JsonResponse({'output': str(response[0])})\n\n\ndef index(request):\n return render(request, 'index.html', {'imagestr':\n 'static/hindi_characters/1.png'})\n",
"step-5": "from django.shortcuts import render\nfrom PIL import Image\nfrom django.views.decorators import csrf\nimport numpy as np\nimport re\nimport sys\nimport os\nfrom .utils import *\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport base64\nsys.path.append(os.path.abspath(\"./models\"))\nOUTPUT = os.path.join(os.path.dirname(__file__), 'output.png')\nfrom PIL import Image\nfrom io import BytesIO\ndef getI420FromBase64(codec):\n base64_data = re.sub('^data:image/.+;base64,', '', codec)\n byte_data = base64.b64decode(base64_data)\n image_data = BytesIO(byte_data)\n img = Image.open(image_data)\n img.save(OUTPUT)\n\n\ndef convertImage(imgData):\n getI420FromBase64(imgData)\n\n@csrf_exempt\ndef predict(request):\n imgData = request.POST.get('img')\n convertImage(imgData)\n x = Image.open(OUTPUT)\n x = x.convert('L')\n x = x.resize((32,32))\n x.save(OUTPUT)\n x = np.array(x)\n x = x.reshape(1,32,32,1)\n model, graph = init()\n out = model.predict(x)\n response = np.array(np.argmax(out, axis=1))\n return JsonResponse({\"output\": str(response[0]) })\n\n\ndef index(request):\n return render(request, 'index.html', { \"imagestr\" : \"static/hindi_characters/1.png\"})\n",
"step-ids": [
1,
5,
6,
7,
8
]
}
|
[
1,
5,
6,
7,
8
] |
#!/usr/bin/python3
import tkinter
from PIL import Image, ImageTk
import requests
from io import BytesIO
from threading import Timer
# Root window holding the 3x3 grid of camera feeds.
rootWindow = tkinter.Tk()
# the following makes the program full-screen
RWidth = rootWindow.winfo_screenwidth()
RHeight = rootWindow.winfo_screenheight()
#
rootWindow.overrideredirect(True) # without a close option
rootWindow.geometry(("%dx%d")%(RWidth,RHeight))
# Snapshot URLs for nine public cameras (one JPEG per HTTP request).
# NOTE(review): hard-coded IPs/ports were live at authoring time — likely stale.
cameraURL01="http://209.251.247.251:82/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1507301122"
cameraURL02="http://108.209.209.13/webcapture.jpg?command=snap&channel=1?1507300788"
cameraURL03="http://72.81.132.14:60001/SnapshotJPEG?Resolution=640x480&amp;Quality=Clarity&amp;1507300872"
cameraURL04="http://24.98.52.12:8082/cgi-bin/viewer/video.jpg?r=1507300889"
cameraURL05="http://80.24.185.230:86/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1515078226"
cameraURL06="http://24.23.232.13:50001/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1507300932"
cameraURL07="http://80.24.185.230:81/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1515078327"
cameraURL08="http://80.24.185.230:82/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1515078336"
cameraURL09="http://63.172.41.245/webcapture.jpg?command=snap&channel=1?1508162812"
# One Label per camera cell; images are attached later by the refreshCam* loops.
image01_label = tkinter.Label()
image02_label = tkinter.Label()
image03_label = tkinter.Label()
image04_label = tkinter.Label()
image05_label = tkinter.Label()
image06_label = tkinter.Label()
image07_label = tkinter.Label()
image08_label = tkinter.Label()
image09_label = tkinter.Label()
# Arrange as a 3x3 grid: cameras 1-3 top row, 4-6 middle, 7-9 bottom.
image01_label.grid(row=0, column=0)
image02_label.grid(row=0, column=1)
image03_label.grid(row=0, column=2)
image04_label.grid(row=1, column=0)
image05_label.grid(row=1, column=1)
image06_label.grid(row=1, column=2)
image07_label.grid(row=2, column=0)
image08_label.grid(row=2, column=1)
image09_label.grid(row=2, column=2)
def main():
    """Bind Escape to quit and kick off one refresh loop per camera feed."""
    rootWindow.bind('<Escape>', close)
    refreshers = (refreshCam01, refreshCam02, refreshCam03,
                  refreshCam04, refreshCam05, refreshCam06,
                  refreshCam07, refreshCam08, refreshCam09)
    # Stagger-free start: each loop re-arms itself after its first frame.
    for refresher in refreshers:
        Timer(0.1, refresher).start()
def URL2PhotoImage(URL):
    """Download the JPEG snapshot at *URL* and return it as a Tk PhotoImage
    scaled to one cell of the 3x3 grid (RWidth/3 by RHeight/3)."""
    response = requests.get(URL, timeout=4)
    raw = BytesIO(response.content)
    frame = Image.open(raw)
    cell_size = (int(RWidth / 3), int(RHeight / 3))
    scaled = frame.resize(cell_size, Image.ANTIALIAS)
    return ImageTk.PhotoImage(scaled)
def _refresh_cam(label, url, reschedule):
    """Fetch one frame from *url* into *label*, then re-arm *reschedule*.

    Shared body for refreshCam01..09.  Any fetch/decode failure is swallowed
    so a dead camera merely freezes its cell instead of killing the loop.
    The loop stops re-arming once the root window leaves the 'normal' state
    (i.e. the application is shutting down).
    """
    try:
        tmp_photo = URL2PhotoImage(url)
        label.configure(image=tmp_photo)
        label.image = tmp_photo # keep a reference to prevent tkinter garbage collection
    except Exception:
        # Timeouts / bad frames are expected from flaky public cams; keep the
        # last good image.  (Narrowed from bare except: so SystemExit and
        # KeyboardInterrupt are not swallowed in timer threads.)
        pass
    if rootWindow.state() == 'normal': Timer(0.05, reschedule).start()

def refreshCam01():
    _refresh_cam(image01_label, cameraURL01, refreshCam01)

def refreshCam02():
    _refresh_cam(image02_label, cameraURL02, refreshCam02)

def refreshCam03():
    _refresh_cam(image03_label, cameraURL03, refreshCam03)

def refreshCam04():
    _refresh_cam(image04_label, cameraURL04, refreshCam04)

def refreshCam05():
    _refresh_cam(image05_label, cameraURL05, refreshCam05)

def refreshCam06():
    _refresh_cam(image06_label, cameraURL06, refreshCam06)

def refreshCam07():
    _refresh_cam(image07_label, cameraURL07, refreshCam07)

def refreshCam08():
    _refresh_cam(image08_label, cameraURL08, refreshCam08)

def refreshCam09():
    _refresh_cam(image09_label, cameraURL09, refreshCam09)
def close(event=None):
rootWindow.quit()
# start the subprocess, main loop, and gui
if __name__ == '__main__':
main()
rootWindow.mainloop()
|
normal
|
{
"blob_id": "be63e8e6e98c9afed66cae033a7f41f1be1561a8",
"index": 8077,
"step-1": "<mask token>\n\n\ndef refreshCam03():\n try:\n tmp_photo = URL2PhotoImage(cameraURL03)\n image03_label.configure(image=tmp_photo)\n image03_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam03).start()\n\n\n<mask token>\n\n\ndef refreshCam05():\n try:\n tmp_photo = URL2PhotoImage(cameraURL05)\n image05_label.configure(image=tmp_photo)\n image05_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam05).start()\n\n\n<mask token>\n\n\ndef close(event=None):\n rootWindow.quit()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n rootWindow.bind('<Escape>', close)\n Timer(0.1, refreshCam01).start()\n Timer(0.1, refreshCam02).start()\n Timer(0.1, refreshCam03).start()\n Timer(0.1, refreshCam04).start()\n Timer(0.1, refreshCam05).start()\n Timer(0.1, refreshCam06).start()\n Timer(0.1, refreshCam07).start()\n Timer(0.1, refreshCam08).start()\n Timer(0.1, refreshCam09).start()\n\n\ndef URL2PhotoImage(URL):\n return ImageTk.PhotoImage(Image.open(BytesIO(requests.get(URL, timeout=\n 4).content)).resize((int(RWidth / 3), int(RHeight / 3)), Image.\n ANTIALIAS))\n\n\n<mask token>\n\n\ndef refreshCam02():\n try:\n tmp_photo = URL2PhotoImage(cameraURL02)\n image02_label.configure(image=tmp_photo)\n image02_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam02).start()\n\n\ndef refreshCam03():\n try:\n tmp_photo = URL2PhotoImage(cameraURL03)\n image03_label.configure(image=tmp_photo)\n image03_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam03).start()\n\n\ndef refreshCam04():\n try:\n tmp_photo = URL2PhotoImage(cameraURL04)\n image04_label.configure(image=tmp_photo)\n image04_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam04).start()\n\n\ndef refreshCam05():\n try:\n tmp_photo = URL2PhotoImage(cameraURL05)\n image05_label.configure(image=tmp_photo)\n image05_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam05).start()\n\n\n<mask token>\n\n\ndef refreshCam07():\n try:\n tmp_photo = URL2PhotoImage(cameraURL07)\n image07_label.configure(image=tmp_photo)\n image07_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam07).start()\n\n\ndef refreshCam08():\n try:\n tmp_photo = URL2PhotoImage(cameraURL08)\n image08_label.configure(image=tmp_photo)\n image08_label.image = tmp_photo\n except:\n pass\n if 
rootWindow.state() == 'normal':\n Timer(0.05, refreshCam08).start()\n\n\ndef refreshCam09():\n try:\n tmp_photo = URL2PhotoImage(cameraURL09)\n image09_label.configure(image=tmp_photo)\n image09_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam09).start()\n\n\ndef close(event=None):\n rootWindow.quit()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n rootWindow.bind('<Escape>', close)\n Timer(0.1, refreshCam01).start()\n Timer(0.1, refreshCam02).start()\n Timer(0.1, refreshCam03).start()\n Timer(0.1, refreshCam04).start()\n Timer(0.1, refreshCam05).start()\n Timer(0.1, refreshCam06).start()\n Timer(0.1, refreshCam07).start()\n Timer(0.1, refreshCam08).start()\n Timer(0.1, refreshCam09).start()\n\n\ndef URL2PhotoImage(URL):\n return ImageTk.PhotoImage(Image.open(BytesIO(requests.get(URL, timeout=\n 4).content)).resize((int(RWidth / 3), int(RHeight / 3)), Image.\n ANTIALIAS))\n\n\ndef refreshCam01():\n try:\n tmp_photo = URL2PhotoImage(cameraURL01)\n image01_label.configure(image=tmp_photo)\n image01_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam01).start()\n\n\ndef refreshCam02():\n try:\n tmp_photo = URL2PhotoImage(cameraURL02)\n image02_label.configure(image=tmp_photo)\n image02_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam02).start()\n\n\ndef refreshCam03():\n try:\n tmp_photo = URL2PhotoImage(cameraURL03)\n image03_label.configure(image=tmp_photo)\n image03_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam03).start()\n\n\ndef refreshCam04():\n try:\n tmp_photo = URL2PhotoImage(cameraURL04)\n image04_label.configure(image=tmp_photo)\n image04_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam04).start()\n\n\ndef refreshCam05():\n try:\n tmp_photo = URL2PhotoImage(cameraURL05)\n image05_label.configure(image=tmp_photo)\n image05_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam05).start()\n\n\n<mask token>\n\n\ndef refreshCam07():\n try:\n tmp_photo = URL2PhotoImage(cameraURL07)\n image07_label.configure(image=tmp_photo)\n image07_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 
'normal':\n Timer(0.05, refreshCam07).start()\n\n\ndef refreshCam08():\n try:\n tmp_photo = URL2PhotoImage(cameraURL08)\n image08_label.configure(image=tmp_photo)\n image08_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam08).start()\n\n\ndef refreshCam09():\n try:\n tmp_photo = URL2PhotoImage(cameraURL09)\n image09_label.configure(image=tmp_photo)\n image09_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam09).start()\n\n\ndef close(event=None):\n rootWindow.quit()\n\n\n<mask token>\n",
"step-4": "<mask token>\nrootWindow = tkinter.Tk()\nRWidth = rootWindow.winfo_screenwidth()\nRHeight = rootWindow.winfo_screenheight()\nrootWindow.overrideredirect(True)\nrootWindow.geometry('%dx%d' % (RWidth, RHeight))\ncameraURL01 = (\n 'http://209.251.247.251:82/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1507301122'\n )\ncameraURL02 = (\n 'http://108.209.209.13/webcapture.jpg?command=snap&channel=1?1507300788'\n )\ncameraURL03 = (\n 'http://72.81.132.14:60001/SnapshotJPEG?Resolution=640x480&amp;Quality=Clarity&amp;1507300872'\n )\ncameraURL04 = 'http://24.98.52.12:8082/cgi-bin/viewer/video.jpg?r=1507300889'\ncameraURL05 = (\n 'http://80.24.185.230:86/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1515078226'\n )\ncameraURL06 = (\n 'http://24.23.232.13:50001/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1507300932'\n )\ncameraURL07 = (\n 'http://80.24.185.230:81/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1515078327'\n )\ncameraURL08 = (\n 'http://80.24.185.230:82/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1515078336'\n )\ncameraURL09 = (\n 'http://63.172.41.245/webcapture.jpg?command=snap&channel=1?1508162812'\n )\nimage01_label = tkinter.Label()\nimage02_label = tkinter.Label()\nimage03_label = tkinter.Label()\nimage04_label = tkinter.Label()\nimage05_label = tkinter.Label()\nimage06_label = tkinter.Label()\nimage07_label = tkinter.Label()\nimage08_label = tkinter.Label()\nimage09_label = tkinter.Label()\nimage01_label.grid(row=0, column=0)\nimage02_label.grid(row=0, column=1)\nimage03_label.grid(row=0, column=2)\nimage04_label.grid(row=1, column=0)\nimage05_label.grid(row=1, column=1)\nimage06_label.grid(row=1, column=2)\nimage07_label.grid(row=2, column=0)\nimage08_label.grid(row=2, column=1)\nimage09_label.grid(row=2, column=2)\n\n\ndef main():\n rootWindow.bind('<Escape>', close)\n Timer(0.1, refreshCam01).start()\n Timer(0.1, refreshCam02).start()\n Timer(0.1, 
refreshCam03).start()\n Timer(0.1, refreshCam04).start()\n Timer(0.1, refreshCam05).start()\n Timer(0.1, refreshCam06).start()\n Timer(0.1, refreshCam07).start()\n Timer(0.1, refreshCam08).start()\n Timer(0.1, refreshCam09).start()\n\n\ndef URL2PhotoImage(URL):\n return ImageTk.PhotoImage(Image.open(BytesIO(requests.get(URL, timeout=\n 4).content)).resize((int(RWidth / 3), int(RHeight / 3)), Image.\n ANTIALIAS))\n\n\ndef refreshCam01():\n try:\n tmp_photo = URL2PhotoImage(cameraURL01)\n image01_label.configure(image=tmp_photo)\n image01_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam01).start()\n\n\ndef refreshCam02():\n try:\n tmp_photo = URL2PhotoImage(cameraURL02)\n image02_label.configure(image=tmp_photo)\n image02_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam02).start()\n\n\ndef refreshCam03():\n try:\n tmp_photo = URL2PhotoImage(cameraURL03)\n image03_label.configure(image=tmp_photo)\n image03_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam03).start()\n\n\ndef refreshCam04():\n try:\n tmp_photo = URL2PhotoImage(cameraURL04)\n image04_label.configure(image=tmp_photo)\n image04_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam04).start()\n\n\ndef refreshCam05():\n try:\n tmp_photo = URL2PhotoImage(cameraURL05)\n image05_label.configure(image=tmp_photo)\n image05_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam05).start()\n\n\ndef refreshCam06():\n try:\n tmp_photo = URL2PhotoImage(cameraURL06)\n image06_label.configure(image=tmp_photo)\n image06_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam06).start()\n\n\ndef refreshCam07():\n try:\n tmp_photo = URL2PhotoImage(cameraURL07)\n image07_label.configure(image=tmp_photo)\n 
image07_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam07).start()\n\n\ndef refreshCam08():\n try:\n tmp_photo = URL2PhotoImage(cameraURL08)\n image08_label.configure(image=tmp_photo)\n image08_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam08).start()\n\n\ndef refreshCam09():\n try:\n tmp_photo = URL2PhotoImage(cameraURL09)\n image09_label.configure(image=tmp_photo)\n image09_label.image = tmp_photo\n except:\n pass\n if rootWindow.state() == 'normal':\n Timer(0.05, refreshCam09).start()\n\n\ndef close(event=None):\n rootWindow.quit()\n\n\nif __name__ == '__main__':\n main()\n rootWindow.mainloop()\n",
"step-5": "#!/usr/bin/python3\n\nimport tkinter\nfrom PIL import Image, ImageTk\nimport requests\nfrom io import BytesIO\nfrom threading import Timer\n\n\nrootWindow = tkinter.Tk()\n\n# the following makes the program full-screen\nRWidth = rootWindow.winfo_screenwidth()\nRHeight = rootWindow.winfo_screenheight()\n#\nrootWindow.overrideredirect(True)\t# without a close option\nrootWindow.geometry((\"%dx%d\")%(RWidth,RHeight))\n\ncameraURL01=\"http://209.251.247.251:82/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1507301122\"\ncameraURL02=\"http://108.209.209.13/webcapture.jpg?command=snap&channel=1?1507300788\"\ncameraURL03=\"http://72.81.132.14:60001/SnapshotJPEG?Resolution=640x480&amp;Quality=Clarity&amp;1507300872\"\ncameraURL04=\"http://24.98.52.12:8082/cgi-bin/viewer/video.jpg?r=1507300889\"\ncameraURL05=\"http://80.24.185.230:86/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1515078226\"\ncameraURL06=\"http://24.23.232.13:50001/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1507300932\"\ncameraURL07=\"http://80.24.185.230:81/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1515078327\"\ncameraURL08=\"http://80.24.185.230:82/cgi-bin/camera?resolution=640&amp;quality=1&amp;Language=0&amp;1515078336\"\ncameraURL09=\"http://63.172.41.245/webcapture.jpg?command=snap&channel=1?1508162812\"\n\n\nimage01_label = tkinter.Label()\nimage02_label = tkinter.Label()\nimage03_label = tkinter.Label()\nimage04_label = tkinter.Label()\nimage05_label = tkinter.Label()\nimage06_label = tkinter.Label()\nimage07_label = tkinter.Label()\nimage08_label = tkinter.Label()\nimage09_label = tkinter.Label()\nimage01_label.grid(row=0, column=0)\nimage02_label.grid(row=0, column=1)\nimage03_label.grid(row=0, column=2)\nimage04_label.grid(row=1, column=0)\nimage05_label.grid(row=1, column=1)\nimage06_label.grid(row=1, column=2)\nimage07_label.grid(row=2, column=0)\nimage08_label.grid(row=2, column=1)\nimage09_label.grid(row=2, 
column=2)\n\n\t\ndef main():\n\trootWindow.bind('<Escape>', close)\n\tTimer(0.1, refreshCam01).start()\n\tTimer(0.1, refreshCam02).start()\n\tTimer(0.1, refreshCam03).start()\n\tTimer(0.1, refreshCam04).start()\n\tTimer(0.1, refreshCam05).start()\n\tTimer(0.1, refreshCam06).start()\n\tTimer(0.1, refreshCam07).start()\n\tTimer(0.1, refreshCam08).start()\n\tTimer(0.1, refreshCam09).start()\n\n\ndef URL2PhotoImage(URL):\n\treturn ImageTk.PhotoImage(Image.open(BytesIO(requests.get(URL, timeout=4).content)).resize((int(RWidth/3),int(RHeight/3)), Image.ANTIALIAS))\n\t\ndef refreshCam01():\n\ttry:\n\t\ttmp_photo = URL2PhotoImage(cameraURL01)\n\t\timage01_label.configure(image=tmp_photo)\n\t\timage01_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection\n\texcept:\n\t\tpass\n\tif rootWindow.state() == 'normal': Timer(0.05, refreshCam01).start()\n\ndef refreshCam02():\n\ttry:\n\t\ttmp_photo = URL2PhotoImage(cameraURL02)\n\t\timage02_label.configure(image=tmp_photo)\n\t\timage02_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection\n\texcept:\n\t\tpass\n\tif rootWindow.state() == 'normal': Timer(0.05, refreshCam02).start()\n\t\ndef refreshCam03():\n\ttry:\n\t\ttmp_photo = URL2PhotoImage(cameraURL03)\n\t\timage03_label.configure(image=tmp_photo)\n\t\timage03_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection\n\texcept:\n\t\tpass\n\tif rootWindow.state() == 'normal': Timer(0.05, refreshCam03).start()\n\t\ndef refreshCam04():\n\ttry:\n\t\ttmp_photo = URL2PhotoImage(cameraURL04)\n\t\timage04_label.configure(image=tmp_photo)\n\t\timage04_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection\n\texcept:\n\t\tpass\n\tif rootWindow.state() == 'normal': Timer(0.05, refreshCam04).start()\n\t\ndef refreshCam05():\n\ttry:\n\t\ttmp_photo = URL2PhotoImage(cameraURL05)\n\t\timage05_label.configure(image=tmp_photo)\n\t\timage05_label.image = tmp_photo # keep a reference to prevent 
tkinter garbage collection\n\texcept:\n\t\tpass\n\tif rootWindow.state() == 'normal': Timer(0.05, refreshCam05).start()\n\t\ndef refreshCam06():\n\ttry:\n\t\ttmp_photo = URL2PhotoImage(cameraURL06)\n\t\timage06_label.configure(image=tmp_photo)\n\t\timage06_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection\n\texcept:\n\t\tpass\n\tif rootWindow.state() == 'normal': Timer(0.05, refreshCam06).start()\n\t\ndef refreshCam07():\n\ttry:\n\t\ttmp_photo = URL2PhotoImage(cameraURL07)\n\t\timage07_label.configure(image=tmp_photo)\n\t\timage07_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection\n\texcept:\n\t\tpass\n\tif rootWindow.state() == 'normal': Timer(0.05, refreshCam07).start()\n\t\ndef refreshCam08():\n\ttry:\n\t\ttmp_photo = URL2PhotoImage(cameraURL08)\n\t\timage08_label.configure(image=tmp_photo)\n\t\timage08_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection\n\texcept:\n\t\tpass\n\tif rootWindow.state() == 'normal': Timer(0.05, refreshCam08).start()\n\t\ndef refreshCam09():\n\ttry:\n\t\ttmp_photo = URL2PhotoImage(cameraURL09)\n\t\timage09_label.configure(image=tmp_photo)\n\t\timage09_label.image = tmp_photo # keep a reference to prevent tkinter garbage collection\n\texcept:\n\t\tpass\n\tif rootWindow.state() == 'normal': Timer(0.05, refreshCam09).start()\n\ndef close(event=None):\n\trootWindow.quit()\n\n# start the subprocess, main loop, and gui\nif __name__ == '__main__':\n\tmain()\n\trootWindow.mainloop()\n\t\n",
"step-ids": [
3,
10,
11,
14,
16
]
}
|
[
3,
10,
11,
14,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
gerar_senha()
<|reserved_special_token_1|>
from gerador_senha import gerar_senha
gerar_senha()
|
flexible
|
{
"blob_id": "e81da535408cc36655328b37ca99b4f775f3a78e",
"index": 8435,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ngerar_senha()\n",
"step-3": "from gerador_senha import gerar_senha\ngerar_senha()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
import os
import shutil
import glob
import re
import subprocess
list = glob.glob("*en.mrc")
for en in list:
ef = re.sub("en","ef",en)
efAli = re.sub("en","efAli",en)
cmd='proc2d %s %s_filt.mrc apix=1.501 lp=20' %(ef,ef[:-4])
subprocess.Popen(cmd,shell=True).wait()
cmd="alignhuge %s_filt.mrc %s %s" %(ef[:-4],en,efAli)
subprocess.Popen(cmd,shell=True).wait()
cmd='rm %s_filt.mrc %s' %(ef[:-4],efAli)
subprocess.Popen(cmd,shell=True).wait()
|
normal
|
{
"blob_id": "e5cc556d4258ef5c85f7bc5149cdd33471493bdb",
"index": 1972,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor en in list:\n ef = re.sub('en', 'ef', en)\n efAli = re.sub('en', 'efAli', en)\n cmd = 'proc2d %s %s_filt.mrc apix=1.501 lp=20' % (ef, ef[:-4])\n subprocess.Popen(cmd, shell=True).wait()\n cmd = 'alignhuge %s_filt.mrc %s %s' % (ef[:-4], en, efAli)\n subprocess.Popen(cmd, shell=True).wait()\n cmd = 'rm %s_filt.mrc %s' % (ef[:-4], efAli)\n subprocess.Popen(cmd, shell=True).wait()\n",
"step-3": "<mask token>\nlist = glob.glob('*en.mrc')\nfor en in list:\n ef = re.sub('en', 'ef', en)\n efAli = re.sub('en', 'efAli', en)\n cmd = 'proc2d %s %s_filt.mrc apix=1.501 lp=20' % (ef, ef[:-4])\n subprocess.Popen(cmd, shell=True).wait()\n cmd = 'alignhuge %s_filt.mrc %s %s' % (ef[:-4], en, efAli)\n subprocess.Popen(cmd, shell=True).wait()\n cmd = 'rm %s_filt.mrc %s' % (ef[:-4], efAli)\n subprocess.Popen(cmd, shell=True).wait()\n",
"step-4": "import os\nimport shutil\nimport glob\nimport re\nimport subprocess\nlist = glob.glob('*en.mrc')\nfor en in list:\n ef = re.sub('en', 'ef', en)\n efAli = re.sub('en', 'efAli', en)\n cmd = 'proc2d %s %s_filt.mrc apix=1.501 lp=20' % (ef, ef[:-4])\n subprocess.Popen(cmd, shell=True).wait()\n cmd = 'alignhuge %s_filt.mrc %s %s' % (ef[:-4], en, efAli)\n subprocess.Popen(cmd, shell=True).wait()\n cmd = 'rm %s_filt.mrc %s' % (ef[:-4], efAli)\n subprocess.Popen(cmd, shell=True).wait()\n",
"step-5": "#!/usr/bin/env python\n\nimport os\nimport shutil\nimport glob\nimport re\nimport subprocess\n\nlist = glob.glob(\"*en.mrc\")\n\nfor en in list:\n\t\n\tef = re.sub(\"en\",\"ef\",en)\n\tefAli = re.sub(\"en\",\"efAli\",en)\n\n\tcmd='proc2d %s %s_filt.mrc apix=1.501 lp=20' %(ef,ef[:-4])\n\tsubprocess.Popen(cmd,shell=True).wait()\n\n\tcmd=\"alignhuge %s_filt.mrc %s %s\" %(ef[:-4],en,efAli)\n\tsubprocess.Popen(cmd,shell=True).wait()\n\n\tcmd='rm %s_filt.mrc %s' %(ef[:-4],efAli)\n\tsubprocess.Popen(cmd,shell=True).wait()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def append_log(log, message):
f = open(log, 'a+')
today = datetime.now()
f.write('%s %s \n' % (today.strftime('%Y-%m-%d %H:%M:%S'), message))
f.close()
def get_root_pass():
with open('/root/.my.cnf') as fp:
lines = fp.read().splitlines()
for line in lines:
grep = re.findall('password', line)
if grep:
pwrd = line.split('"')[1]
return pwrd
<|reserved_special_token_0|>
def backup_db(argv):
data = get_db_name(argv)
db_name = data[1]
try:
sqldir = '/home/kusanagi/' + argv + '/sql_backup/'
p = pathlib.Path(sqldir)
if not p.exists():
p.mkdir(mode=493, parents=True, exist_ok=True)
shutil.chown(sqldir, 'kusanagi', 'kusanagi')
except BaseException as error:
print(error)
pwrd = get_root_pass()
log = '/home/kusanagi/' + argv + '/log/backup.log'
mess = 'Backed up database ' + db_name
append_log(log, mess)
cmd = ('mysqldump --single-transaction -p' + pwrd + ' --databases ' +
db_name + ' | gzip > ' + sqldir + db_name + '.sql.gz')
execute_outputfile(cmd, log)
<|reserved_special_token_0|>
def compress_provision_dir(argv, chdir=''):
date = datetime.now()
today = date.strftime('%Y-%m-%d')
if chdir:
tarname = chdir + argv + '.' + today
else:
tarname = '/home/kusanagi/backup/' + argv + '.' + today
source_dir = '/home/kusanagi/' + argv
shutil.make_archive(tarname, 'gztar', source_dir)
return tarname
def local_backup(argv):
append_log('/home/kusanagi/' + argv + '/log/backup.log', '--- Local backup'
)
backup_db(argv)
tarname = compress_provision_dir(argv)
tar_file = pathlib.Path(tarname + '.tar.gz')
if tar_file.exists():
update_backup_record(argv, 0, 1)
else:
update_backup_record(argv, 0, 0)
def check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass):
cmd = ('sshpass -p "' + remote_pass +
'" ssh -o StrictHostKeyChecking=no -p ' + remote_port + ' -q ' +
remote_user + '@' + remote_host + ' exit;echo $?')
res = execute(cmd)
log = '/home/kusanagi/' + argv + '/log/backup.log'
if int(res) == 0:
pass
else:
append_log(log, 'Remote connection failed. Can not issue remote backup'
)
update_backup_record(argv, 1, 0)
sys.exit(1)
def remote_backup(argv, remote_user, remote_host, remote_port, remote_pass,
remote_dest):
log = '/home/kusanagi/' + argv + '/log/backup.log'
append_log(log, '--- Remote backup')
check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass)
backup_db(argv)
tarname = compress_provision_dir(argv, '/home/kusanagi/')
conf_ssh = '/etc/ssh/ssh_config'
with open(conf_ssh) as fp:
lines = fp.read().splitlines()
for line in lines:
grep = re.findall(remote_host, line)
if grep:
break
if not grep:
f = open(conf_ssh, 'a+')
f.write('Host %s\n\tStrictHostKeyChecking no\n' % remote_host)
f.close()
cmd = ('sshpass -p "' + remote_pass +
'" rsync --remove-source-files -azhe \'ssh -p' + remote_port + "' " +
tarname + '.tar.gz ' + remote_user + '@' + remote_host + ':' +
remote_dest + ' 2>> ' + log + ' ; echo $?')
res = execute(cmd)
if int(res) == 0:
update_backup_record(argv, 1, 1)
else:
update_backup_record(argv, 1, 0)
def drive_backup(argv, drive_dir):
log = '/home/kusanagi/' + argv + '/log/backup.log'
append_log(log, '--- Backup to Google Drive')
backup_db(argv)
tarname = compress_provision_dir(argv, '/home/kusanagi/')
cmd = ('rclone copy ' + tarname + '.tar.gz GGD1:' + drive_dir + ' 2>> ' +
log + ' ; echo $?')
res = execute(cmd)
if int(res) == 0:
update_backup_record(argv, 2, 1)
else:
update_backup_record(argv, 2, 0)
os.remove(tarname + '.tar.gz')
def get_options(argv):
parser = argparse.ArgumentParser()
parser.add_argument('mode', type=str, choices=['local', 'remote', 'drive'])
parser.add_argument('options', nargs=argparse.REMAINDER)
return parser.parse_args(argv)
def main():
args = get_options(sys.argv[1:])
options = ' '.join(map(str, args.options))
if args.mode == 'local':
local_backup(*args.options)
elif args.mode == 'remote':
remote_backup(*args.options)
else:
drive_backup(*args.options)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def append_log(log, message):
f = open(log, 'a+')
today = datetime.now()
f.write('%s %s \n' % (today.strftime('%Y-%m-%d %H:%M:%S'), message))
f.close()
def get_root_pass():
with open('/root/.my.cnf') as fp:
lines = fp.read().splitlines()
for line in lines:
grep = re.findall('password', line)
if grep:
pwrd = line.split('"')[1]
return pwrd
def get_db_name(argv):
try:
pwrd = get_root_pass()
db = pymysql.connect('localhost', 'root', pwrd, 'secure_vps')
cursor = db.cursor()
cursor.execute(
"select id,db_name from provision where provision_name='%s'" % argv
)
data = cursor.fetchone()
db.close()
return data
except pymysql.err.OperationalError as err:
print(' An error has occurred \n', err)
except pymysql.err.InternalError as err:
print(' An error has occurred \n', err)
def backup_db(argv):
data = get_db_name(argv)
db_name = data[1]
try:
sqldir = '/home/kusanagi/' + argv + '/sql_backup/'
p = pathlib.Path(sqldir)
if not p.exists():
p.mkdir(mode=493, parents=True, exist_ok=True)
shutil.chown(sqldir, 'kusanagi', 'kusanagi')
except BaseException as error:
print(error)
pwrd = get_root_pass()
log = '/home/kusanagi/' + argv + '/log/backup.log'
mess = 'Backed up database ' + db_name
append_log(log, mess)
cmd = ('mysqldump --single-transaction -p' + pwrd + ' --databases ' +
db_name + ' | gzip > ' + sqldir + db_name + '.sql.gz')
execute_outputfile(cmd, log)
<|reserved_special_token_0|>
def compress_provision_dir(argv, chdir=''):
date = datetime.now()
today = date.strftime('%Y-%m-%d')
if chdir:
tarname = chdir + argv + '.' + today
else:
tarname = '/home/kusanagi/backup/' + argv + '.' + today
source_dir = '/home/kusanagi/' + argv
shutil.make_archive(tarname, 'gztar', source_dir)
return tarname
def local_backup(argv):
append_log('/home/kusanagi/' + argv + '/log/backup.log', '--- Local backup'
)
backup_db(argv)
tarname = compress_provision_dir(argv)
tar_file = pathlib.Path(tarname + '.tar.gz')
if tar_file.exists():
update_backup_record(argv, 0, 1)
else:
update_backup_record(argv, 0, 0)
def check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass):
cmd = ('sshpass -p "' + remote_pass +
'" ssh -o StrictHostKeyChecking=no -p ' + remote_port + ' -q ' +
remote_user + '@' + remote_host + ' exit;echo $?')
res = execute(cmd)
log = '/home/kusanagi/' + argv + '/log/backup.log'
if int(res) == 0:
pass
else:
append_log(log, 'Remote connection failed. Can not issue remote backup'
)
update_backup_record(argv, 1, 0)
sys.exit(1)
def remote_backup(argv, remote_user, remote_host, remote_port, remote_pass,
remote_dest):
log = '/home/kusanagi/' + argv + '/log/backup.log'
append_log(log, '--- Remote backup')
check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass)
backup_db(argv)
tarname = compress_provision_dir(argv, '/home/kusanagi/')
conf_ssh = '/etc/ssh/ssh_config'
with open(conf_ssh) as fp:
lines = fp.read().splitlines()
for line in lines:
grep = re.findall(remote_host, line)
if grep:
break
if not grep:
f = open(conf_ssh, 'a+')
f.write('Host %s\n\tStrictHostKeyChecking no\n' % remote_host)
f.close()
cmd = ('sshpass -p "' + remote_pass +
'" rsync --remove-source-files -azhe \'ssh -p' + remote_port + "' " +
tarname + '.tar.gz ' + remote_user + '@' + remote_host + ':' +
remote_dest + ' 2>> ' + log + ' ; echo $?')
res = execute(cmd)
if int(res) == 0:
update_backup_record(argv, 1, 1)
else:
update_backup_record(argv, 1, 0)
def drive_backup(argv, drive_dir):
log = '/home/kusanagi/' + argv + '/log/backup.log'
append_log(log, '--- Backup to Google Drive')
backup_db(argv)
tarname = compress_provision_dir(argv, '/home/kusanagi/')
cmd = ('rclone copy ' + tarname + '.tar.gz GGD1:' + drive_dir + ' 2>> ' +
log + ' ; echo $?')
res = execute(cmd)
if int(res) == 0:
update_backup_record(argv, 2, 1)
else:
update_backup_record(argv, 2, 0)
os.remove(tarname + '.tar.gz')
def get_options(argv):
parser = argparse.ArgumentParser()
parser.add_argument('mode', type=str, choices=['local', 'remote', 'drive'])
parser.add_argument('options', nargs=argparse.REMAINDER)
return parser.parse_args(argv)
def main():
args = get_options(sys.argv[1:])
options = ' '.join(map(str, args.options))
if args.mode == 'local':
local_backup(*args.options)
elif args.mode == 'remote':
remote_backup(*args.options)
else:
drive_backup(*args.options)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def append_log(log, message):
f = open(log, 'a+')
today = datetime.now()
f.write('%s %s \n' % (today.strftime('%Y-%m-%d %H:%M:%S'), message))
f.close()
def get_root_pass():
with open('/root/.my.cnf') as fp:
lines = fp.read().splitlines()
for line in lines:
grep = re.findall('password', line)
if grep:
pwrd = line.split('"')[1]
return pwrd
def get_db_name(argv):
try:
pwrd = get_root_pass()
db = pymysql.connect('localhost', 'root', pwrd, 'secure_vps')
cursor = db.cursor()
cursor.execute(
"select id,db_name from provision where provision_name='%s'" % argv
)
data = cursor.fetchone()
db.close()
return data
except pymysql.err.OperationalError as err:
print(' An error has occurred \n', err)
except pymysql.err.InternalError as err:
print(' An error has occurred \n', err)
def backup_db(argv):
data = get_db_name(argv)
db_name = data[1]
try:
sqldir = '/home/kusanagi/' + argv + '/sql_backup/'
p = pathlib.Path(sqldir)
if not p.exists():
p.mkdir(mode=493, parents=True, exist_ok=True)
shutil.chown(sqldir, 'kusanagi', 'kusanagi')
except BaseException as error:
print(error)
pwrd = get_root_pass()
log = '/home/kusanagi/' + argv + '/log/backup.log'
mess = 'Backed up database ' + db_name
append_log(log, mess)
cmd = ('mysqldump --single-transaction -p' + pwrd + ' --databases ' +
db_name + ' | gzip > ' + sqldir + db_name + '.sql.gz')
execute_outputfile(cmd, log)
def update_backup_record(argv, backup_type, result):
pwrd = get_root_pass()
data = get_db_name(argv)
provi_id = data[0]
log = '/home/kusanagi/' + argv + '/log/backup.log'
db = pymysql.connect('localhost', 'root', pwrd, 'secure_vps')
cursor = db.cursor()
cursor.execute(
'select id from logs where provision_id=%d and status=0 and backup_type=%d'
% (provi_id, backup_type))
res = cursor.fetchone()
record_id = res[0]
if result:
cursor.execute(
"update logs set status=1,message='Done' where provision_id=%d and id=%d"
% (provi_id, record_id))
else:
cursor.execute(
"update logs set status=-1,message='Failed. See %s' where provision_id=%d and id=%d"
% (log, provi_id, record_id))
db.commit()
db.close()
def compress_provision_dir(argv, chdir=''):
date = datetime.now()
today = date.strftime('%Y-%m-%d')
if chdir:
tarname = chdir + argv + '.' + today
else:
tarname = '/home/kusanagi/backup/' + argv + '.' + today
source_dir = '/home/kusanagi/' + argv
shutil.make_archive(tarname, 'gztar', source_dir)
return tarname
def local_backup(argv):
append_log('/home/kusanagi/' + argv + '/log/backup.log', '--- Local backup'
)
backup_db(argv)
tarname = compress_provision_dir(argv)
tar_file = pathlib.Path(tarname + '.tar.gz')
if tar_file.exists():
update_backup_record(argv, 0, 1)
else:
update_backup_record(argv, 0, 0)
def check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass):
cmd = ('sshpass -p "' + remote_pass +
'" ssh -o StrictHostKeyChecking=no -p ' + remote_port + ' -q ' +
remote_user + '@' + remote_host + ' exit;echo $?')
res = execute(cmd)
log = '/home/kusanagi/' + argv + '/log/backup.log'
if int(res) == 0:
pass
else:
append_log(log, 'Remote connection failed. Can not issue remote backup'
)
update_backup_record(argv, 1, 0)
sys.exit(1)
def remote_backup(argv, remote_user, remote_host, remote_port, remote_pass,
remote_dest):
log = '/home/kusanagi/' + argv + '/log/backup.log'
append_log(log, '--- Remote backup')
check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass)
backup_db(argv)
tarname = compress_provision_dir(argv, '/home/kusanagi/')
conf_ssh = '/etc/ssh/ssh_config'
with open(conf_ssh) as fp:
lines = fp.read().splitlines()
for line in lines:
grep = re.findall(remote_host, line)
if grep:
break
if not grep:
f = open(conf_ssh, 'a+')
f.write('Host %s\n\tStrictHostKeyChecking no\n' % remote_host)
f.close()
cmd = ('sshpass -p "' + remote_pass +
'" rsync --remove-source-files -azhe \'ssh -p' + remote_port + "' " +
tarname + '.tar.gz ' + remote_user + '@' + remote_host + ':' +
remote_dest + ' 2>> ' + log + ' ; echo $?')
res = execute(cmd)
if int(res) == 0:
update_backup_record(argv, 1, 1)
else:
update_backup_record(argv, 1, 0)
def drive_backup(argv, drive_dir):
log = '/home/kusanagi/' + argv + '/log/backup.log'
append_log(log, '--- Backup to Google Drive')
backup_db(argv)
tarname = compress_provision_dir(argv, '/home/kusanagi/')
cmd = ('rclone copy ' + tarname + '.tar.gz GGD1:' + drive_dir + ' 2>> ' +
log + ' ; echo $?')
res = execute(cmd)
if int(res) == 0:
update_backup_record(argv, 2, 1)
else:
update_backup_record(argv, 2, 0)
os.remove(tarname + '.tar.gz')
def get_options(argv):
parser = argparse.ArgumentParser()
parser.add_argument('mode', type=str, choices=['local', 'remote', 'drive'])
parser.add_argument('options', nargs=argparse.REMAINDER)
return parser.parse_args(argv)
def main():
    """Entry point: parse CLI arguments and dispatch to the selected backup.

    The remainder arguments are splatted into the mode handler, so each
    handler's positional signature defines the expected CLI shape.
    """
    args = get_options(sys.argv[1:])
    # Fix: removed the unused local `options` (joined string was never read).
    if args.mode == 'local':
        local_backup(*args.options)
    elif args.mode == 'remote':
        remote_backup(*args.options)
    else:
        drive_backup(*args.options)
if __name__ == '__main__':
    # Run the CLI entry point only when executed as a script, not on import.
    main()
<|reserved_special_token_1|>
import argparse
import os
import sys, shutil
from shutil import make_archive
import pathlib
from phpManager import execute, execute_outputfile
from datetime import date, datetime
import re
import pymysql
import tarfile
def append_log(log, message):
    """Append *message* to *log*, prefixed with a 'YYYY-MM-DD HH:MM:SS' stamp.

    :param log: path of the log file (created if missing)
    :param message: text to record
    """
    today = datetime.now()
    # Fix: context manager guarantees the handle is closed even if write()
    # raises (the original left the file open on error).
    with open(log, 'a+') as f:
        f.write('%s %s \n' % (today.strftime('%Y-%m-%d %H:%M:%S'), message))
def get_root_pass(cnf_path='/root/.my.cnf'):
    """Extract the MySQL root password from a .my.cnf-style file.

    Scans every line containing 'password' and returns the text between the
    first pair of double quotes on the last such line, e.g.
    ``password="secret"`` -> ``'secret'``.

    :param cnf_path: config file to read; the default preserves the original
        hard-coded behavior, the parameter makes the function testable
    :return: the password string, or None if no password line exists
        (the original raised NameError in that case)
    """
    with open(cnf_path) as fp:
        lines = fp.read().splitlines()
    pwrd = None
    for line in lines:
        if re.findall('password', line):
            pwrd = line.split('"')[1]
    return pwrd
def get_db_name(argv):
    """Look up ``(id, db_name)`` for a provision by name in the secure_vps DB.

    :param argv: provision name to search for
    :return: a ``(id, db_name)`` row tuple, or None when no row matches or a
        pymysql error occurred (errors are printed, not raised)
    """
    try:
        pwrd = get_root_pass()
        # NOTE(review): positional connect args assume (host, user, password,
        # database); deprecated in newer pymysql -- verify installed version.
        db = pymysql.connect('localhost', 'root', pwrd, 'secure_vps')
        cursor = db.cursor()
        # NOTE(review): provision_name is %-interpolated into the SQL string;
        # callers must pass trusted names (injection risk otherwise).
        cursor.execute(
            "select id,db_name from provision where provision_name='%s'" % argv
            )
        data = cursor.fetchone()
        db.close()
        return data
    except pymysql.err.OperationalError as err:
        # Deliberate best-effort: log and fall through, returning None.
        print(' An error has occurred \n', err)
    except pymysql.err.InternalError as err:
        print(' An error has occurred \n', err)
def backup_db(argv):
    """Dump the provision's MySQL database to ``<site>/sql_backup/<db>.sql.gz``.

    Creates the sql_backup directory (owned by kusanagi) when missing, logs
    the action, then streams a single-transaction mysqldump through gzip.

    :param argv: provision name (also the directory under /home/kusanagi)
    """
    data = get_db_name(argv)
    # NOTE(review): get_db_name returns None on DB errors; data[1] would then
    # raise TypeError -- confirm callers only run after a successful lookup.
    db_name = data[1]
    try:
        sqldir = '/home/kusanagi/' + argv + '/sql_backup/'
        p = pathlib.Path(sqldir)
        if not p.exists():
            # Fix: 0o755 (rwxr-xr-x) instead of the opaque decimal 493 --
            # identical value, readable intent.
            p.mkdir(mode=0o755, parents=True, exist_ok=True)
            shutil.chown(sqldir, 'kusanagi', 'kusanagi')
    except BaseException as error:
        # Best-effort directory setup: report and continue to the dump.
        print(error)
    pwrd = get_root_pass()
    log = '/home/kusanagi/' + argv + '/log/backup.log'
    mess = 'Backed up database ' + db_name
    append_log(log, mess)
    # NOTE(review): the root password appears on the mysqldump command line
    # (visible in the process list); --defaults-file would avoid that.
    cmd = ('mysqldump --single-transaction -p' + pwrd + ' --databases ' +
        db_name + ' | gzip > ' + sqldir + db_name + '.sql.gz')
    execute_outputfile(cmd, log)
def update_backup_record(argv, backup_type, result):
    """Mark the pending backup log row for a provision as done or failed.

    Finds the ``logs`` row with status=0 for this provision/backup_type, then
    sets status=1/'Done' on success or status=-1 with a pointer to the log
    file on failure.

    :param argv: provision name
    :param backup_type: 0=local, 1=remote, 2=drive
    :param result: truthy on success, falsy on failure
    """
    pwrd = get_root_pass()
    data = get_db_name(argv)
    # NOTE(review): get_db_name returns None on DB errors; data[0] would then
    # raise TypeError -- confirm callers only run after a successful lookup.
    provi_id = data[0]
    log = '/home/kusanagi/' + argv + '/log/backup.log'
    db = pymysql.connect('localhost', 'root', pwrd, 'secure_vps')
    cursor = db.cursor()
    # Fix: parameterized queries (driver-side quoting) instead of %-string
    # interpolation into the SQL text.
    cursor.execute(
        'select id from logs where provision_id=%s and status=0 and backup_type=%s'
        , (provi_id, backup_type))
    res = cursor.fetchone()
    record_id = res[0]
    if result:
        cursor.execute(
            "update logs set status=1,message='Done' where provision_id=%s and id=%s"
            , (provi_id, record_id))
    else:
        # Same runtime message as before ('Failed. See <log>'), but the value
        # now travels as a bound parameter.
        cursor.execute(
            'update logs set status=-1,message=%s where provision_id=%s and id=%s'
            , ('Failed. See ' + log, provi_id, record_id))
    db.commit()
    db.close()
def compress_provision_dir(argv, chdir=''):
    """Create ``<name>.<YYYY-MM-DD>.tar.gz`` from ``/home/kusanagi/<name>``.

    :param argv: provision name
    :param chdir: optional directory for the archive; defaults to
        /home/kusanagi/backup/
    :return: archive path WITHOUT the .tar.gz extension (make_archive adds it)
    """
    # Fix: renamed local from 'date' to 'now' -- the old name shadowed the
    # module-level `from datetime import date` import.
    now = datetime.now()
    today = now.strftime('%Y-%m-%d')
    if chdir:
        tarname = chdir + argv + '.' + today
    else:
        tarname = '/home/kusanagi/backup/' + argv + '.' + today
    source_dir = '/home/kusanagi/' + argv
    shutil.make_archive(tarname, 'gztar', source_dir)
    return tarname
def local_backup(argv):
    """Local backup: dump the DB, archive the site into /home/kusanagi/backup/,
    then record success (type 0) iff the tarball exists on disk."""
    logfile = '/home/kusanagi/' + argv + '/log/backup.log'
    append_log(logfile, '--- Local backup')
    backup_db(argv)
    archive = compress_provision_dir(argv)
    succeeded = pathlib.Path(archive + '.tar.gz').exists()
    update_backup_record(argv, 0, 1 if succeeded else 0)
def check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass):
    """Verify the remote host accepts an SSH login before a remote backup.

    Runs a non-interactive ``ssh ... exit`` through sshpass and inspects the
    shell's exit status.  On failure the per-site backup log is appended, the
    backup record (type 1 = remote) is marked failed and the process exits.

    :param argv: provision name; used to locate the per-site backup log
    :param remote_user: SSH user on the backup target
    :param remote_host: SSH host of the backup target
    :param remote_port: SSH port (string, interpolated into the command)
    :param remote_pass: SSH password handed to sshpass
    """
    # NOTE(review): password/host are interpolated into a shell string; these
    # values must be trusted (shell-injection risk otherwise).
    cmd = ('sshpass -p "' + remote_pass +
        '" ssh -o StrictHostKeyChecking=no -p ' + remote_port + ' -q ' +
        remote_user + '@' + remote_host + ' exit;echo $?')
    res = execute(cmd)
    log = '/home/kusanagi/' + argv + '/log/backup.log'
    # Fix: replaced the empty `if ...: pass / else:` shape with the direct
    # negated condition -- same behavior, no dead branch.
    if int(res) != 0:
        append_log(log, 'Remote connection failed. Can not issue remote backup'
            )
        update_backup_record(argv, 1, 0)
        sys.exit(1)
def remote_backup(argv, remote_user, remote_host, remote_port, remote_pass,
    remote_dest):
    """Back up a provision to a remote host: dump the DB, archive the site
    directory and rsync the tarball over SSH.

    On success rsync removes the local archive (--remove-source-files) and
    the remote backup record (type 1) is marked done; otherwise failed.

    :param argv: provision name under /home/kusanagi
    :param remote_user: SSH user on the target host
    :param remote_host: SSH host name or address
    :param remote_port: SSH port (string)
    :param remote_pass: SSH password handed to sshpass
    :param remote_dest: destination directory on the target host
    """
    log = '/home/kusanagi/' + argv + '/log/backup.log'
    append_log(log, '--- Remote backup')
    check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass)
    backup_db(argv)
    tarname = compress_provision_dir(argv, '/home/kusanagi/')
    conf_ssh = '/etc/ssh/ssh_config'
    with open(conf_ssh) as fp:
        lines = fp.read().splitlines()
    # Fix: initialize before the loop -- an empty ssh_config previously left
    # 'grep' unbound and raised NameError at the 'if not grep' check below.
    grep = None
    for line in lines:
        grep = re.findall(remote_host, line)
        if grep:
            break
    if not grep:
        # First contact with this host: persist a StrictHostKeyChecking=no
        # stanza so future non-interactive connections don't prompt.
        f = open(conf_ssh, 'a+')
        f.write('Host %s\n\tStrictHostKeyChecking no\n' % remote_host)
        f.close()
    cmd = ('sshpass -p "' + remote_pass +
        '" rsync --remove-source-files -azhe \'ssh -p' + remote_port + "' " +
        tarname + '.tar.gz ' + remote_user + '@' + remote_host + ':' +
        remote_dest + ' 2>> ' + log + ' ; echo $?')
    res = execute(cmd)
    if int(res) == 0:
        update_backup_record(argv, 1, 1)
    else:
        update_backup_record(argv, 1, 0)
def drive_backup(argv, drive_dir):
    """Upload a compressed provision archive to Google Drive via rclone.

    Dumps the database, archives the site directory, copies the tarball to
    the rclone remote GGD1 under *drive_dir*, records the result (type 2)
    and always removes the local archive afterwards.
    """
    logfile = '/home/kusanagi/' + argv + '/log/backup.log'
    append_log(logfile, '--- Backup to Google Drive')
    backup_db(argv)
    archive = compress_provision_dir(argv, '/home/kusanagi/')
    rclone_cmd = ('rclone copy ' + archive + '.tar.gz GGD1:' + drive_dir +
        ' 2>> ' + logfile + ' ; echo $?')
    status = int(execute(rclone_cmd))
    update_backup_record(argv, 2, 1 if status == 0 else 0)
    os.remove(archive + '.tar.gz')
def get_options(argv):
    """Parse CLI arguments: a backup mode plus mode-specific trailing args.

    :param argv: argument list (typically sys.argv[1:])
    :return: argparse.Namespace with .mode and .options (remainder list)
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('mode', type=str, choices=['local', 'remote', 'drive'])
    cli.add_argument('options', nargs=argparse.REMAINDER)
    parsed = cli.parse_args(argv)
    return parsed
def main():
    """Entry point: parse CLI arguments and dispatch to the selected backup.

    The remainder arguments are splatted into the mode handler, so each
    handler's positional signature defines the expected CLI shape.
    """
    args = get_options(sys.argv[1:])
    # Fix: removed the unused local `options` (joined string was never read).
    if args.mode == 'local':
        local_backup(*args.options)
    elif args.mode == 'remote':
        remote_backup(*args.options)
    else:
        drive_backup(*args.options)
if __name__ == '__main__':
    # Run the CLI entry point only when executed as a script, not on import.
    main()
<|reserved_special_token_1|>
#!/usr/bin/env python3
import argparse
import os
import sys,shutil
from shutil import make_archive
import pathlib
from phpManager import execute,execute_outputfile
from datetime import date,datetime
import re
import pymysql
import tarfile
def append_log(log,message):
    """Append *message* to *log*, prefixed with a 'YYYY-MM-DD HH:MM:SS' stamp.

    :param log: path of the log file (created if missing)
    :param message: text to record
    """
    today = datetime.now()
    # Fix: context manager closes the handle even if write() raises.
    with open(log, "a+") as f:
        f.write("%s %s \n" % (today.strftime("%Y-%m-%d %H:%M:%S"), message))
def get_root_pass(cnf_path="/root/.my.cnf"):
    """Extract the MySQL root password from a .my.cnf-style file.

    Scans every line containing 'password' and returns the text between the
    first pair of double quotes on the last such line.

    :param cnf_path: config file to read; the default preserves the original
        hard-coded behavior, the parameter makes the function testable
    :return: the password string, or None if no password line exists
        (the original raised NameError in that case)
    """
    with open(cnf_path) as fp:
        lines = fp.read().splitlines()
    pwrd = None
    for line in lines:
        if re.findall(r'password', line):
            pwrd = line.split('"')[1]
    return pwrd
def get_db_name(argv):
    """Look up ``(id, db_name)`` for a provision by name in the secure_vps DB.

    :param argv: provision name to search for
    :return: a ``(id, db_name)`` row tuple, or None when no row matches or a
        pymysql error occurred (errors are printed, not raised)
    """
    try:
        pwrd = get_root_pass()
        # NOTE(review): positional connect args assume (host, user, password,
        # database); deprecated in newer pymysql -- verify installed version.
        db = pymysql.connect("localhost","root",pwrd,"secure_vps")
        cursor = db.cursor()
        # NOTE(review): provision_name is %-interpolated into the SQL string;
        # callers must pass trusted names (injection risk otherwise).
        cursor.execute("select id,db_name from provision where provision_name='%s'" % argv)
        data = cursor.fetchone()
        db.close()
        return data
    except pymysql.err.OperationalError as err:
        # Deliberate best-effort: log and fall through, returning None.
        print (' An error has occurred \n', err)
    except pymysql.err.InternalError as err:
        print (' An error has occurred \n', err)
def backup_db(argv):
    """Dump the provision's MySQL database to ``<site>/sql_backup/<db>.sql.gz``.

    Creates the sql_backup directory (owned by kusanagi) when missing, logs
    the action, then streams a single-transaction mysqldump through gzip.

    :param argv: provision name (also the directory under /home/kusanagi)
    """
    data = get_db_name(argv)
    # NOTE(review): get_db_name returns None on DB errors; data[1] would then
    # raise TypeError -- confirm callers only run after a successful lookup.
    db_name = data[1]
    try:
        sqldir = '/home/kusanagi/'+argv+'/sql_backup/'
        p = pathlib.Path(sqldir)
        if not p.exists():
            # 0o755 = rwxr-xr-x; directory handed to the kusanagi user.
            p.mkdir(mode=0o755, parents=True, exist_ok=True)
            shutil.chown(sqldir,'kusanagi','kusanagi')
    except BaseException as error:
        # Best-effort directory setup: report and continue to the dump.
        print(error)
    pwrd = get_root_pass()
    log = '/home/kusanagi/'+argv+'/log/backup.log'
    mess = 'Backed up database '+db_name
    append_log(log,mess)
    # NOTE(review): the root password appears on the mysqldump command line
    # (visible in the process list); --defaults-file would avoid that.
    cmd = 'mysqldump --single-transaction -p'+pwrd+' --databases '+db_name+' | gzip > '+sqldir+db_name+'.sql.gz'
    execute_outputfile(cmd,log)
def update_backup_record(argv,backup_type,result):
    """Mark the pending (status=0) ``logs`` row for this provision as done
    or failed.

    :param argv: provision name
    :param backup_type: 0=local, 1=remote, 2=drive
    :param result: truthy on success (status=1/'Done'), falsy on failure
        (status=-1 with a pointer to the backup log)
    """
    pwrd = get_root_pass()
    data = get_db_name(argv)
    # NOTE(review): get_db_name returns None on DB errors; data[0] would then
    # raise TypeError -- confirm callers only run after a successful lookup.
    provi_id = data[0]
    log = '/home/kusanagi/'+argv+'/log/backup.log'
    db = pymysql.connect("localhost","root",pwrd,"secure_vps")
    cursor = db.cursor()
    # NOTE(review): values are %-interpolated into the SQL text; they are
    # ints/internal paths here, but parameterized queries would be safer.
    cursor.execute("select id from logs where provision_id=%d and status=0 and backup_type=%d" % (provi_id,backup_type))
    res = cursor.fetchone()
    # NOTE(review): fetchone() returns None when no pending row exists;
    # res[0] would then raise TypeError -- verify a row is always pre-created.
    record_id = res[0]
    if result:
        cursor.execute("update logs set status=1,message='Done' where provision_id=%d and id=%d" % (provi_id,record_id))
    else:
        cursor.execute("update logs set status=-1,message='Failed. See %s' where provision_id=%d and id=%d" % (log,provi_id,record_id))
    db.commit()
    db.close()
def compress_provision_dir(argv,chdir=''):
    """Create ``<name>.<YYYY-MM-DD>.tar.gz`` from ``/home/kusanagi/<name>``.

    :param argv: provision name
    :param chdir: optional directory for the archive; defaults to
        /home/kusanagi/backup/
    :return: archive path WITHOUT the .tar.gz extension (make_archive adds it)
    """
    # Fix: renamed local from 'date' to 'now' -- the old name shadowed the
    # module-level `from datetime import date` import.
    now = datetime.now()
    today = now.strftime("%Y-%m-%d")
    if chdir:
        tarname = chdir+argv+'.'+today
    else:
        tarname = '/home/kusanagi/backup/'+argv+'.'+today
    source_dir = '/home/kusanagi/'+argv
    shutil.make_archive(tarname,"gztar",source_dir)
    return tarname
def local_backup(argv):
    """Local backup: dump the DB, tar the site into /home/kusanagi/backup/,
    then record success (type 0) iff the archive file exists on disk."""
    append_log('/home/kusanagi/'+argv+'/log/backup.log', '--- Local backup')
    backup_db(argv)
    tarname = compress_provision_dir(argv)
    # Success is judged purely by the tarball's presence on disk.
    tar_file=pathlib.Path(tarname+'.tar.gz')
    if tar_file.exists():
        update_backup_record(argv,0,1)
    else:
        update_backup_record(argv,0,0)
def check_ssh_conn(argv,remote_user,remote_host,remote_port,remote_pass):
    """Probe SSH connectivity; on failure log it, mark the remote backup
    record (type 1) failed and exit the process.

    :param argv: provision name; locates the per-site backup log
    :param remote_user/remote_host/remote_port/remote_pass: SSH credentials
    """
    # NOTE(review): password/host are interpolated into a shell string; these
    # values must be trusted (shell-injection risk otherwise).
    cmd = 'sshpass -p "'+remote_pass+'" ssh -o StrictHostKeyChecking=no -p '+remote_port+' -q '+remote_user+'@'+remote_host+' exit;echo $?'
    res = execute(cmd)
    log = '/home/kusanagi/'+argv+'/log/backup.log'
    # Fix: replaced the empty `if ...: pass / else:` shape with the direct
    # negated condition -- same behavior, no dead branch.
    if int(res) != 0:
        append_log(log, 'Remote connection failed. Can not issue remote backup')
        update_backup_record(argv,1,0)
        sys.exit(1)
def remote_backup(argv, remote_user, remote_host, remote_port, remote_pass, remote_dest):
    """Back up a provision to a remote host: dump the DB, archive the site
    directory and rsync the tarball over SSH.

    On success rsync removes the local archive (--remove-source-files) and
    the remote backup record (type 1) is marked done; otherwise failed.

    :param argv: provision name under /home/kusanagi
    :param remote_user/remote_host/remote_port/remote_pass: SSH credentials
    :param remote_dest: destination directory on the target host
    """
    log = '/home/kusanagi/'+argv+'/log/backup.log'
    append_log(log, '--- Remote backup')
    check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass)
    backup_db(argv)
    tarname = compress_provision_dir(argv,'/home/kusanagi/')
    conf_ssh = '/etc/ssh/ssh_config'
    with open(conf_ssh) as fp: lines = fp.read().splitlines()
    # Fix: initialize before the loop -- an empty ssh_config previously left
    # 'grep' unbound and raised NameError at the 'if not grep' check below.
    grep = None
    for line in lines:
        grep = re.findall(remote_host, line)
        if grep:
            break
    if not grep:
        # First contact with this host: disable strict host-key checking.
        f = open(conf_ssh,"a+")
        f.write('Host %s\n\tStrictHostKeyChecking no\n' % remote_host)
        f.close()
    cmd = 'sshpass -p "'+remote_pass+'" rsync --remove-source-files -azhe \'ssh -p'+remote_port+'\' '+tarname+'.tar.gz '+remote_user+'@'+remote_host+':'+remote_dest+' 2>> '+log+' ; echo $?'
    res = execute(cmd)
    if int(res) == 0:
        update_backup_record(argv,1,1)
    else:
        update_backup_record(argv,1,0)
def drive_backup(argv,drive_dir):
    """Upload a compressed provision archive to the rclone remote GGD1.

    Dumps the database, archives the site directory, copies the tarball to
    Google Drive under *drive_dir*, records the result (type 2) and always
    removes the local archive afterwards.
    """
    log = '/home/kusanagi/'+argv+'/log/backup.log'
    append_log(log,'--- Backup to Google Drive')
    backup_db(argv)
    tarname = compress_provision_dir(argv,'/home/kusanagi/')
    # rclone stderr goes to the backup log; echo $? yields the exit status.
    cmd = 'rclone copy '+tarname+'.tar.gz GGD1:'+drive_dir+ ' 2>> '+log+' ; echo $?'
    res = execute(cmd)
    if int(res) == 0:
        update_backup_record(argv,2,1)
    else:
        update_backup_record(argv,2,0)
    # Local archive is removed whether or not the upload succeeded.
    os.remove(tarname+'.tar.gz')
def get_options(argv):
    """Parse the backup mode plus any trailing mode-specific arguments.

    :param argv: argument list (typically sys.argv[1:])
    :return: argparse.Namespace with .mode and .options (remainder list)
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('mode', type=str, choices=['local', 'remote', 'drive'])
    cli.add_argument('options', nargs=argparse.REMAINDER)
    return cli.parse_args(argv)
def main():
    """Entry point: parse CLI arguments and dispatch to the selected backup.

    The remainder arguments are splatted into the mode handler, so each
    handler's positional signature defines the expected CLI shape.
    """
    args=get_options(sys.argv[1:])
    # Fix: removed the unused local `options` (joined string was never read)
    # and the stale commented-out get_root_pass() call.
    if args.mode == 'local':
        local_backup(*args.options)
    elif args.mode == 'remote':
        remote_backup(*args.options)
    else:
        drive_backup(*args.options)
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "e09af436f2fb37d16427aa0b1416d6f2d59ad6c4",
"index": 214,
"step-1": "<mask token>\n\n\ndef append_log(log, message):\n f = open(log, 'a+')\n today = datetime.now()\n f.write('%s %s \\n' % (today.strftime('%Y-%m-%d %H:%M:%S'), message))\n f.close()\n\n\ndef get_root_pass():\n with open('/root/.my.cnf') as fp:\n lines = fp.read().splitlines()\n for line in lines:\n grep = re.findall('password', line)\n if grep:\n pwrd = line.split('\"')[1]\n return pwrd\n\n\n<mask token>\n\n\ndef backup_db(argv):\n data = get_db_name(argv)\n db_name = data[1]\n try:\n sqldir = '/home/kusanagi/' + argv + '/sql_backup/'\n p = pathlib.Path(sqldir)\n if not p.exists():\n p.mkdir(mode=493, parents=True, exist_ok=True)\n shutil.chown(sqldir, 'kusanagi', 'kusanagi')\n except BaseException as error:\n print(error)\n pwrd = get_root_pass()\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n mess = 'Backed up database ' + db_name\n append_log(log, mess)\n cmd = ('mysqldump --single-transaction -p' + pwrd + ' --databases ' +\n db_name + ' | gzip > ' + sqldir + db_name + '.sql.gz')\n execute_outputfile(cmd, log)\n\n\n<mask token>\n\n\ndef compress_provision_dir(argv, chdir=''):\n date = datetime.now()\n today = date.strftime('%Y-%m-%d')\n if chdir:\n tarname = chdir + argv + '.' + today\n else:\n tarname = '/home/kusanagi/backup/' + argv + '.' 
+ today\n source_dir = '/home/kusanagi/' + argv\n shutil.make_archive(tarname, 'gztar', source_dir)\n return tarname\n\n\ndef local_backup(argv):\n append_log('/home/kusanagi/' + argv + '/log/backup.log', '--- Local backup'\n )\n backup_db(argv)\n tarname = compress_provision_dir(argv)\n tar_file = pathlib.Path(tarname + '.tar.gz')\n if tar_file.exists():\n update_backup_record(argv, 0, 1)\n else:\n update_backup_record(argv, 0, 0)\n\n\ndef check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass):\n cmd = ('sshpass -p \"' + remote_pass +\n '\" ssh -o StrictHostKeyChecking=no -p ' + remote_port + ' -q ' +\n remote_user + '@' + remote_host + ' exit;echo $?')\n res = execute(cmd)\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n if int(res) == 0:\n pass\n else:\n append_log(log, 'Remote connection failed. Can not issue remote backup'\n )\n update_backup_record(argv, 1, 0)\n sys.exit(1)\n\n\ndef remote_backup(argv, remote_user, remote_host, remote_port, remote_pass,\n remote_dest):\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n append_log(log, '--- Remote backup')\n check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass)\n backup_db(argv)\n tarname = compress_provision_dir(argv, '/home/kusanagi/')\n conf_ssh = '/etc/ssh/ssh_config'\n with open(conf_ssh) as fp:\n lines = fp.read().splitlines()\n for line in lines:\n grep = re.findall(remote_host, line)\n if grep:\n break\n if not grep:\n f = open(conf_ssh, 'a+')\n f.write('Host %s\\n\\tStrictHostKeyChecking no\\n' % remote_host)\n f.close()\n cmd = ('sshpass -p \"' + remote_pass +\n '\" rsync --remove-source-files -azhe \\'ssh -p' + remote_port + \"' \" +\n tarname + '.tar.gz ' + remote_user + '@' + remote_host + ':' +\n remote_dest + ' 2>> ' + log + ' ; echo $?')\n res = execute(cmd)\n if int(res) == 0:\n update_backup_record(argv, 1, 1)\n else:\n update_backup_record(argv, 1, 0)\n\n\ndef drive_backup(argv, drive_dir):\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n 
append_log(log, '--- Backup to Google Drive')\n backup_db(argv)\n tarname = compress_provision_dir(argv, '/home/kusanagi/')\n cmd = ('rclone copy ' + tarname + '.tar.gz GGD1:' + drive_dir + ' 2>> ' +\n log + ' ; echo $?')\n res = execute(cmd)\n if int(res) == 0:\n update_backup_record(argv, 2, 1)\n else:\n update_backup_record(argv, 2, 0)\n os.remove(tarname + '.tar.gz')\n\n\ndef get_options(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('mode', type=str, choices=['local', 'remote', 'drive'])\n parser.add_argument('options', nargs=argparse.REMAINDER)\n return parser.parse_args(argv)\n\n\ndef main():\n args = get_options(sys.argv[1:])\n options = ' '.join(map(str, args.options))\n if args.mode == 'local':\n local_backup(*args.options)\n elif args.mode == 'remote':\n remote_backup(*args.options)\n else:\n drive_backup(*args.options)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef append_log(log, message):\n f = open(log, 'a+')\n today = datetime.now()\n f.write('%s %s \\n' % (today.strftime('%Y-%m-%d %H:%M:%S'), message))\n f.close()\n\n\ndef get_root_pass():\n with open('/root/.my.cnf') as fp:\n lines = fp.read().splitlines()\n for line in lines:\n grep = re.findall('password', line)\n if grep:\n pwrd = line.split('\"')[1]\n return pwrd\n\n\ndef get_db_name(argv):\n try:\n pwrd = get_root_pass()\n db = pymysql.connect('localhost', 'root', pwrd, 'secure_vps')\n cursor = db.cursor()\n cursor.execute(\n \"select id,db_name from provision where provision_name='%s'\" % argv\n )\n data = cursor.fetchone()\n db.close()\n return data\n except pymysql.err.OperationalError as err:\n print(' An error has occurred \\n', err)\n except pymysql.err.InternalError as err:\n print(' An error has occurred \\n', err)\n\n\ndef backup_db(argv):\n data = get_db_name(argv)\n db_name = data[1]\n try:\n sqldir = '/home/kusanagi/' + argv + '/sql_backup/'\n p = pathlib.Path(sqldir)\n if not p.exists():\n p.mkdir(mode=493, parents=True, exist_ok=True)\n shutil.chown(sqldir, 'kusanagi', 'kusanagi')\n except BaseException as error:\n print(error)\n pwrd = get_root_pass()\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n mess = 'Backed up database ' + db_name\n append_log(log, mess)\n cmd = ('mysqldump --single-transaction -p' + pwrd + ' --databases ' +\n db_name + ' | gzip > ' + sqldir + db_name + '.sql.gz')\n execute_outputfile(cmd, log)\n\n\n<mask token>\n\n\ndef compress_provision_dir(argv, chdir=''):\n date = datetime.now()\n today = date.strftime('%Y-%m-%d')\n if chdir:\n tarname = chdir + argv + '.' + today\n else:\n tarname = '/home/kusanagi/backup/' + argv + '.' 
+ today\n source_dir = '/home/kusanagi/' + argv\n shutil.make_archive(tarname, 'gztar', source_dir)\n return tarname\n\n\ndef local_backup(argv):\n append_log('/home/kusanagi/' + argv + '/log/backup.log', '--- Local backup'\n )\n backup_db(argv)\n tarname = compress_provision_dir(argv)\n tar_file = pathlib.Path(tarname + '.tar.gz')\n if tar_file.exists():\n update_backup_record(argv, 0, 1)\n else:\n update_backup_record(argv, 0, 0)\n\n\ndef check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass):\n cmd = ('sshpass -p \"' + remote_pass +\n '\" ssh -o StrictHostKeyChecking=no -p ' + remote_port + ' -q ' +\n remote_user + '@' + remote_host + ' exit;echo $?')\n res = execute(cmd)\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n if int(res) == 0:\n pass\n else:\n append_log(log, 'Remote connection failed. Can not issue remote backup'\n )\n update_backup_record(argv, 1, 0)\n sys.exit(1)\n\n\ndef remote_backup(argv, remote_user, remote_host, remote_port, remote_pass,\n remote_dest):\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n append_log(log, '--- Remote backup')\n check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass)\n backup_db(argv)\n tarname = compress_provision_dir(argv, '/home/kusanagi/')\n conf_ssh = '/etc/ssh/ssh_config'\n with open(conf_ssh) as fp:\n lines = fp.read().splitlines()\n for line in lines:\n grep = re.findall(remote_host, line)\n if grep:\n break\n if not grep:\n f = open(conf_ssh, 'a+')\n f.write('Host %s\\n\\tStrictHostKeyChecking no\\n' % remote_host)\n f.close()\n cmd = ('sshpass -p \"' + remote_pass +\n '\" rsync --remove-source-files -azhe \\'ssh -p' + remote_port + \"' \" +\n tarname + '.tar.gz ' + remote_user + '@' + remote_host + ':' +\n remote_dest + ' 2>> ' + log + ' ; echo $?')\n res = execute(cmd)\n if int(res) == 0:\n update_backup_record(argv, 1, 1)\n else:\n update_backup_record(argv, 1, 0)\n\n\ndef drive_backup(argv, drive_dir):\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n 
append_log(log, '--- Backup to Google Drive')\n backup_db(argv)\n tarname = compress_provision_dir(argv, '/home/kusanagi/')\n cmd = ('rclone copy ' + tarname + '.tar.gz GGD1:' + drive_dir + ' 2>> ' +\n log + ' ; echo $?')\n res = execute(cmd)\n if int(res) == 0:\n update_backup_record(argv, 2, 1)\n else:\n update_backup_record(argv, 2, 0)\n os.remove(tarname + '.tar.gz')\n\n\ndef get_options(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('mode', type=str, choices=['local', 'remote', 'drive'])\n parser.add_argument('options', nargs=argparse.REMAINDER)\n return parser.parse_args(argv)\n\n\ndef main():\n args = get_options(sys.argv[1:])\n options = ' '.join(map(str, args.options))\n if args.mode == 'local':\n local_backup(*args.options)\n elif args.mode == 'remote':\n remote_backup(*args.options)\n else:\n drive_backup(*args.options)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef append_log(log, message):\n f = open(log, 'a+')\n today = datetime.now()\n f.write('%s %s \\n' % (today.strftime('%Y-%m-%d %H:%M:%S'), message))\n f.close()\n\n\ndef get_root_pass():\n with open('/root/.my.cnf') as fp:\n lines = fp.read().splitlines()\n for line in lines:\n grep = re.findall('password', line)\n if grep:\n pwrd = line.split('\"')[1]\n return pwrd\n\n\ndef get_db_name(argv):\n try:\n pwrd = get_root_pass()\n db = pymysql.connect('localhost', 'root', pwrd, 'secure_vps')\n cursor = db.cursor()\n cursor.execute(\n \"select id,db_name from provision where provision_name='%s'\" % argv\n )\n data = cursor.fetchone()\n db.close()\n return data\n except pymysql.err.OperationalError as err:\n print(' An error has occurred \\n', err)\n except pymysql.err.InternalError as err:\n print(' An error has occurred \\n', err)\n\n\ndef backup_db(argv):\n data = get_db_name(argv)\n db_name = data[1]\n try:\n sqldir = '/home/kusanagi/' + argv + '/sql_backup/'\n p = pathlib.Path(sqldir)\n if not p.exists():\n p.mkdir(mode=493, parents=True, exist_ok=True)\n shutil.chown(sqldir, 'kusanagi', 'kusanagi')\n except BaseException as error:\n print(error)\n pwrd = get_root_pass()\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n mess = 'Backed up database ' + db_name\n append_log(log, mess)\n cmd = ('mysqldump --single-transaction -p' + pwrd + ' --databases ' +\n db_name + ' | gzip > ' + sqldir + db_name + '.sql.gz')\n execute_outputfile(cmd, log)\n\n\ndef update_backup_record(argv, backup_type, result):\n pwrd = get_root_pass()\n data = get_db_name(argv)\n provi_id = data[0]\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n db = pymysql.connect('localhost', 'root', pwrd, 'secure_vps')\n cursor = db.cursor()\n cursor.execute(\n 'select id from logs where provision_id=%d and status=0 and backup_type=%d'\n % (provi_id, backup_type))\n res = cursor.fetchone()\n record_id = res[0]\n if result:\n cursor.execute(\n \"update logs set 
status=1,message='Done' where provision_id=%d and id=%d\"\n % (provi_id, record_id))\n else:\n cursor.execute(\n \"update logs set status=-1,message='Failed. See %s' where provision_id=%d and id=%d\"\n % (log, provi_id, record_id))\n db.commit()\n db.close()\n\n\ndef compress_provision_dir(argv, chdir=''):\n date = datetime.now()\n today = date.strftime('%Y-%m-%d')\n if chdir:\n tarname = chdir + argv + '.' + today\n else:\n tarname = '/home/kusanagi/backup/' + argv + '.' + today\n source_dir = '/home/kusanagi/' + argv\n shutil.make_archive(tarname, 'gztar', source_dir)\n return tarname\n\n\ndef local_backup(argv):\n append_log('/home/kusanagi/' + argv + '/log/backup.log', '--- Local backup'\n )\n backup_db(argv)\n tarname = compress_provision_dir(argv)\n tar_file = pathlib.Path(tarname + '.tar.gz')\n if tar_file.exists():\n update_backup_record(argv, 0, 1)\n else:\n update_backup_record(argv, 0, 0)\n\n\ndef check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass):\n cmd = ('sshpass -p \"' + remote_pass +\n '\" ssh -o StrictHostKeyChecking=no -p ' + remote_port + ' -q ' +\n remote_user + '@' + remote_host + ' exit;echo $?')\n res = execute(cmd)\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n if int(res) == 0:\n pass\n else:\n append_log(log, 'Remote connection failed. 
Can not issue remote backup'\n )\n update_backup_record(argv, 1, 0)\n sys.exit(1)\n\n\ndef remote_backup(argv, remote_user, remote_host, remote_port, remote_pass,\n remote_dest):\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n append_log(log, '--- Remote backup')\n check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass)\n backup_db(argv)\n tarname = compress_provision_dir(argv, '/home/kusanagi/')\n conf_ssh = '/etc/ssh/ssh_config'\n with open(conf_ssh) as fp:\n lines = fp.read().splitlines()\n for line in lines:\n grep = re.findall(remote_host, line)\n if grep:\n break\n if not grep:\n f = open(conf_ssh, 'a+')\n f.write('Host %s\\n\\tStrictHostKeyChecking no\\n' % remote_host)\n f.close()\n cmd = ('sshpass -p \"' + remote_pass +\n '\" rsync --remove-source-files -azhe \\'ssh -p' + remote_port + \"' \" +\n tarname + '.tar.gz ' + remote_user + '@' + remote_host + ':' +\n remote_dest + ' 2>> ' + log + ' ; echo $?')\n res = execute(cmd)\n if int(res) == 0:\n update_backup_record(argv, 1, 1)\n else:\n update_backup_record(argv, 1, 0)\n\n\ndef drive_backup(argv, drive_dir):\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n append_log(log, '--- Backup to Google Drive')\n backup_db(argv)\n tarname = compress_provision_dir(argv, '/home/kusanagi/')\n cmd = ('rclone copy ' + tarname + '.tar.gz GGD1:' + drive_dir + ' 2>> ' +\n log + ' ; echo $?')\n res = execute(cmd)\n if int(res) == 0:\n update_backup_record(argv, 2, 1)\n else:\n update_backup_record(argv, 2, 0)\n os.remove(tarname + '.tar.gz')\n\n\ndef get_options(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('mode', type=str, choices=['local', 'remote', 'drive'])\n parser.add_argument('options', nargs=argparse.REMAINDER)\n return parser.parse_args(argv)\n\n\ndef main():\n args = get_options(sys.argv[1:])\n options = ' '.join(map(str, args.options))\n if args.mode == 'local':\n local_backup(*args.options)\n elif args.mode == 'remote':\n remote_backup(*args.options)\n else:\n 
drive_backup(*args.options)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import argparse\nimport os\nimport sys, shutil\nfrom shutil import make_archive\nimport pathlib\nfrom phpManager import execute, execute_outputfile\nfrom datetime import date, datetime\nimport re\nimport pymysql\nimport tarfile\n\n\ndef append_log(log, message):\n f = open(log, 'a+')\n today = datetime.now()\n f.write('%s %s \\n' % (today.strftime('%Y-%m-%d %H:%M:%S'), message))\n f.close()\n\n\ndef get_root_pass():\n with open('/root/.my.cnf') as fp:\n lines = fp.read().splitlines()\n for line in lines:\n grep = re.findall('password', line)\n if grep:\n pwrd = line.split('\"')[1]\n return pwrd\n\n\ndef get_db_name(argv):\n try:\n pwrd = get_root_pass()\n db = pymysql.connect('localhost', 'root', pwrd, 'secure_vps')\n cursor = db.cursor()\n cursor.execute(\n \"select id,db_name from provision where provision_name='%s'\" % argv\n )\n data = cursor.fetchone()\n db.close()\n return data\n except pymysql.err.OperationalError as err:\n print(' An error has occurred \\n', err)\n except pymysql.err.InternalError as err:\n print(' An error has occurred \\n', err)\n\n\ndef backup_db(argv):\n data = get_db_name(argv)\n db_name = data[1]\n try:\n sqldir = '/home/kusanagi/' + argv + '/sql_backup/'\n p = pathlib.Path(sqldir)\n if not p.exists():\n p.mkdir(mode=493, parents=True, exist_ok=True)\n shutil.chown(sqldir, 'kusanagi', 'kusanagi')\n except BaseException as error:\n print(error)\n pwrd = get_root_pass()\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n mess = 'Backed up database ' + db_name\n append_log(log, mess)\n cmd = ('mysqldump --single-transaction -p' + pwrd + ' --databases ' +\n db_name + ' | gzip > ' + sqldir + db_name + '.sql.gz')\n execute_outputfile(cmd, log)\n\n\ndef update_backup_record(argv, backup_type, result):\n pwrd = get_root_pass()\n data = get_db_name(argv)\n provi_id = data[0]\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n db = pymysql.connect('localhost', 'root', pwrd, 'secure_vps')\n cursor = db.cursor()\n 
cursor.execute(\n 'select id from logs where provision_id=%d and status=0 and backup_type=%d'\n % (provi_id, backup_type))\n res = cursor.fetchone()\n record_id = res[0]\n if result:\n cursor.execute(\n \"update logs set status=1,message='Done' where provision_id=%d and id=%d\"\n % (provi_id, record_id))\n else:\n cursor.execute(\n \"update logs set status=-1,message='Failed. See %s' where provision_id=%d and id=%d\"\n % (log, provi_id, record_id))\n db.commit()\n db.close()\n\n\ndef compress_provision_dir(argv, chdir=''):\n date = datetime.now()\n today = date.strftime('%Y-%m-%d')\n if chdir:\n tarname = chdir + argv + '.' + today\n else:\n tarname = '/home/kusanagi/backup/' + argv + '.' + today\n source_dir = '/home/kusanagi/' + argv\n shutil.make_archive(tarname, 'gztar', source_dir)\n return tarname\n\n\ndef local_backup(argv):\n append_log('/home/kusanagi/' + argv + '/log/backup.log', '--- Local backup'\n )\n backup_db(argv)\n tarname = compress_provision_dir(argv)\n tar_file = pathlib.Path(tarname + '.tar.gz')\n if tar_file.exists():\n update_backup_record(argv, 0, 1)\n else:\n update_backup_record(argv, 0, 0)\n\n\ndef check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass):\n cmd = ('sshpass -p \"' + remote_pass +\n '\" ssh -o StrictHostKeyChecking=no -p ' + remote_port + ' -q ' +\n remote_user + '@' + remote_host + ' exit;echo $?')\n res = execute(cmd)\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n if int(res) == 0:\n pass\n else:\n append_log(log, 'Remote connection failed. 
Can not issue remote backup'\n )\n update_backup_record(argv, 1, 0)\n sys.exit(1)\n\n\ndef remote_backup(argv, remote_user, remote_host, remote_port, remote_pass,\n remote_dest):\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n append_log(log, '--- Remote backup')\n check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass)\n backup_db(argv)\n tarname = compress_provision_dir(argv, '/home/kusanagi/')\n conf_ssh = '/etc/ssh/ssh_config'\n with open(conf_ssh) as fp:\n lines = fp.read().splitlines()\n for line in lines:\n grep = re.findall(remote_host, line)\n if grep:\n break\n if not grep:\n f = open(conf_ssh, 'a+')\n f.write('Host %s\\n\\tStrictHostKeyChecking no\\n' % remote_host)\n f.close()\n cmd = ('sshpass -p \"' + remote_pass +\n '\" rsync --remove-source-files -azhe \\'ssh -p' + remote_port + \"' \" +\n tarname + '.tar.gz ' + remote_user + '@' + remote_host + ':' +\n remote_dest + ' 2>> ' + log + ' ; echo $?')\n res = execute(cmd)\n if int(res) == 0:\n update_backup_record(argv, 1, 1)\n else:\n update_backup_record(argv, 1, 0)\n\n\ndef drive_backup(argv, drive_dir):\n log = '/home/kusanagi/' + argv + '/log/backup.log'\n append_log(log, '--- Backup to Google Drive')\n backup_db(argv)\n tarname = compress_provision_dir(argv, '/home/kusanagi/')\n cmd = ('rclone copy ' + tarname + '.tar.gz GGD1:' + drive_dir + ' 2>> ' +\n log + ' ; echo $?')\n res = execute(cmd)\n if int(res) == 0:\n update_backup_record(argv, 2, 1)\n else:\n update_backup_record(argv, 2, 0)\n os.remove(tarname + '.tar.gz')\n\n\ndef get_options(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('mode', type=str, choices=['local', 'remote', 'drive'])\n parser.add_argument('options', nargs=argparse.REMAINDER)\n return parser.parse_args(argv)\n\n\ndef main():\n args = get_options(sys.argv[1:])\n options = ' '.join(map(str, args.options))\n if args.mode == 'local':\n local_backup(*args.options)\n elif args.mode == 'remote':\n remote_backup(*args.options)\n else:\n 
drive_backup(*args.options)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\nimport argparse\nimport os\nimport sys,shutil\nfrom shutil import make_archive\nimport pathlib\nfrom phpManager import execute,execute_outputfile\nfrom datetime import date,datetime\nimport re\nimport pymysql\nimport tarfile\n\n\ndef append_log(log,message):\n f = open(log, \"a+\")\n today = datetime.now()\n f.write(\"%s %s \\n\" % (today.strftime(\"%Y-%m-%d %H:%M:%S\"), message))\n f.close()\n\ndef get_root_pass():\n with open(\"/root/.my.cnf\") as fp: lines = fp.read().splitlines()\n for line in lines:\n grep = re.findall(r'password', line)\n if grep:\n pwrd = line.split('\"')[1]\n return pwrd\n\ndef get_db_name(argv):\n try:\n pwrd = get_root_pass()\n db = pymysql.connect(\"localhost\",\"root\",pwrd,\"secure_vps\")\n cursor = db.cursor()\n cursor.execute(\"select id,db_name from provision where provision_name='%s'\" % argv)\n data = cursor.fetchone()\n db.close()\n return data\n except pymysql.err.OperationalError as err:\n print (' An error has occurred \\n', err)\n except pymysql.err.InternalError as err:\n print (' An error has occurred \\n', err)\n\ndef backup_db(argv):\n\n data = get_db_name(argv)\n db_name = data[1]\n try:\n sqldir = '/home/kusanagi/'+argv+'/sql_backup/'\n p = pathlib.Path(sqldir)\n if not p.exists():\n p.mkdir(mode=0o755, parents=True, exist_ok=True)\n shutil.chown(sqldir,'kusanagi','kusanagi')\n except BaseException as error:\n print(error)\n pwrd = get_root_pass()\n\n log = '/home/kusanagi/'+argv+'/log/backup.log'\n mess = 'Backed up database '+db_name\n append_log(log,mess)\n\n cmd = 'mysqldump --single-transaction -p'+pwrd+' --databases '+db_name+' | gzip > '+sqldir+db_name+'.sql.gz'\n execute_outputfile(cmd,log)\n\ndef update_backup_record(argv,backup_type,result):\n\n pwrd = get_root_pass()\n data = get_db_name(argv)\n provi_id = data[0]\n log = '/home/kusanagi/'+argv+'/log/backup.log'\n\n db = pymysql.connect(\"localhost\",\"root\",pwrd,\"secure_vps\")\n cursor = db.cursor()\n 
cursor.execute(\"select id from logs where provision_id=%d and status=0 and backup_type=%d\" % (provi_id,backup_type))\n res = cursor.fetchone()\n record_id = res[0]\n\n if result:\n cursor.execute(\"update logs set status=1,message='Done' where provision_id=%d and id=%d\" % (provi_id,record_id))\n else:\n cursor.execute(\"update logs set status=-1,message='Failed. See %s' where provision_id=%d and id=%d\" % (log,provi_id,record_id))\n\n db.commit()\n db.close()\n\ndef compress_provision_dir(argv,chdir=''):\n date = datetime.now()\n today = date.strftime(\"%Y-%m-%d\")\n if chdir:\n tarname = chdir+argv+'.'+today\n else:\n tarname = '/home/kusanagi/backup/'+argv+'.'+today\n source_dir = '/home/kusanagi/'+argv\n shutil.make_archive(tarname,\"gztar\",source_dir)\n return tarname\n\ndef local_backup(argv):\n \n append_log('/home/kusanagi/'+argv+'/log/backup.log', '--- Local backup')\n backup_db(argv)\n tarname = compress_provision_dir(argv)\n\n tar_file=pathlib.Path(tarname+'.tar.gz')\n if tar_file.exists():\n update_backup_record(argv,0,1)\n else:\n update_backup_record(argv,0,0)\n\ndef check_ssh_conn(argv,remote_user,remote_host,remote_port,remote_pass):\n cmd = 'sshpass -p \"'+remote_pass+'\" ssh -o StrictHostKeyChecking=no -p '+remote_port+' -q '+remote_user+'@'+remote_host+' exit;echo $?'\n res = execute(cmd)\n log = '/home/kusanagi/'+argv+'/log/backup.log'\n if int(res) == 0:\n #print('Connect OK \\n')\n pass\n else:\n append_log(log, 'Remote connection failed. 
Can not issue remote backup')\n update_backup_record(argv,1,0)\n sys.exit(1)\n\ndef remote_backup(argv, remote_user, remote_host, remote_port, remote_pass, remote_dest):\n\n log = '/home/kusanagi/'+argv+'/log/backup.log'\n append_log(log, '--- Remote backup')\n check_ssh_conn(argv, remote_user, remote_host, remote_port, remote_pass)\n backup_db(argv)\n tarname = compress_provision_dir(argv,'/home/kusanagi/')\n \n conf_ssh = '/etc/ssh/ssh_config'\n with open(conf_ssh) as fp: lines = fp.read().splitlines()\n for line in lines:\n grep = re.findall(remote_host, line)\n if grep:\n break\n if not grep:\n #configure stricthostkey ssh\n f = open(conf_ssh,\"a+\")\n f.write('Host %s\\n\\tStrictHostKeyChecking no\\n' % remote_host)\n f.close()\n \n cmd = 'sshpass -p \"'+remote_pass+'\" rsync --remove-source-files -azhe \\'ssh -p'+remote_port+'\\' '+tarname+'.tar.gz '+remote_user+'@'+remote_host+':'+remote_dest+' 2>> '+log+' ; echo $?'\n res = execute(cmd)\n if int(res) == 0:\n update_backup_record(argv,1,1)\n else:\n update_backup_record(argv,1,0)\n\n\ndef drive_backup(argv,drive_dir):\n \n log = '/home/kusanagi/'+argv+'/log/backup.log'\n append_log(log,'--- Backup to Google Drive')\n backup_db(argv)\n tarname = compress_provision_dir(argv,'/home/kusanagi/')\n cmd = 'rclone copy '+tarname+'.tar.gz GGD1:'+drive_dir+ ' 2>> '+log+' ; echo $?'\n res = execute(cmd)\n if int(res) == 0:\n update_backup_record(argv,2,1)\n else:\n update_backup_record(argv,2,0)\n os.remove(tarname+'.tar.gz')\n \ndef get_options(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('mode', type=str, choices=['local', 'remote', 'drive'])\n parser.add_argument('options', nargs=argparse.REMAINDER)\n return parser.parse_args(argv)\n\ndef main():\n \n args=get_options(sys.argv[1:])\n #pwrd = get_root_pass()\n options = ' '.join(map(str, args.options))\n if args.mode == 'local':\n local_backup(*args.options)\n elif args.mode == 'remote':\n remote_backup(*args.options)\n else:\n 
drive_backup(*args.options)\n\nif __name__ == '__main__':\n main()\n\n",
"step-ids": [
10,
11,
13,
14,
15
]
}
|
[
10,
11,
13,
14,
15
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(response.geturl())
<|reserved_special_token_0|>
with open('bgdGW.html', 'w', encoding='utf-8') as fp:
fp.write(html)
print(response.geturl())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
response = urllib.request.urlopen('http://www.gengdan.cn/')
print(response.geturl())
html = response.read().decode('UTF-8')
with open('bgdGW.html', 'w', encoding='utf-8') as fp:
fp.write(html)
print(response.geturl())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import urllib.request
response = urllib.request.urlopen('http://www.gengdan.cn/')
print(response.geturl())
html = response.read().decode('UTF-8')
with open('bgdGW.html', 'w', encoding='utf-8') as fp:
fp.write(html)
print(response.geturl())
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
# @Project :experiment9
# @File :text1
# @Date :2020/10/28 09:13
# @Author :施嘉伟
# @Email :1138128021@qq.com
# @Software :PyCharm
-------------------------------------------------
"""
import urllib.request
# 发出请求,得到响应
response=urllib.request.urlopen("http://www.gengdan.cn/")
print(response.geturl())
html = response.read().decode("UTF-8")
with open("bgdGW.html",'w',encoding="utf-8")as fp:
fp.write(html)
print(response.geturl())
|
flexible
|
{
"blob_id": "b186ae7a48afbb70edf3be0d9697deed4f31e542",
"index": 2258,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(response.geturl())\n<mask token>\nwith open('bgdGW.html', 'w', encoding='utf-8') as fp:\n fp.write(html)\nprint(response.geturl())\n",
"step-3": "<mask token>\nresponse = urllib.request.urlopen('http://www.gengdan.cn/')\nprint(response.geturl())\nhtml = response.read().decode('UTF-8')\nwith open('bgdGW.html', 'w', encoding='utf-8') as fp:\n fp.write(html)\nprint(response.geturl())\n",
"step-4": "<mask token>\nimport urllib.request\nresponse = urllib.request.urlopen('http://www.gengdan.cn/')\nprint(response.geturl())\nhtml = response.read().decode('UTF-8')\nwith open('bgdGW.html', 'w', encoding='utf-8') as fp:\n fp.write(html)\nprint(response.geturl())\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n# @Project :experiment9\n# @File :text1\n# @Date :2020/10/28 09:13\n# @Author :施嘉伟\n# @Email :1138128021@qq.com\n# @Software :PyCharm\n-------------------------------------------------\n\"\"\"\nimport urllib.request\n# 发出请求,得到响应\nresponse=urllib.request.urlopen(\"http://www.gengdan.cn/\")\nprint(response.geturl())\nhtml = response.read().decode(\"UTF-8\")\nwith open(\"bgdGW.html\",'w',encoding=\"utf-8\")as fp:\n fp.write(html)\nprint(response.geturl())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import collections
# Definition for a binary tree node.
class TreeNode:
    """A binary-tree node: a payload value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        # Keep these attribute names: other code reads .val/.left/.right.
        self.val, self.left, self.right = val, left, right
class Solution:
    """Check whether a binary tree is a mirror image of itself."""

    def isSymmetric(self, root: TreeNode) -> bool:
        """Return True if the tree is symmetric around its center.

        Iterative breadth-first walk over mirrored node pairs: every
        queue entry holds the two nodes that must agree for the tree to
        remain symmetric; their children are enqueued crosswise.
        """
        if root is None:
            return True
        pairs = collections.deque([(root.left, root.right)])
        while pairs:
            a, b = pairs.popleft()
            if a is None and b is None:
                # Both branches ended together: still symmetric here.
                continue
            if a is None or b is None or a.val != b.val:
                return False
            # Mirror pairing: outer children together, inner children together.
            pairs.append((a.left, b.right))
            pairs.append((a.right, b.left))
        return True
|
normal
|
{
"blob_id": "24a4b9246a9b15334bebc45c532a25bd81266918",
"index": 9650,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TreeNode:\n <mask token>\n\n\nclass Solution:\n\n def isSymmetric(self, root: TreeNode) ->bool:\n if not root:\n return True\n queue = collections.deque()\n queue.append((root.left, root.right))\n while queue:\n left, right = queue.popleft()\n if not left and not right:\n continue\n if not left or not right or left.val != right.val:\n return False\n queue.append((left.left, right.right))\n queue.append((left.right, right.left))\n return True\n",
"step-3": "<mask token>\n\n\nclass TreeNode:\n\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n\n def isSymmetric(self, root: TreeNode) ->bool:\n if not root:\n return True\n queue = collections.deque()\n queue.append((root.left, root.right))\n while queue:\n left, right = queue.popleft()\n if not left and not right:\n continue\n if not left or not right or left.val != right.val:\n return False\n queue.append((left.left, right.right))\n queue.append((left.right, right.left))\n return True\n",
"step-4": "import collections\n\n\nclass TreeNode:\n\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n\n def isSymmetric(self, root: TreeNode) ->bool:\n if not root:\n return True\n queue = collections.deque()\n queue.append((root.left, root.right))\n while queue:\n left, right = queue.popleft()\n if not left and not right:\n continue\n if not left or not right or left.val != right.val:\n return False\n queue.append((left.left, right.right))\n queue.append((left.right, right.left))\n return True\n",
"step-5": "import collections\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n def isSymmetric(self, root: TreeNode) -> bool:\n if not root:\n return True\n\n queue = collections.deque()\n \n queue.append((root.left, root.right))\n \n while queue:\n left, right = queue.popleft()\n \n if not left and not right:\n continue\n \n if not left or not right or left.val != right.val:\n return False\n \n queue.append((left.left, right.right))\n queue.append((left.right, right.left))\n \n return True\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from django.core.urlresolvers import reverse
from keptar import settings
import os, os.path
import Image
try:
from collections import OrderedDict
except ImportError:
from keptar.odict import OrderedDict
class AccessDenied(Exception):
    """Raised when a requested path resolves outside KEPTAR_ROOT."""
class FileNotFound(Exception):
    """Gallery-specific 'entry not found' error (raised by other parts of the module)."""
class NotDirectory(Exception):
    """Raised when a path expected to be a directory is not one."""
def enrich(filelist, relpath='', thumbnails=True):
    """Attach display metadata to each directory/file name.

    Args:
        filelist: iterable of entry names living under ``relpath``.
        relpath: path relative to settings.KEPTAR_ROOT.
        thumbnails: when True, try to generate a real thumbnail for files;
            otherwise fall back to the generic file icon.

    Returns:
        OrderedDict mapping each name to a dict with keys ``relpath``,
        ``url``, ``abspath``, ``thumb``, ``type`` and ``direct_url``
        (``direct_url`` is None for directories).
    """
    files = OrderedDict()
    for f in filelist:
        abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT, relpath, f))
        if os.path.isdir(abspath):
            thumb = settings.KEPTAR_ICONS.get('dir', None)
            url = reverse('keptar.views.listdir', args=[os.path.join(relpath, f)])
            direct_url = None
            # Renamed from `type` to avoid shadowing the builtin.
            entry_type = 'dir'
        else:
            if thumbnails:
                try:
                    thumb = get_thumbnail(abspath)
                except Exception:
                    # Bug fix: the bare `except:` also swallowed SystemExit
                    # and KeyboardInterrupt; thumbnailing is best-effort, so
                    # ignore only ordinary errors.
                    thumb = None
            else:
                thumb = settings.KEPTAR_ICONS.get('file', None)
            url = reverse('keptar.views.showfile', args=[os.path.join(relpath, f)])
            # NOTE(review): no separator is inserted between relpath and f —
            # presumably callers pass relpath with a trailing slash; verify.
            direct_url = getattr(settings, 'KEPTAR_URL', '/media/')+relpath+f
            entry_type = 'file'
        # TODO: additional metadata could be attached here
        files[f] = {
            'relpath': relpath,
            'url': url,
            'abspath': abspath,
            'thumb': thumb,
            'type': entry_type,
            'direct_url': direct_url,
        }
    return files
def get_parent(path):
"""A megadott elem szulokonyvtarat adja meg"""
# security check
parent = os.path.dirname(path)
try:
get_abspath(parent)
except:
parent = ''
return parent
def get_abspath(path):
"""AccessDenied exceptiont dob, ha valaki cselezni akar"""
abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT, path))
# vajon a celkonyvtar valoban a root-on belul talalhato? - /../... miatt
if not abspath.startswith(settings.KEPTAR_ROOT):
raise AccessDenied("%s < %s" % (abspath, settings.KEPTAR_ROOT))
return abspath
def get_filelist(path, show_hidden=getattr(settings, 'KEPTAR_SHOW_HIDDEN', False), thumbnails=True):
"""Visszaadja a ``path`` konyvtarban levo konyvtarak es fileok listajat.
A ``path`` a ``settings.KEPTAR_ROOT``-hoz relativ.
A konyvtarak es a fileok listajat ket kulon dict-ben adja vissza,
mindenfele extra parameterrel.
A ``settings.KEPTAR_EXTENSIONS``-nel allithatoak a tamogatott
kiterjesztesek.
"""
abspath = get_abspath(path)
if not os.path.isdir(abspath):
raise NotDirectory(abspath)
dirs = []
pictures = []
for fname in os.listdir(abspath):
file = os.path.join(abspath, fname)
if os.path.isdir(file) and (show_hidden or not fname.startswith('.')):
dirs.append(fname)
if os.path.isfile(file):
# a kiterjesztes tamogatott-e
ext = file[file.rfind('.')+1:]
if ext.lower() in settings.KEPTAR_EXTENSIONS and (show_hidden or not fname.startswith('.')):
pictures.append(fname)
dirs.sort()
pictures.sort()
return enrich(dirs+pictures, relpath=path)
def get_thumbnail(file, type='', regenerate=False):
"""Visszaadja, illetve ha nem letezik, akkor legeneralja a ``file``-hoz
tartozo thumbnailt.
A ``type``-on keresztul mondhatjuk meg, hogy milyen tipusu thumbnailre
van szuksegunk, a tipusok parametereit a ``settings.py``-ben allithatjuk.
Ha a ``regenerate`` ``True``, akkor ujrageneralja a thumbnailt.
"""
ext = file[file.rfind('.')+1:]
if not os.path.isfile(file) or ext.lower() not in settings.KEPTAR_EXTENSIONS:
raise FileNotFound(file)
basename = os.path.basename(file)
dirname = os.path.dirname(file)
thumbname = os.path.join(dirname, settings.KEPTAR_THUMBS[type]['dir'], basename)
if regenerate or not os.path.isfile(thumbname):
if not os.path.isdir(os.path.dirname(thumbname)):
os.mkdir(os.path.dirname(thumbname))
generate_thumbnail(file, thumbname, settings.KEPTAR_THUMBS[type]['size'])
thumburl = getattr(settings, 'KEPTAR_URL', '/media') + thumbname[len(settings.KEPTAR_ROOT):]
return thumburl
def generate_thumbnail(file, thumbname, size):
image = Image.open(file)
image.thumbnail(size)
image.save(thumbname, image.format)
|
normal
|
{
"blob_id": "d9156c20e046f608563bc6779575e14cc60f4c25",
"index": 896,
"step-1": "<mask token>\n\n\nclass AccessDenied(Exception):\n pass\n\n\nclass FileNotFound(Exception):\n pass\n\n\nclass NotDirectory(Exception):\n pass\n\n\n<mask token>\n\n\ndef get_parent(path):\n \"\"\"A megadott elem szulokonyvtarat adja meg\"\"\"\n parent = os.path.dirname(path)\n try:\n get_abspath(parent)\n except:\n parent = ''\n return parent\n\n\ndef get_abspath(path):\n \"\"\"AccessDenied exceptiont dob, ha valaki cselezni akar\"\"\"\n abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT, path))\n if not abspath.startswith(settings.KEPTAR_ROOT):\n raise AccessDenied('%s < %s' % (abspath, settings.KEPTAR_ROOT))\n return abspath\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AccessDenied(Exception):\n pass\n\n\nclass FileNotFound(Exception):\n pass\n\n\nclass NotDirectory(Exception):\n pass\n\n\n<mask token>\n\n\ndef get_parent(path):\n \"\"\"A megadott elem szulokonyvtarat adja meg\"\"\"\n parent = os.path.dirname(path)\n try:\n get_abspath(parent)\n except:\n parent = ''\n return parent\n\n\ndef get_abspath(path):\n \"\"\"AccessDenied exceptiont dob, ha valaki cselezni akar\"\"\"\n abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT, path))\n if not abspath.startswith(settings.KEPTAR_ROOT):\n raise AccessDenied('%s < %s' % (abspath, settings.KEPTAR_ROOT))\n return abspath\n\n\n<mask token>\n\n\ndef get_thumbnail(file, type='', regenerate=False):\n \"\"\"Visszaadja, illetve ha nem letezik, akkor legeneralja a ``file``-hoz\n tartozo thumbnailt.\n A ``type``-on keresztul mondhatjuk meg, hogy milyen tipusu thumbnailre\n van szuksegunk, a tipusok parametereit a ``settings.py``-ben allithatjuk.\n Ha a ``regenerate`` ``True``, akkor ujrageneralja a thumbnailt.\n \"\"\"\n ext = file[file.rfind('.') + 1:]\n if not os.path.isfile(file) or ext.lower(\n ) not in settings.KEPTAR_EXTENSIONS:\n raise FileNotFound(file)\n basename = os.path.basename(file)\n dirname = os.path.dirname(file)\n thumbname = os.path.join(dirname, settings.KEPTAR_THUMBS[type]['dir'],\n basename)\n if regenerate or not os.path.isfile(thumbname):\n if not os.path.isdir(os.path.dirname(thumbname)):\n os.mkdir(os.path.dirname(thumbname))\n generate_thumbnail(file, thumbname, settings.KEPTAR_THUMBS[type][\n 'size'])\n thumburl = getattr(settings, 'KEPTAR_URL', '/media') + thumbname[len(\n settings.KEPTAR_ROOT):]\n return thumburl\n\n\n<mask token>\n",
"step-3": "<mask token>\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from keptar.odict import OrderedDict\n\n\nclass AccessDenied(Exception):\n pass\n\n\nclass FileNotFound(Exception):\n pass\n\n\nclass NotDirectory(Exception):\n pass\n\n\ndef enrich(filelist, relpath='', thumbnails=True):\n \"\"\"A kep neveihez hozzateszi a szukseges adatokat\"\"\"\n files = OrderedDict()\n for f in filelist:\n abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT,\n relpath, f))\n if os.path.isdir(abspath):\n thumb = settings.KEPTAR_ICONS.get('dir', None)\n url = reverse('keptar.views.listdir', args=[os.path.join(\n relpath, f)])\n direct_url = None\n type = 'dir'\n else:\n if thumbnails:\n try:\n thumb = get_thumbnail(abspath)\n except:\n thumb = None\n else:\n thumb = settings.KEPTAR_ICONS.get('file', None)\n url = reverse('keptar.views.showfile', args=[os.path.join(\n relpath, f)])\n direct_url = getattr(settings, 'KEPTAR_URL', '/media/'\n ) + relpath + f\n type = 'file'\n files[f] = {'relpath': relpath, 'url': url, 'abspath': abspath,\n 'thumb': thumb, 'type': type, 'direct_url': direct_url}\n return files\n\n\ndef get_parent(path):\n \"\"\"A megadott elem szulokonyvtarat adja meg\"\"\"\n parent = os.path.dirname(path)\n try:\n get_abspath(parent)\n except:\n parent = ''\n return parent\n\n\ndef get_abspath(path):\n \"\"\"AccessDenied exceptiont dob, ha valaki cselezni akar\"\"\"\n abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT, path))\n if not abspath.startswith(settings.KEPTAR_ROOT):\n raise AccessDenied('%s < %s' % (abspath, settings.KEPTAR_ROOT))\n return abspath\n\n\ndef get_filelist(path, show_hidden=getattr(settings, 'KEPTAR_SHOW_HIDDEN', \n False), thumbnails=True):\n \"\"\"Visszaadja a ``path`` konyvtarban levo konyvtarak es fileok listajat.\n A ``path`` a ``settings.KEPTAR_ROOT``-hoz relativ.\n A konyvtarak es a fileok listajat ket kulon dict-ben adja vissza, \n mindenfele extra parameterrel.\n A 
``settings.KEPTAR_EXTENSIONS``-nel allithatoak a tamogatott \n kiterjesztesek.\n \"\"\"\n abspath = get_abspath(path)\n if not os.path.isdir(abspath):\n raise NotDirectory(abspath)\n dirs = []\n pictures = []\n for fname in os.listdir(abspath):\n file = os.path.join(abspath, fname)\n if os.path.isdir(file) and (show_hidden or not fname.startswith('.')):\n dirs.append(fname)\n if os.path.isfile(file):\n ext = file[file.rfind('.') + 1:]\n if ext.lower() in settings.KEPTAR_EXTENSIONS and (show_hidden or\n not fname.startswith('.')):\n pictures.append(fname)\n dirs.sort()\n pictures.sort()\n return enrich(dirs + pictures, relpath=path)\n\n\ndef get_thumbnail(file, type='', regenerate=False):\n \"\"\"Visszaadja, illetve ha nem letezik, akkor legeneralja a ``file``-hoz\n tartozo thumbnailt.\n A ``type``-on keresztul mondhatjuk meg, hogy milyen tipusu thumbnailre\n van szuksegunk, a tipusok parametereit a ``settings.py``-ben allithatjuk.\n Ha a ``regenerate`` ``True``, akkor ujrageneralja a thumbnailt.\n \"\"\"\n ext = file[file.rfind('.') + 1:]\n if not os.path.isfile(file) or ext.lower(\n ) not in settings.KEPTAR_EXTENSIONS:\n raise FileNotFound(file)\n basename = os.path.basename(file)\n dirname = os.path.dirname(file)\n thumbname = os.path.join(dirname, settings.KEPTAR_THUMBS[type]['dir'],\n basename)\n if regenerate or not os.path.isfile(thumbname):\n if not os.path.isdir(os.path.dirname(thumbname)):\n os.mkdir(os.path.dirname(thumbname))\n generate_thumbnail(file, thumbname, settings.KEPTAR_THUMBS[type][\n 'size'])\n thumburl = getattr(settings, 'KEPTAR_URL', '/media') + thumbname[len(\n settings.KEPTAR_ROOT):]\n return thumburl\n\n\ndef generate_thumbnail(file, thumbname, size):\n image = Image.open(file)\n image.thumbnail(size)\n image.save(thumbname, image.format)\n",
"step-4": "from django.core.urlresolvers import reverse\nfrom keptar import settings\nimport os, os.path\nimport Image\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from keptar.odict import OrderedDict\n\n\nclass AccessDenied(Exception):\n pass\n\n\nclass FileNotFound(Exception):\n pass\n\n\nclass NotDirectory(Exception):\n pass\n\n\ndef enrich(filelist, relpath='', thumbnails=True):\n \"\"\"A kep neveihez hozzateszi a szukseges adatokat\"\"\"\n files = OrderedDict()\n for f in filelist:\n abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT,\n relpath, f))\n if os.path.isdir(abspath):\n thumb = settings.KEPTAR_ICONS.get('dir', None)\n url = reverse('keptar.views.listdir', args=[os.path.join(\n relpath, f)])\n direct_url = None\n type = 'dir'\n else:\n if thumbnails:\n try:\n thumb = get_thumbnail(abspath)\n except:\n thumb = None\n else:\n thumb = settings.KEPTAR_ICONS.get('file', None)\n url = reverse('keptar.views.showfile', args=[os.path.join(\n relpath, f)])\n direct_url = getattr(settings, 'KEPTAR_URL', '/media/'\n ) + relpath + f\n type = 'file'\n files[f] = {'relpath': relpath, 'url': url, 'abspath': abspath,\n 'thumb': thumb, 'type': type, 'direct_url': direct_url}\n return files\n\n\ndef get_parent(path):\n \"\"\"A megadott elem szulokonyvtarat adja meg\"\"\"\n parent = os.path.dirname(path)\n try:\n get_abspath(parent)\n except:\n parent = ''\n return parent\n\n\ndef get_abspath(path):\n \"\"\"AccessDenied exceptiont dob, ha valaki cselezni akar\"\"\"\n abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT, path))\n if not abspath.startswith(settings.KEPTAR_ROOT):\n raise AccessDenied('%s < %s' % (abspath, settings.KEPTAR_ROOT))\n return abspath\n\n\ndef get_filelist(path, show_hidden=getattr(settings, 'KEPTAR_SHOW_HIDDEN', \n False), thumbnails=True):\n \"\"\"Visszaadja a ``path`` konyvtarban levo konyvtarak es fileok listajat.\n A ``path`` a ``settings.KEPTAR_ROOT``-hoz relativ.\n A konyvtarak es a fileok listajat ket 
kulon dict-ben adja vissza, \n mindenfele extra parameterrel.\n A ``settings.KEPTAR_EXTENSIONS``-nel allithatoak a tamogatott \n kiterjesztesek.\n \"\"\"\n abspath = get_abspath(path)\n if not os.path.isdir(abspath):\n raise NotDirectory(abspath)\n dirs = []\n pictures = []\n for fname in os.listdir(abspath):\n file = os.path.join(abspath, fname)\n if os.path.isdir(file) and (show_hidden or not fname.startswith('.')):\n dirs.append(fname)\n if os.path.isfile(file):\n ext = file[file.rfind('.') + 1:]\n if ext.lower() in settings.KEPTAR_EXTENSIONS and (show_hidden or\n not fname.startswith('.')):\n pictures.append(fname)\n dirs.sort()\n pictures.sort()\n return enrich(dirs + pictures, relpath=path)\n\n\ndef get_thumbnail(file, type='', regenerate=False):\n \"\"\"Visszaadja, illetve ha nem letezik, akkor legeneralja a ``file``-hoz\n tartozo thumbnailt.\n A ``type``-on keresztul mondhatjuk meg, hogy milyen tipusu thumbnailre\n van szuksegunk, a tipusok parametereit a ``settings.py``-ben allithatjuk.\n Ha a ``regenerate`` ``True``, akkor ujrageneralja a thumbnailt.\n \"\"\"\n ext = file[file.rfind('.') + 1:]\n if not os.path.isfile(file) or ext.lower(\n ) not in settings.KEPTAR_EXTENSIONS:\n raise FileNotFound(file)\n basename = os.path.basename(file)\n dirname = os.path.dirname(file)\n thumbname = os.path.join(dirname, settings.KEPTAR_THUMBS[type]['dir'],\n basename)\n if regenerate or not os.path.isfile(thumbname):\n if not os.path.isdir(os.path.dirname(thumbname)):\n os.mkdir(os.path.dirname(thumbname))\n generate_thumbnail(file, thumbname, settings.KEPTAR_THUMBS[type][\n 'size'])\n thumburl = getattr(settings, 'KEPTAR_URL', '/media') + thumbname[len(\n settings.KEPTAR_ROOT):]\n return thumburl\n\n\ndef generate_thumbnail(file, thumbname, size):\n image = Image.open(file)\n image.thumbnail(size)\n image.save(thumbname, image.format)\n",
"step-5": "from django.core.urlresolvers import reverse\nfrom keptar import settings\nimport os, os.path\nimport Image\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from keptar.odict import OrderedDict\n\nclass AccessDenied(Exception):\n pass\n\nclass FileNotFound(Exception):\n pass\n\nclass NotDirectory(Exception):\n pass\n\ndef enrich(filelist, relpath='', thumbnails=True):\n \"\"\"A kep neveihez hozzateszi a szukseges adatokat\"\"\"\n\n files = OrderedDict()\n\n for f in filelist:\n abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT, relpath, f))\n if os.path.isdir(abspath):\n thumb = settings.KEPTAR_ICONS.get('dir', None)\n url = reverse('keptar.views.listdir', args=[os.path.join(relpath, f)])\n direct_url = None\n type = 'dir'\n else:\n if thumbnails:\n try:\n thumb = get_thumbnail(abspath)\n except:\n thumb = None\n else:\n thumb = settings.KEPTAR_ICONS.get('file', None)\n url = reverse('keptar.views.showfile', args=[os.path.join(relpath, f)])\n direct_url = getattr(settings, 'KEPTAR_URL', '/media/')+relpath+f\n type = 'file'\n\n # TODO: egyeb adatok\n files[f] = {\n 'relpath': relpath,\n 'url': url,\n 'abspath': abspath,\n 'thumb': thumb,\n 'type': type,\n 'direct_url': direct_url,\n }\n\n return files\n\n\ndef get_parent(path):\n \"\"\"A megadott elem szulokonyvtarat adja meg\"\"\"\n\n # security check\n parent = os.path.dirname(path)\n\n try:\n get_abspath(parent)\n except:\n parent = ''\n\n return parent\n\n\ndef get_abspath(path):\n \"\"\"AccessDenied exceptiont dob, ha valaki cselezni akar\"\"\"\n\n abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT, path))\n # vajon a celkonyvtar valoban a root-on belul talalhato? - /../... 
miatt\n if not abspath.startswith(settings.KEPTAR_ROOT):\n raise AccessDenied(\"%s < %s\" % (abspath, settings.KEPTAR_ROOT))\n \n return abspath\n\n\ndef get_filelist(path, show_hidden=getattr(settings, 'KEPTAR_SHOW_HIDDEN', False), thumbnails=True):\n \"\"\"Visszaadja a ``path`` konyvtarban levo konyvtarak es fileok listajat.\n A ``path`` a ``settings.KEPTAR_ROOT``-hoz relativ.\n A konyvtarak es a fileok listajat ket kulon dict-ben adja vissza, \n mindenfele extra parameterrel.\n A ``settings.KEPTAR_EXTENSIONS``-nel allithatoak a tamogatott \n kiterjesztesek.\n \"\"\"\n\n abspath = get_abspath(path)\n\n if not os.path.isdir(abspath):\n raise NotDirectory(abspath)\n\n dirs = []\n pictures = []\n\n for fname in os.listdir(abspath):\n file = os.path.join(abspath, fname)\n if os.path.isdir(file) and (show_hidden or not fname.startswith('.')):\n dirs.append(fname)\n if os.path.isfile(file):\n # a kiterjesztes tamogatott-e\n ext = file[file.rfind('.')+1:]\n if ext.lower() in settings.KEPTAR_EXTENSIONS and (show_hidden or not fname.startswith('.')):\n pictures.append(fname)\n\n dirs.sort()\n pictures.sort()\n\n return enrich(dirs+pictures, relpath=path)\n\n\ndef get_thumbnail(file, type='', regenerate=False):\n \"\"\"Visszaadja, illetve ha nem letezik, akkor legeneralja a ``file``-hoz\n tartozo thumbnailt.\n A ``type``-on keresztul mondhatjuk meg, hogy milyen tipusu thumbnailre\n van szuksegunk, a tipusok parametereit a ``settings.py``-ben allithatjuk.\n Ha a ``regenerate`` ``True``, akkor ujrageneralja a thumbnailt.\n \"\"\"\n\n ext = file[file.rfind('.')+1:]\n if not os.path.isfile(file) or ext.lower() not in settings.KEPTAR_EXTENSIONS:\n raise FileNotFound(file)\n \n basename = os.path.basename(file)\n dirname = os.path.dirname(file)\n thumbname = os.path.join(dirname, settings.KEPTAR_THUMBS[type]['dir'], basename)\n if regenerate or not os.path.isfile(thumbname):\n if not os.path.isdir(os.path.dirname(thumbname)):\n os.mkdir(os.path.dirname(thumbname))\n 
generate_thumbnail(file, thumbname, settings.KEPTAR_THUMBS[type]['size'])\n \n thumburl = getattr(settings, 'KEPTAR_URL', '/media') + thumbname[len(settings.KEPTAR_ROOT):]\n\n return thumburl\n\n\ndef generate_thumbnail(file, thumbname, size):\n image = Image.open(file)\n image.thumbnail(size)\n image.save(thumbname, image.format)\n\n",
"step-ids": [
5,
6,
10,
11,
12
]
}
|
[
5,
6,
10,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('------rectangle-------')
<|reserved_special_token_0|>
print('area of rectangle : ', RectArea(1, b))
print('perimeter of rectangle : ', Rectperimeter(1, b))
print()
print('-------circle-------')
<|reserved_special_token_0|>
print('area of circle : ', circlearea(r))
print('perimeter of circle : ', circleperimeter(r))
print()
print('-----cuboid-----')
<|reserved_special_token_0|>
print('area of cuboid :', cuboidarea(1, w, h))
print('perimeter of cuboid : ', cuboidperimeter(1, w, h))
print()
print('-------shpere-----')
<|reserved_special_token_0|>
print('area of shpere: ', spherearea(r))
print('perimeter of shpere : ', sphereperimeter(r))
print()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('------rectangle-------')
l = int(input('enter length : '))
b = int(input('enter breadth : '))
print('area of rectangle : ', RectArea(1, b))
print('perimeter of rectangle : ', Rectperimeter(1, b))
print()
print('-------circle-------')
r = int(input('enter radius : '))
print('area of circle : ', circlearea(r))
print('perimeter of circle : ', circleperimeter(r))
print()
print('-----cuboid-----')
l = int(input('enter length : '))
w = int(input('enter width : '))
h = int(input('enter height : '))
print('area of cuboid :', cuboidarea(1, w, h))
print('perimeter of cuboid : ', cuboidperimeter(1, w, h))
print()
print('-------shpere-----')
r = int(input('enter radius: '))
print('area of shpere: ', spherearea(r))
print('perimeter of shpere : ', sphereperimeter(r))
print()
<|reserved_special_token_1|>
from graphics.rectangle import *
from graphics.circle import *
from graphics.DGraphics.cuboid import *
from graphics.DGraphics.sphere import *
print('------rectangle-------')
l = int(input('enter length : '))
b = int(input('enter breadth : '))
print('area of rectangle : ', RectArea(1, b))
print('perimeter of rectangle : ', Rectperimeter(1, b))
print()
print('-------circle-------')
r = int(input('enter radius : '))
print('area of circle : ', circlearea(r))
print('perimeter of circle : ', circleperimeter(r))
print()
print('-----cuboid-----')
l = int(input('enter length : '))
w = int(input('enter width : '))
h = int(input('enter height : '))
print('area of cuboid :', cuboidarea(1, w, h))
print('perimeter of cuboid : ', cuboidperimeter(1, w, h))
print()
print('-------shpere-----')
r = int(input('enter radius: '))
print('area of shpere: ', spherearea(r))
print('perimeter of shpere : ', sphereperimeter(r))
print()
<|reserved_special_token_1|>
from graphics.rectangle import *
from graphics.circle import *
from graphics.DGraphics.cuboid import *
from graphics.DGraphics.sphere import *
print ("------rectangle-------")
l=int(input("enter length : "))
b=int(input("enter breadth : "))
print("area of rectangle : ",RectArea(1,b))
print("perimeter of rectangle : ",Rectperimeter(1,b))
print()
print ("-------circle-------")
r=int(input("enter radius : "))
print("area of circle : ",circlearea(r))
print("perimeter of circle : ",circleperimeter(r))
print()
print ("-----cuboid-----")
l=int(input("enter length : "))
w=int(input("enter width : "))
h=int(input("enter height : "))
print("area of cuboid :",cuboidarea(1,w,h))
print("perimeter of cuboid : ",cuboidperimeter(1,w,h))
print()
print ("-------shpere-----")
r=int(input("enter radius: "))
print("area of shpere: ",spherearea(r))
print("perimeter of shpere : ",sphereperimeter(r))
print()
|
flexible
|
{
"blob_id": "f275085a2e4e3efc8eb841b5322d9d71f2e43846",
"index": 7998,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('------rectangle-------')\n<mask token>\nprint('area of rectangle : ', RectArea(1, b))\nprint('perimeter of rectangle : ', Rectperimeter(1, b))\nprint()\nprint('-------circle-------')\n<mask token>\nprint('area of circle : ', circlearea(r))\nprint('perimeter of circle : ', circleperimeter(r))\nprint()\nprint('-----cuboid-----')\n<mask token>\nprint('area of cuboid :', cuboidarea(1, w, h))\nprint('perimeter of cuboid : ', cuboidperimeter(1, w, h))\nprint()\nprint('-------shpere-----')\n<mask token>\nprint('area of shpere: ', spherearea(r))\nprint('perimeter of shpere : ', sphereperimeter(r))\nprint()\n",
"step-3": "<mask token>\nprint('------rectangle-------')\nl = int(input('enter length : '))\nb = int(input('enter breadth : '))\nprint('area of rectangle : ', RectArea(1, b))\nprint('perimeter of rectangle : ', Rectperimeter(1, b))\nprint()\nprint('-------circle-------')\nr = int(input('enter radius : '))\nprint('area of circle : ', circlearea(r))\nprint('perimeter of circle : ', circleperimeter(r))\nprint()\nprint('-----cuboid-----')\nl = int(input('enter length : '))\nw = int(input('enter width : '))\nh = int(input('enter height : '))\nprint('area of cuboid :', cuboidarea(1, w, h))\nprint('perimeter of cuboid : ', cuboidperimeter(1, w, h))\nprint()\nprint('-------shpere-----')\nr = int(input('enter radius: '))\nprint('area of shpere: ', spherearea(r))\nprint('perimeter of shpere : ', sphereperimeter(r))\nprint()\n",
"step-4": "from graphics.rectangle import *\nfrom graphics.circle import *\nfrom graphics.DGraphics.cuboid import *\nfrom graphics.DGraphics.sphere import *\nprint('------rectangle-------')\nl = int(input('enter length : '))\nb = int(input('enter breadth : '))\nprint('area of rectangle : ', RectArea(1, b))\nprint('perimeter of rectangle : ', Rectperimeter(1, b))\nprint()\nprint('-------circle-------')\nr = int(input('enter radius : '))\nprint('area of circle : ', circlearea(r))\nprint('perimeter of circle : ', circleperimeter(r))\nprint()\nprint('-----cuboid-----')\nl = int(input('enter length : '))\nw = int(input('enter width : '))\nh = int(input('enter height : '))\nprint('area of cuboid :', cuboidarea(1, w, h))\nprint('perimeter of cuboid : ', cuboidperimeter(1, w, h))\nprint()\nprint('-------shpere-----')\nr = int(input('enter radius: '))\nprint('area of shpere: ', spherearea(r))\nprint('perimeter of shpere : ', sphereperimeter(r))\nprint()\n",
"step-5": "from graphics.rectangle import *\r\nfrom graphics.circle import *\r\nfrom graphics.DGraphics.cuboid import *\r\nfrom graphics.DGraphics.sphere import *\r\nprint (\"------rectangle-------\")\r\nl=int(input(\"enter length : \"))\r\nb=int(input(\"enter breadth : \"))\r\nprint(\"area of rectangle : \",RectArea(1,b))\r\nprint(\"perimeter of rectangle : \",Rectperimeter(1,b))\r\nprint()\r\nprint (\"-------circle-------\")\r\nr=int(input(\"enter radius : \"))\r\nprint(\"area of circle : \",circlearea(r))\r\nprint(\"perimeter of circle : \",circleperimeter(r))\r\nprint()\r\nprint (\"-----cuboid-----\")\r\nl=int(input(\"enter length : \"))\r\nw=int(input(\"enter width : \"))\r\nh=int(input(\"enter height : \"))\r\nprint(\"area of cuboid :\",cuboidarea(1,w,h))\r\nprint(\"perimeter of cuboid : \",cuboidperimeter(1,w,h))\r\nprint()\r\nprint (\"-------shpere-----\")\r\nr=int(input(\"enter radius: \"))\r\nprint(\"area of shpere: \",spherearea(r))\r\nprint(\"perimeter of shpere : \",sphereperimeter(r))\r\nprint()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import uuid
import json
import pytest
import requests
import httpx
from spinta.testing.manifest import bootstrap_manifest
from spinta.utils.data import take
from spinta.testing.utils import error
from spinta.testing.utils import get_error_codes, RowIds
from spinta.testing.context import create_test_context
from spinta.testing.client import create_test_client
from spinta.manifests.tabular.helpers import striptable
from spinta.testing.tabular import create_tabular_manifest
from spinta.testing.data import listdata
# Shared fixture rows batch-inserted by ``_push_test_data``:
#   row 0: status 'OK',      report_type 'STV', count 10, note dated 2019-03-14
#   row 1: status 'invalid', report_type 'VMI', count 42, note dated 2019-04-20
#   row 2: status 'invalid', report_type 'STV', count 13, note dated 2019-02-01
#          (row 2 deliberately has no 'operating_licenses' entry)
test_data = [
    {
        '_type': 'report',
        'status': 'OK',
        'report_type': 'STV',
        'count': 10,
        'notes': [{
            'note': 'hello',
            'note_type': 'simple',
            'create_date': '2019-03-14',
        }],
        'operating_licenses': [{
            'license_types': ['valid', 'invalid'],
        }],
    },
    {
        '_type': 'report',
        'status': 'invalid',
        'report_type': 'VMI',
        'count': 42,
        'notes': [{
            'note': 'world',
            'note_type': 'daily',
            'create_date': '2019-04-20',
        }],
        'operating_licenses': [{
            'license_types': ['expired'],
        }],
    },
    {
        '_type': 'report',
        'status': 'invalid',
        'report_type': 'STV',
        'count': 13,
        'notes': [{
            'note': 'foo bar',
            'note_type': 'important',
            'create_date': '2019-02-01',
        }],
    },
]
def _push_test_data(app, model, data=None):
    """Batch-insert *data* (default: module-level ``test_data``) into *model*.

    Returns the list of created rows from the batch response, each carrying
    the server-assigned ``_id``.
    """
    app.authmodel(model, ['insert'])
    payload = [
        dict(row, _op='insert', _type=model)
        for row in (data or test_data)
    ]
    resp = app.post('/', json={'_data': payload})
    assert resp.status_code == 200, resp.json()
    body = resp.json()
    assert '_data' in body, body
    return body['_data']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_exact(model, context, app):
    """Exact equality on a single string property selects only that row."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # Only the first fixture row has status == 'OK'.
    resp = app.get(f'/{model}?status="OK"')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[0]['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_exact_lower(model, context, app):
    """The ``.lower()`` operator makes exact matching case-insensitive."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # 'ok' matches the stored 'OK' once lowercased.
    resp = app.get(f'/{model}?status.lower()="ok"')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[0]['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_exact_non_string(model, context, app):
    """Exact search on non-string types: matches, type errors, unknown fields."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    # Integer comparison matches the third fixture row (count == 13).
    resp = app.get(f'/{model}?count=13')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[2]['_id']

    # A string literal compared against an integer property is rejected.
    resp = app.get(f'/{model}?count="abc"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ["InvalidValue"]

    # A well-typed value that matches no row yields an empty result set.
    resp = app.get(f'/{model}?status="o"')
    assert resp.json()['_data'] == []

    # Searching on a property the model does not have is a client error.
    resp = app.get(f'/{model}?state="o"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ["FieldNotInResource"]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_exact_multiple_props(model, context, app):
    """Clauses on different properties are combined with AND logic."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # Only the third fixture row is both 'invalid' and of type 'STV'.
    resp = app.get(f'/{model}?status.lower()="invalid"&report_type.lower()="stv"')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[2]['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_exact_same_prop_multiple_times(model, context, app):
    """Contradictory clauses on one property match nothing (AND, not OR)."""
    _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # No row can be both 'invalid' and 'ok' at once.
    resp = app.get(f'/{model}?status.lower()="invalid"&status.lower()="ok"')
    assert resp.json()['_data'] == []
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_gt(model, context, app):
    """``>`` comparison: integers only, AND-joined clauses, strictly greater."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    # Only the second fixture row (count == 42) exceeds 40.
    resp = app.get(f'/{model}?count>40')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[1]['_id']

    # Ordering comparison against a string property is rejected.
    resp = app.get(f'/{model}?status>"ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ["InvalidValue"]

    # Two clauses on the same property are AND-ed together.
    resp = app.get(f'/{model}?count>40&count>10')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[1]['_id']

    # Mixed-operator clauses across properties are AND-ed as well.
    resp = app.get(f'/{model}?count>40&report_type.lower()="vmi"')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[1]['_id']

    # The bound itself is excluded: nothing is strictly greater than 42.
    resp = app.get(f'/{model}?count>42')
    assert resp.json()['_data'] == []
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_gt_with_nested_date(model, context, app):
    """``recurse()`` lets ``>`` reach a date nested inside a list of objects."""
    row_ids = RowIds(_push_test_data(app, model))
    app.authmodel(model, ['search'])
    # Only row 1's note (2019-04-20) is later than 2019-04-19.
    response = app.get(f'/{model}?recurse(create_date)>"2019-04-19"')
    assert row_ids(response) == [1]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_gte(model, context, app):
    """``>=`` comparison: integers only, AND-joined clauses, bound included."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    # Only the second fixture row (count == 42) is at least 40.
    resp = app.get(f'/{model}?count>=40')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[1]['_id']

    # Ordering comparison against a string property is rejected.
    resp = app.get(f'/{model}?status>="ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ["InvalidValue"]

    # Two clauses on the same property are AND-ed together.
    resp = app.get(f'/{model}?count>=40&count>10')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[1]['_id']

    # Mixed-operator clauses across properties are AND-ed as well.
    resp = app.get(f'/{model}?count>=40&report_type.lower()="vmi"')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[1]['_id']

    # Unlike `>`, the bound itself is included: count == 42 still matches.
    resp = app.get(f'/{model}?count>=42')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[1]['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_ge_with_nested_date(model, context, app):
    """``recurse()`` with ``>=`` includes the boundary date itself."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # Row 1's note is dated exactly 2019-04-20, so it matches the bound.
    resp = app.get(f'/{model}?recurse(create_date)>="2019-04-20"')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[1]['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_lt(model, context, app):
    """``<`` comparison: integers only, AND-joined clauses, strictly lower."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    # Only the first fixture row (count == 10) is below 12.
    resp = app.get(f'/{model}?count<12')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[0]['_id']

    # Ordering comparison against a string property is rejected.
    resp = app.get(f'/{model}?status<"ok"')
    assert resp.status_code == 400
    assert get_error_codes(resp.json()) == ["InvalidValue"]

    # Two clauses on the same property are AND-ed: 10 < count < 20 -> row 2.
    resp = app.get(f'/{model}?count<20&count>10')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[2]['_id']

    # Mixed-operator clauses across properties are AND-ed as well.
    resp = app.get(f'/{model}?count<50&report_type.lower()="vmi"')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[1]['_id']

    # The bound itself is excluded: nothing is strictly below 10.
    resp = app.get(f'/{model}?count<10')
    assert resp.json()['_data'] == []
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_lt_with_nested_date(model, context, app):
    """``recurse()`` lets ``<`` reach a date nested inside a list of objects."""
    rows = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # Only row 2's note (2019-02-01) is earlier than 2019-02-02.
    resp = app.get(f'/{model}?recurse(create_date)<"2019-02-02"')
    result = resp.json()['_data']
    assert len(result) == 1
    assert result[0]['_id'] == rows[2]['_id']
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_lte(model, context, app):
    """`<=` filters numeric fields and includes the boundary value."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    # Single field search.
    response = app.get(f'/{model}?count<=12')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r1['_id']]

    # Comparing a string field with `<=` is rejected.
    response = app.get(f'/{model}?status<="ok"')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["InvalidValue"]

    # Two operators on the same field are joined with AND logic.
    response = app.get(f'/{model}?count<=20&count>10')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r3['_id']]

    # Operators on different fields are joined with AND logic.
    response = app.get(f'/{model}?count<=50&report_type.lower()="vmi"')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r2['_id']]

    # Unlike `<`, the boundary value itself matches.
    response = app.get(f'/{model}?count<=10')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r1['_id']]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_le_with_nested_date(model, context, app):
    """`<=` works on a date property reached through recurse()."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date)<="2019-02-01"')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r3['_id']]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_ne(model, context, app):
    """`!=` excludes the rows whose field equals the given value."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    # single field search
    response = app.get(f'/{model}?status!="invalid"')
    assert row_ids(response) == [0]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_ne_lower(model, context, app):
    """`!=` combined with `.lower()` compares case-insensitively."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    # single field search, case insensitive
    response = app.get(f'/{model}?status.lower()!="ok"')
    assert row_ids(response) == [1, 2]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_ne_multiple_props(model, context, app):
    """Repeated `!=` filters on the same field are joined with AND logic."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?count!=10&count!=42')
    assert row_ids(response) == [2]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_ne_multiple_props_and_logic(model, context, app):
    """`!=` and `=` filters on different fields are joined with AND logic."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?status.lower()!="ok"&report_type.lower()="stv"')
    assert row_ids(response) == [2]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_ne_nested(model, context, app):
    """`!=` works on nested properties and ANDs with top-level filters."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?notes.create_date!="2019-02-01"&status!="invalid"')
    assert row_ids(response) == [0]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_ne_nested_missing_data(model, context, app):
    """`!=` on a nested list property when some resources lack that data."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?operating_licenses.license_types!="valid"')
    assert row_ids(response) == [1]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_contains(model, context, app, mocker):
    """`.contains()` matches a substring of the (lowered) field value."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    # single field search
    response = app.get(f'/{model}?report_type.lower().contains("vm")')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r2['_id']]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_contains_case_insensitive(model, context, app, mocker):
    """`.lower().contains()` matches regardless of the stored value's case.

    NOTE(review): this query is identical to the one in test_search_contains;
    presumably one of the two was meant to exercise a different casing —
    confirm against the original intent.
    """
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?report_type.lower().contains("vm")')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r2['_id']]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_contains_multi_field(model, context, app, mocker):
    """Multiple `.contains()` filters are joined with AND logic."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    # Two `contains` filters on different fields.
    response = app.get(f'/{model}?status.contains("valid")&report_type.lower().contains("tv")')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r3['_id']]

    # Same, without lowering the second field.
    response = app.get(f'/{model}?status.contains("valid")&report_type.contains("TV")')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r3['_id']]

    # Two `contains` filters on the same field are also ANDed.
    response = app.get(f'/{model}?report_type.lower().contains("vm")&report_type.lower().contains("mi")')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r2['_id']]

    # `contains` mixed with an exact filter.
    response = app.get(f'/{model}?status.contains("valid")&report_type.lower()="vmi"')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r2['_id']]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_contains_type_check(model, context, app):
    """`.contains()` on a date property is rejected as an invalid value."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(create_date).contains("2019-04-20")')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["InvalidValue"]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_contains_with_select(model, context, app, mocker):
    """`.contains()` combined with select() and the always_show_id option."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    # `contains` with select
    response = app.get(f'/{model}?report_type.lower().contains("vm")&select(count)')
    assert response.status_code == 200
    assert response.json()['_data'] == [
        {'count': 42},
    ]

    # `contains` with select and always_show_id
    mocker.patch.object(context.get('config'), 'always_show_id', True)
    response = app.get(f'/{model}?report_type.lower().contains("vm")&select(count)')
    assert response.status_code == 200
    assert response.json()['_data'] == [
        {'_id': r2['_id'], 'count': 42},
    ]

    # `contains` with always_show_id and no select should return just the id
    response = app.get(f'/{model}?report_type.lower().contains("vm")')
    assert response.status_code == 200
    assert response.json()['_data'] == [
        {'_id': r2['_id']},
    ]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_select_unknown_property(model, context, app, mocker):
    """select() with a property the model does not define is an error."""
    _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?select(nothere)')
    assert error(response) == 'FieldNotInResource'


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_select_unknown_property_in_object(model, context, app, mocker):
    """select() with an unknown nested property is an error."""
    _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?select(notes.nothere)')
    assert error(response) == 'FieldNotInResource'
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_startswith(model, context, app):
    """`.startswith()` anchors the match at the start of the value."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    # single field search
    response = app.get(f'/{model}?report_type.startswith("VM")')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r2['_id']]

    # single field search, case insensitive
    response = app.get(f'/{model}?report_type.lower().startswith("vm")')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r2['_id']]

    # multi field search, joined with AND logic
    response = app.get(f'/{model}?status.startswith("in")&report_type.lower().startswith("vm")')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r2['_id']]

    # multi field and multi operator search, joined with AND logic
    response = app.get(f'/{model}?report_type.lower().startswith("st")&status.lower()="ok"')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r1['_id']]

    # sanity check that `startswith` searches from the start of the value
    response = app.get(f'/{model}?status.startswith("valid")')
    assert response.json()['_data'] == []

    # `startswith` on a date property is rejected as an invalid value
    response = app.get(f'/{model}?notes.create_date.startswith("2019-04-20")')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["InvalidValue"]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_nested(model, context, app):
    """Search operators work on properties nested inside objects and lists."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])

    # nested `exact` search
    response = app.get(f'/{model}?notes.note="foo bar"')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r3['_id']]

    # nested `exact` search, case insensitive
    response = app.get(f'/{model}?notes.note.lower()="foo bar"')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r3['_id']]

    # nested `exact` search with dates
    response = app.get(f'/{model}?notes.create_date="2019-03-14"')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r1['_id']]

    # nested `gt` search
    response = app.get(f'/{model}?notes.create_date>"2019-04-01"')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r2['_id']]

    # a nested field that does not exist is an error
    response = app.get(f'/{model}?notes.foo.bar="baz"')
    assert response.status_code == 400
    assert get_error_codes(response.json()) == ["FieldNotInResource"]

    # nested `contains` search
    response = app.get(f'/{model}?notes.note.contains("bar")')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r3['_id']]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_nested_contains(model, context, app):
    """`.contains()` works on values inside a nested list property."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?operating_licenses.license_types.contains("lid")')
    assert row_ids(response) == [0]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_nested_startswith(model, context, app):
    """`.startswith()` works on nested properties."""
    app.authmodel(model, ['search'])
    r1, r2, r3 = _push_test_data(app, model)

    # nested `startswith` on an object property
    response = app.get(f'/{model}?notes.note.startswith("fo")')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r3['_id']]

    # nested `startswith` on a list property
    response = app.get(f'/{model}?operating_licenses.license_types.startswith("exp")')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r2['_id']]
def ids(resources):
    """Return the `_id` of every resource.

    Accepts either a list of resource dicts or an HTTP response object;
    a response is first checked for status 200 and unwrapped to its
    `_data` list.
    """
    if isinstance(resources, (requests.models.Response, httpx.Response)):
        response = resources
        assert response.status_code == 200, response.json()
        resources = response.json()['_data']
    return [resource['_id'] for resource in resources]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_or(model, context, app):
    """`|` joins filters with OR logic."""
    row_ids = RowIds(_push_test_data(app, model))
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?count=42|status.lower()="ok"')
    assert row_ids(response) == [0, 1]
    response = app.get(f'/{model}?count<=10|count=13')
    assert row_ids(response) == [0, 2]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_nested_recurse(model, context, app):
    """recurse() finds a property anywhere in the nested structure."""
    r1, r2, r3 = _push_test_data(app, model)
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(note)="foo bar"')
    matched = [row['_id'] for row in response.json()['_data']]
    assert matched == [r3['_id']]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_nested_recurse_lower(model, context, app):
    """recurse() can be combined with `.lower()`."""
    r1, r2, r3 = ids(_push_test_data(app, model))
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(status).lower()="ok"')
    assert ids(response) == [r1]
@pytest.mark.models(
    'backends/mongo/recurse',
    'backends/postgres/recurse',
)
def test_search_nested_recurse_multiple_props(model, context, app):
    """recurse() matches a property present on several nesting levels."""
    payload = [
        {
            'title': "Org",
            'country': 'fi',
            'govids': [
                {'govid': '1', 'country': 'fi'},
                {'govid': '2', 'country': 'se'},
            ]
        },
        {
            'title': "Org",
            'country': 'no',
            'govids': [
                {'govid': '3', 'country': 'no'},
            ]
        },
    ]
    org_fi, org_no = ids(_push_test_data(app, model, payload))
    app.authmodel(model, ['search'])
    # Matches via the nested govids list only.
    response = app.get(f'/{model}?recurse(country)="se"')
    assert ids(response) == [org_fi]
    # Matches via both the top-level and the nested property.
    response = app.get(f'/{model}?recurse(country)="fi"')
    assert ids(response) == [org_fi]
    response = app.get(f'/{model}?recurse(country)="no"')
    assert ids(response) == [org_no]


@pytest.mark.models(
    'backends/mongo/recurse',
    'backends/postgres/recurse',
)
def test_search_recurse_multiple_props_lower(model, app):
    """recurse() with `.lower()` matches values stored in upper case."""
    payload = [
        {
            'title': "Org",
            'country': 'fi',
            'govids': [
                {'govid': '1', 'country': 'FI'},
                {'govid': '2', 'country': 'SE'},
            ]
        },
        {
            'title': "Org",
            'country': 'no',
            'govids': [
                {'govid': '3', 'country': 'NO'},
            ]
        },
    ]
    org_fi, org_no = ids(_push_test_data(app, model, payload))
    app.authmodel(model, ['search'])
    response = app.get(f'/{model}?recurse(country).lower()="se"')
    assert ids(response) == [org_fi]
    response = app.get(f'/{model}?recurse(country).lower()="fi"')
    assert ids(response) == [org_fi]
    response = app.get(f'/{model}?recurse(country).lower()="no"')
    assert ids(response) == [org_no]
# TODO: add mongo
def test_search_any(app):
    """any("eq", field, v1, v2, ...) ORs several equality checks on one field."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("eq",count,10,42)')
    assert row_ids(response) == [0, 1]
    response = app.get(f'/{model}?any("ne",count,42)')
    assert row_ids(response) == [0, 2]


# TODO: add mongo
def test_search_any_in_list(app):
    """any() works on a property inside a nested list of objects."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("eq",notes.note,"hello","world")')
    assert sorted(row_ids(response)) == [0, 1]
    response = app.get(f'/{model}?any("ne",notes.note,"foo bar")')
    assert sorted(row_ids(response)) == [0, 1]


# TODO: add mongo
def test_search_any_in_list_of_scalars(app):
    """any() works on a nested list of scalar values."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("eq",operating_licenses.license_types,"valid","invalid","expired")')
    assert sorted(row_ids(response)) == [0, 1]
    response = app.get(f'/{model}?any("ne",operating_licenses.license_types,"expired")')
    assert sorted(row_ids(response)) == [0]


# TODO: add mongo
def test_search_any_recurse(app):
    """any() accepts a recurse() expression as its field argument."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("eq",recurse(status),"OK","none")')
    assert row_ids(response) == [0]


# TODO: add mongo
def test_search_any_recurse_lower(app):
    """any() accepts recurse() combined with `.lower()`."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("eq",recurse(status).lower(),"ok","none")')
    assert row_ids(response) == [0]


# TODO: add mongo
def test_search_any_contains(app):
    """any("contains", ...) ORs several substring checks on one field."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("contains",status,"inv","val","lid")')
    assert sorted(row_ids(response)) == [1, 2]


# TODO: add mongo
def test_search_any_contains_nested(app):
    """any("contains", ...) works on nested list properties."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("contains",notes.note,"hel","wor")')
    assert sorted(row_ids(response)) == [0, 1]


# TODO: add mongo
def test_search_any_contains_recurse_lower(app):
    """any("contains", ...) works with recurse() and `.lower()`."""
    model = 'backends/postgres/report'
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?any("contains",recurse(status).lower(),"o","k")')
    assert sorted(row_ids(response)) == [0]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_id_contains(model, app):
    """`.contains()` works on the `_id` meta property."""
    app.authmodel(model, ['search', 'getall'])
    row_ids = RowIds(_push_test_data(app, model))
    # Every id contains a dash, so all rows match.
    response = app.get(f'/{model}?_id.contains("-")')
    assert sorted(row_ids(response)) == [0, 1, 2]
    # A fragment taken from one id matches only that row.
    fragment = row_ids[0][5:10]
    response = app.get(f'/{model}?_id.contains("{fragment}")')
    assert row_ids(response) == [0]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_id_not_contains(model, app):
    """`.contains()` on `_id` with a non-matching fragment returns nothing."""
    app.authmodel(model, ['search', 'getall'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?_id.contains("AAAAA")')
    assert row_ids(response) == []


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_id_startswith(model, app):
    """`.startswith()` on `_id` matches the row whose id has that prefix."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    prefix = row_ids[0][:5]
    response = app.get(f'/{model}?_id.startswith("{prefix}")')
    assert row_ids(response) == [0]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_id_not_startswith(model, app):
    """`.startswith()` on `_id` with a mid-id fragment matches nothing."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    fragment = row_ids[0][5:10]
    response = app.get(f'/{model}?_id.startswith("{fragment}")')
    assert row_ids(response) == []
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_revision_contains(model, app):
    """`.contains()` works on the `_revision` meta property."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?_revision.contains("-")')
    assert sorted(row_ids(response)) == [0, 1, 2]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_revision_startswith(model, app):
    """`.startswith()` works on the `_revision` meta property."""
    app.authmodel(model, ['search', 'getone'])
    row_ids = RowIds(_push_test_data(app, model))
    first_id = row_ids[0]
    response = app.get(f'/{model}/{first_id}')
    revision = response.json()['_revision'][:5]
    response = app.get(f'/{model}?_revision.startswith("{revision}")')
    assert row_ids(response) == [0]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_group(model, app):
    """Parenthesized groups of filters are supported."""
    app.authmodel(model, ['search', 'getone'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?(report_type="STV"&status="OK")')
    assert row_ids(response) == [0]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_select_in_or(model, app):
    """select() can be combined with a grouped OR filter."""
    app.authmodel(model, ['search', 'getone'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?(report_type="STV"|status="OK")&select(_id)')
    # XXX: Flaky test, some times it gives [2, 0], don't know why.
    assert row_ids(response) == [0, 2]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_lower_contains(model, app):
    """`.lower().contains()` matches case-insensitively."""
    app.authmodel(model, ['search', 'getone'])
    row_ids = RowIds(_push_test_data(app, model))
    response = app.get(f'/{model}?report_type.lower().contains("st")')
    # XXX: Flaky test, some times it gives [2, 0], don't know why.
    assert row_ids(response) == [0, 2]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_null(model, app):
    """`= null` matches rows where the property has no value."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model, [
        {'status': 'OK'},
        {},
    ]))
    response = app.get(f'/{model}?status=null')
    assert row_ids(response) == [1]


@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_search_not_null(model, app):
    """`!= null` matches rows where the property has a value."""
    app.authmodel(model, ['search'])
    row_ids = RowIds(_push_test_data(app, model, [
        {'status': 'OK'},
        {},
    ]))
    response = app.get(f'/{model}?status!=null')
    assert row_ids(response) == [0]
@pytest.mark.parametrize('backend', ['default', 'mongo'])
def test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):
    """Reading a model whose stored rows have more fields than the schema."""
    rc = rc.fork({
        'backends': [backend],
        'manifests.default': {
            'type': 'tabular',
            'path': str(tmp_path / 'manifest.csv'),
            'backend': backend,
        },
    })
    # Create data using a model with both `code` and `name` properties.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
m | property | type
extrafields |
| code | string
| name | string
'''))
    context = create_test_context(rc)
    request.addfinalizer(context.wipe_all)
    app = create_test_client(context)
    app.authmodel('extrafields', ['insert'])
    response = app.post('/extrafields', json={'_data': [
        {'_op': 'insert', 'code': 'lt', 'name': 'Lietuva'},
        {'_op': 'insert', 'code': 'lv', 'name': 'Latvija'},
        {'_op': 'insert', 'code': 'ee', 'name': 'Estija'},
    ]})
    assert response.status_code == 200, response.json()
    # Now read from the same model, but loaded with just one property.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
m | property | type
extrafields |
| name | string
'''))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('extrafields', ['getall', 'getone'])
    response = app.get('/extrafields')
    assert listdata(response, sort=True) == [
        "Estija",
        "Latvija",
        "Lietuva",
    ]
    item_id = response.json()['_data'][0]['_id']
    response = app.get(f'/extrafields/{item_id}')
    payload = response.json()
    assert response.status_code == 200, payload
    assert take(payload) == {'name': 'Lietuva'}
@pytest.mark.parametrize('backend', ['mongo'])
def test_missing_fields(postgresql, mongo, backend, rc, tmp_path):
    """Reading a model whose schema now has more fields than the stored rows."""
    rc = rc.fork({
        'backends': [backend],
        'manifests.default': {
            'type': 'tabular',
            'path': str(tmp_path / 'manifest.csv'),
            'backend': backend,
        },
    })
    # Create data using a model that has only a `code` property.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
m | property | type
missingfields |
| code | string
'''))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('missingfields', ['insert'])
    response = app.post('/missingfields', json={'_data': [
        {'_op': 'insert', 'code': 'lt'},
        {'_op': 'insert', 'code': 'lv'},
        {'_op': 'insert', 'code': 'ee'},
    ]})
    assert response.status_code == 200, response.json()
    # Now read from the same model, but loaded with an additional property.
    create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
m | property | type
missingfields |
| code | string
| name | string
'''))
    context = create_test_context(rc)
    app = create_test_client(context)
    app.authmodel('missingfields', ['search', 'getone'])
    response = app.get('/missingfields?select(_id,code,name)')
    assert listdata(response, sort=True) == [
        ('ee', None),
        ('lt', None),
        ('lv', None),
    ]
    item_id = response.json()['_data'][0]['_id']
    response = app.get(f'/missingfields/{item_id}')
    payload = response.json()
    assert response.status_code == 200, payload
    assert take(payload) == {'code': 'lt'}
def test_base_select(rc, postgresql, request):
    """select() can pull properties from a model's base via `_base`."""
    context = bootstrap_manifest(rc, '''
d | r | b | m | property | type | ref
datasets/gov/example/base | |
| |
| | | Location | |
| | | | id | integer |
| | | | name | string |
| | | | type | string |
| |
| | Location | |
| | | City | |
| | | | id | |
| | | | name | string |
| | | | population | integer |
''', backend=postgresql, request=request)
    app = create_test_client(context)
    app.authorize(['spinta_set_meta_fields'])
    app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])
    app.authmodel('datasets/gov/example/base/City', ['insert', 'delete', 'getall', 'search'])
    # The base row and the derived row share the same id.
    shared_id = str(uuid.uuid4())
    app.post('/datasets/gov/example/base/Location', json={
        '_id': shared_id,
        'id': 1,
        'name': 'Base location',
        'type': 'city'
    })
    app.post('/datasets/gov/example/base/City', json={
        '_id': shared_id,
        'name': 'City',
        'population': 100
    })
    response = app.get('/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)')
    assert response.json()['_data'] == [
        {
            '_base': {'name': 'Base location', 'type': 'city'},
            'id': 1,
            'name': 'City',
            'population': 100
        }
    ]
@pytest.mark.models(
    'backends/mongo/report',
    'backends/postgres/report',
)
def test_select_revision(model, app):
    """select(_revision) through the jsonl format returns only the revision."""
    app.authmodel(model, ['search', 'getone', 'getall'])
    row_ids = RowIds(_push_test_data(app, model))
    first_id = row_ids[0]
    response = app.get(f'/{model}/{first_id}')
    revision = response.json()['_revision']
    response = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')
    assert json.loads(response.content) == {
        '_revision': revision
    }
|
normal
|
{
"blob_id": "57e9c1a4ac57f68e0e73c2c67c6828de8efb1b16",
"index": 3903,
"step-1": "<mask token>\n\n\ndef _push_test_data(app, model, data=None):\n app.authmodel(model, ['insert'])\n resp = app.post('/', json={'_data': [{**res, '_op': 'insert', '_type':\n model} for res in data or test_data]})\n assert resp.status_code == 200, resp.json()\n resp = resp.json()\n assert '_data' in resp, resp\n return resp['_data']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status=\"OK\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_lower(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_non_string(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=13')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count=\"abc\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?status=\"o\"')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?state=\"o\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_multiple_props(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n 
f'/{model}?status.lower()=\"invalid\"&report_type.lower()=\"stv\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_same_prop_multiple_times(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?status>\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>42')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt_with_nested_date(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>\"2019-04-19\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>=40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == 
r2['_id']\n resp = app.get(f'/{model}?status>=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>=40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=42')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ge_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>=\"2019-04-20\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<10')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n 
app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<\"2019-02-02\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<=12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<=20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<=50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<=10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_le_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<=\"2019-02-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_lower(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"')\n assert ids(resp) == [1, 2]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props_and_logic(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, 
model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"&report_type.lower()=\"stv\"')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?notes.create_date!=\"2019-02-01\"&status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested_missing_data(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types!=\"valid\"')\n assert ids(resp) == [1]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_case_insensitive(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_multi_field(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower().contains(\"tv\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.contains(\"TV\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?report_type.lower().contains(\"vm\")&report_type.lower().contains(\"mi\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n 
f'/{model}?status.contains(\"valid\")&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property_in_object(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(notes.nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_startswith(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.startswith(\"VM\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.startswith(\"in\")&report_type.lower().startswith(\"vm\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?report_type.lower().startswith(\"st\")&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status.startswith(\"valid\")')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?notes.create_date.startswith(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\n<mask 
token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_contains(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types.contains(\"lid\")'\n )\n assert ids(resp) == [0]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_or(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=42|status.lower()=\"ok\"')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?count<=10|count=13')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_recurse(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(note)=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')\ndef test_search_recurse_multiple_props_lower(model, app):\n r1, r2 = ids(_push_test_data(app, model, [{'title': 'Org', 'country':\n 'fi', 'govids': [{'govid': '1', 'country': 'FI'}, {'govid': '2',\n 'country': 'SE'}]}, {'title': 'Org', 'country': 'no', 'govids': [{\n 'govid': '3', 'country': 'NO'}]}]))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(country).lower()=\"se\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"fi\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"no\"')\n assert ids(resp) == [r2]\n\n\ndef test_search_any(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",count,10,42)')\n assert ids(resp) == [0, 
1]\n resp = app.get(f'/{model}?any(\"ne\",count,42)')\n assert ids(resp) == [0, 2]\n\n\ndef test_search_any_in_list(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",notes.note,\"hello\",\"world\")')\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",notes.note,\"foo bar\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\n<mask token>\n\n\ndef test_search_any_recurse(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status),\"OK\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status).lower(),\"ok\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_contains(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",status,\"inv\",\"val\",\"lid\")')\n assert sorted(ids(resp)) == [1, 2]\n\n\ndef test_search_any_contains_nested(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",notes.note,\"hel\",\"wor\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_contains_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",recurse(status).lower(),\"o\",\"k\")')\n assert sorted(ids(resp)) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = 
RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.contains(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][:5]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_revision_contains(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_revision.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status=null')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_not_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status!=null')\n assert ids(resp) == [0]\n\n\n@pytest.mark.parametrize('backend', ['default', 'mongo'])\ndef test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path 
/ 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n request.addfinalizer(context.wipe_all)\n app = create_test_client(context)\n app.authmodel('extrafields', ['insert'])\n resp = app.post('/extrafields', json={'_data': [{'_op': 'insert',\n 'code': 'lt', 'name': 'Lietuva'}, {'_op': 'insert', 'code': 'lv',\n 'name': 'Latvija'}, {'_op': 'insert', 'code': 'ee', 'name': 'Estija'}]}\n )\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('extrafields', ['getall', 'getone'])\n resp = app.get('/extrafields')\n assert listdata(resp, sort=True) == ['Estija', 'Latvija', 'Lietuva']\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/extrafields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'name': 'Lietuva'}\n\n\n<mask token>\n\n\ndef test_base_select(rc, postgresql, request):\n context = bootstrap_manifest(rc,\n \"\"\"\n d | r | b | m | property | type | ref\n datasets/gov/example/base | |\n | |\n | | | Location | |\n | | | | id | integer |\n | | | | name | string |\n | | | | type | string |\n | |\n | | Location | |\n | | | City | |\n | | | | id | |\n | | | | name | string |\n | | | | population | integer |\n \"\"\"\n , backend=postgresql, request=request)\n app = create_test_client(context)\n app.authorize(['spinta_set_meta_fields'])\n app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])\n app.authmodel('datasets/gov/example/base/City', ['insert', 'delete',\n 'getall', 'search'])\n _id = str(uuid.uuid4())\n app.post('/datasets/gov/example/base/Location', json={'_id': _id, 'id':\n 1, 'name': 
'Base location', 'type': 'city'})\n app.post('/datasets/gov/example/base/City', json={'_id': _id, 'name':\n 'City', 'population': 100})\n resp = app.get(\n '/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)'\n )\n assert resp.json()['_data'] == [{'_base': {'name': 'Base location',\n 'type': 'city'}, 'id': 1, 'name': 'City', 'population': 100}]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_revision(model, app):\n app.authmodel(model, ['search', 'getone', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision']\n resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')\n assert json.loads(resp.content) == {'_revision': revision}\n",
"step-2": "<mask token>\n\n\ndef _push_test_data(app, model, data=None):\n app.authmodel(model, ['insert'])\n resp = app.post('/', json={'_data': [{**res, '_op': 'insert', '_type':\n model} for res in data or test_data]})\n assert resp.status_code == 200, resp.json()\n resp = resp.json()\n assert '_data' in resp, resp\n return resp['_data']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status=\"OK\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_lower(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_non_string(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=13')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count=\"abc\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?status=\"o\"')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?state=\"o\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_multiple_props(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n 
f'/{model}?status.lower()=\"invalid\"&report_type.lower()=\"stv\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_same_prop_multiple_times(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?status>\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>42')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt_with_nested_date(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>\"2019-04-19\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>=40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == 
r2['_id']\n resp = app.get(f'/{model}?status>=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>=40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=42')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ge_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>=\"2019-04-20\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<10')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n 
app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<\"2019-02-02\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<=12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<=20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<=50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<=10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_le_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<=\"2019-02-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_lower(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"')\n assert ids(resp) == [1, 2]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props_and_logic(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, 
model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"&report_type.lower()=\"stv\"')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?notes.create_date!=\"2019-02-01\"&status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested_missing_data(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types!=\"valid\"')\n assert ids(resp) == [1]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_case_insensitive(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_multi_field(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower().contains(\"tv\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.contains(\"TV\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?report_type.lower().contains(\"vm\")&report_type.lower().contains(\"mi\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n 
f'/{model}?status.contains(\"valid\")&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_type_check(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date).contains(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property_in_object(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(notes.nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_startswith(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.startswith(\"VM\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.startswith(\"in\")&report_type.lower().startswith(\"vm\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?report_type.lower().startswith(\"st\")&status.lower()=\"ok\"')\n data = 
resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status.startswith(\"valid\")')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?notes.create_date.startswith(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_contains(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types.contains(\"lid\")'\n )\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_startswith(model, context, app):\n app.authmodel(model, ['search'])\n r1, r2, r3 = _push_test_data(app, model)\n resp = app.get(f'/{model}?notes.note.startswith(\"fo\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?operating_licenses.license_types.startswith(\"exp\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_or(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=42|status.lower()=\"ok\"')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?count<=10|count=13')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_recurse(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(note)=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n<mask 
token>\n\n\n@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')\ndef test_search_recurse_multiple_props_lower(model, app):\n r1, r2 = ids(_push_test_data(app, model, [{'title': 'Org', 'country':\n 'fi', 'govids': [{'govid': '1', 'country': 'FI'}, {'govid': '2',\n 'country': 'SE'}]}, {'title': 'Org', 'country': 'no', 'govids': [{\n 'govid': '3', 'country': 'NO'}]}]))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(country).lower()=\"se\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"fi\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"no\"')\n assert ids(resp) == [r2]\n\n\ndef test_search_any(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",count,10,42)')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",count,42)')\n assert ids(resp) == [0, 2]\n\n\ndef test_search_any_in_list(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",notes.note,\"hello\",\"world\")')\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",notes.note,\"foo bar\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\n<mask token>\n\n\ndef test_search_any_recurse(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status),\"OK\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status).lower(),\"ok\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_contains(app):\n model = 'backends/postgres/report'\n 
app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",status,\"inv\",\"val\",\"lid\")')\n assert sorted(ids(resp)) == [1, 2]\n\n\ndef test_search_any_contains_nested(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",notes.note,\"hel\",\"wor\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_contains_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",recurse(status).lower(),\"o\",\"k\")')\n assert sorted(ids(resp)) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.contains(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"AAAAA\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][:5]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][5:10]\n resp = 
app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_revision_contains(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_revision.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_select_in_or(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"|status=\"OK\")&select(_id)')\n assert ids(resp) == [0, 2]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status=null')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_not_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status!=null')\n assert ids(resp) == [0]\n\n\n@pytest.mark.parametrize('backend', ['default', 'mongo'])\ndef test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n request.addfinalizer(context.wipe_all)\n app = create_test_client(context)\n app.authmodel('extrafields', ['insert'])\n resp = app.post('/extrafields', json={'_data': [{'_op': 'insert',\n 'code': 'lt', 'name': 
'Lietuva'}, {'_op': 'insert', 'code': 'lv',\n 'name': 'Latvija'}, {'_op': 'insert', 'code': 'ee', 'name': 'Estija'}]}\n )\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('extrafields', ['getall', 'getone'])\n resp = app.get('/extrafields')\n assert listdata(resp, sort=True) == ['Estija', 'Latvija', 'Lietuva']\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/extrafields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'name': 'Lietuva'}\n\n\n@pytest.mark.parametrize('backend', ['mongo'])\ndef test_missing_fields(postgresql, mongo, backend, rc, tmp_path):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['insert'])\n resp = app.post('/missingfields', json={'_data': [{'_op': 'insert',\n 'code': 'lt'}, {'_op': 'insert', 'code': 'lv'}, {'_op': 'insert',\n 'code': 'ee'}]})\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['search', 'getone'])\n resp = app.get('/missingfields?select(_id,code,name)')\n assert listdata(resp, sort=True) == [('ee', None), ('lt', None), ('lv',\n None)]\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/missingfields/{pk}')\n data = resp.json()\n assert 
resp.status_code == 200, data\n assert take(data) == {'code': 'lt'}\n\n\ndef test_base_select(rc, postgresql, request):\n context = bootstrap_manifest(rc,\n \"\"\"\n d | r | b | m | property | type | ref\n datasets/gov/example/base | |\n | |\n | | | Location | |\n | | | | id | integer |\n | | | | name | string |\n | | | | type | string |\n | |\n | | Location | |\n | | | City | |\n | | | | id | |\n | | | | name | string |\n | | | | population | integer |\n \"\"\"\n , backend=postgresql, request=request)\n app = create_test_client(context)\n app.authorize(['spinta_set_meta_fields'])\n app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])\n app.authmodel('datasets/gov/example/base/City', ['insert', 'delete',\n 'getall', 'search'])\n _id = str(uuid.uuid4())\n app.post('/datasets/gov/example/base/Location', json={'_id': _id, 'id':\n 1, 'name': 'Base location', 'type': 'city'})\n app.post('/datasets/gov/example/base/City', json={'_id': _id, 'name':\n 'City', 'population': 100})\n resp = app.get(\n '/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)'\n )\n assert resp.json()['_data'] == [{'_base': {'name': 'Base location',\n 'type': 'city'}, 'id': 1, 'name': 'City', 'population': 100}]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_revision(model, app):\n app.authmodel(model, ['search', 'getone', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision']\n resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')\n assert json.loads(resp.content) == {'_revision': revision}\n",
"step-3": "<mask token>\n\n\ndef _push_test_data(app, model, data=None):\n app.authmodel(model, ['insert'])\n resp = app.post('/', json={'_data': [{**res, '_op': 'insert', '_type':\n model} for res in data or test_data]})\n assert resp.status_code == 200, resp.json()\n resp = resp.json()\n assert '_data' in resp, resp\n return resp['_data']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status=\"OK\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_lower(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_non_string(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=13')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count=\"abc\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?status=\"o\"')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?state=\"o\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_multiple_props(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n 
f'/{model}?status.lower()=\"invalid\"&report_type.lower()=\"stv\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_same_prop_multiple_times(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?status>\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>42')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt_with_nested_date(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>\"2019-04-19\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>=40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == 
r2['_id']\n resp = app.get(f'/{model}?status>=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>=40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=42')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ge_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>=\"2019-04-20\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<10')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n 
app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<\"2019-02-02\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<=12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<=20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<=50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<=10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_le_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<=\"2019-02-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_lower(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"')\n 
assert ids(resp) == [1, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?count!=10&count!=42')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props_and_logic(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"&report_type.lower()=\"stv\"')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?notes.create_date!=\"2019-02-01\"&status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested_missing_data(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types!=\"valid\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_case_insensitive(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert 
len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_multi_field(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower().contains(\"tv\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.contains(\"TV\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?report_type.lower().contains(\"vm\")&report_type.lower().contains(\"mi\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_type_check(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date).contains(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_with_select(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)'\n )\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'count': 42}\n mocker.patch.object(context.get('config'), 'always_show_id', True)\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)'\n )\n assert 
resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'_id': r2['_id'], 'count': 42}\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'_id': r2['_id']}\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property_in_object(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(notes.nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_startswith(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.startswith(\"VM\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.startswith(\"in\")&report_type.lower().startswith(\"vm\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?report_type.lower().startswith(\"st\")&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status.startswith(\"valid\")')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = 
app.get(f'/{model}?notes.create_date.startswith(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?notes.note=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?notes.note.lower()=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?notes.create_date=\"2019-03-14\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?notes.create_date>\"2019-04-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?notes.foo.bar=\"baz\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n resp = app.get(f'/{model}?notes.note.contains(\"bar\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_contains(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types.contains(\"lid\")'\n )\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_startswith(model, context, app):\n app.authmodel(model, ['search'])\n r1, r2, r3 = _push_test_data(app, model)\n resp = app.get(f'/{model}?notes.note.startswith(\"fo\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n 
f'/{model}?operating_licenses.license_types.startswith(\"exp\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_or(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=42|status.lower()=\"ok\"')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?count<=10|count=13')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_recurse(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(note)=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')\ndef test_search_recurse_multiple_props_lower(model, app):\n r1, r2 = ids(_push_test_data(app, model, [{'title': 'Org', 'country':\n 'fi', 'govids': [{'govid': '1', 'country': 'FI'}, {'govid': '2',\n 'country': 'SE'}]}, {'title': 'Org', 'country': 'no', 'govids': [{\n 'govid': '3', 'country': 'NO'}]}]))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(country).lower()=\"se\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"fi\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"no\"')\n assert ids(resp) == [r2]\n\n\ndef test_search_any(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",count,10,42)')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",count,42)')\n assert ids(resp) == [0, 2]\n\n\ndef test_search_any_in_list(app):\n model = 'backends/postgres/report'\n 
app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",notes.note,\"hello\",\"world\")')\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",notes.note,\"foo bar\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_in_list_of_scalars(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?any(\"eq\",operating_licenses.license_types,\"valid\",\"invalid\",\"expired\")'\n )\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(\n f'/{model}?any(\"ne\",operating_licenses.license_types,\"expired\")')\n assert sorted(ids(resp)) == [0]\n\n\ndef test_search_any_recurse(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status),\"OK\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status).lower(),\"ok\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_contains(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",status,\"inv\",\"val\",\"lid\")')\n assert sorted(ids(resp)) == [1, 2]\n\n\ndef test_search_any_contains_nested(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",notes.note,\"hel\",\"wor\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_contains_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = 
app.get(f'/{model}?any(\"contains\",recurse(status).lower(),\"o\",\"k\")')\n assert sorted(ids(resp)) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.contains(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"AAAAA\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][:5]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_revision_contains(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_revision.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_group(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = 
app.get(f'/{model}?(report_type=\"STV\"&status=\"OK\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_select_in_or(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"|status=\"OK\")&select(_id)')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lower_contains(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?report_type.lower().contains(\"st\")')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status=null')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_not_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status!=null')\n assert ids(resp) == [0]\n\n\n@pytest.mark.parametrize('backend', ['default', 'mongo'])\ndef test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n request.addfinalizer(context.wipe_all)\n app = create_test_client(context)\n app.authmodel('extrafields', ['insert'])\n resp = app.post('/extrafields', json={'_data': [{'_op': 'insert',\n 'code': 'lt', 'name': 'Lietuva'}, {'_op': 
'insert', 'code': 'lv',\n 'name': 'Latvija'}, {'_op': 'insert', 'code': 'ee', 'name': 'Estija'}]}\n )\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('extrafields', ['getall', 'getone'])\n resp = app.get('/extrafields')\n assert listdata(resp, sort=True) == ['Estija', 'Latvija', 'Lietuva']\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/extrafields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'name': 'Lietuva'}\n\n\n@pytest.mark.parametrize('backend', ['mongo'])\ndef test_missing_fields(postgresql, mongo, backend, rc, tmp_path):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['insert'])\n resp = app.post('/missingfields', json={'_data': [{'_op': 'insert',\n 'code': 'lt'}, {'_op': 'insert', 'code': 'lv'}, {'_op': 'insert',\n 'code': 'ee'}]})\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['search', 'getone'])\n resp = app.get('/missingfields?select(_id,code,name)')\n assert listdata(resp, sort=True) == [('ee', None), ('lt', None), ('lv',\n None)]\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/missingfields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, 
data\n assert take(data) == {'code': 'lt'}\n\n\ndef test_base_select(rc, postgresql, request):\n context = bootstrap_manifest(rc,\n \"\"\"\n d | r | b | m | property | type | ref\n datasets/gov/example/base | |\n | |\n | | | Location | |\n | | | | id | integer |\n | | | | name | string |\n | | | | type | string |\n | |\n | | Location | |\n | | | City | |\n | | | | id | |\n | | | | name | string |\n | | | | population | integer |\n \"\"\"\n , backend=postgresql, request=request)\n app = create_test_client(context)\n app.authorize(['spinta_set_meta_fields'])\n app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])\n app.authmodel('datasets/gov/example/base/City', ['insert', 'delete',\n 'getall', 'search'])\n _id = str(uuid.uuid4())\n app.post('/datasets/gov/example/base/Location', json={'_id': _id, 'id':\n 1, 'name': 'Base location', 'type': 'city'})\n app.post('/datasets/gov/example/base/City', json={'_id': _id, 'name':\n 'City', 'population': 100})\n resp = app.get(\n '/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)'\n )\n assert resp.json()['_data'] == [{'_base': {'name': 'Base location',\n 'type': 'city'}, 'id': 1, 'name': 'City', 'population': 100}]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_revision(model, app):\n app.authmodel(model, ['search', 'getone', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision']\n resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')\n assert json.loads(resp.content) == {'_revision': revision}\n",
"step-4": "<mask token>\n\n\ndef _push_test_data(app, model, data=None):\n app.authmodel(model, ['insert'])\n resp = app.post('/', json={'_data': [{**res, '_op': 'insert', '_type':\n model} for res in data or test_data]})\n assert resp.status_code == 200, resp.json()\n resp = resp.json()\n assert '_data' in resp, resp\n return resp['_data']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status=\"OK\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_lower(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_non_string(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=13')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count=\"abc\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?status=\"o\"')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?state=\"o\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_multiple_props(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n 
f'/{model}?status.lower()=\"invalid\"&report_type.lower()=\"stv\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_same_prop_multiple_times(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?status>\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>42')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt_with_nested_date(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>\"2019-04-19\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>=40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == 
r2['_id']\n resp = app.get(f'/{model}?status>=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>=40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=42')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ge_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>=\"2019-04-20\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<10')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n 
app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<\"2019-02-02\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<=12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<=20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<=50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<=10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_le_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<=\"2019-02-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_lower(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"')\n 
assert ids(resp) == [1, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?count!=10&count!=42')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props_and_logic(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"&report_type.lower()=\"stv\"')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?notes.create_date!=\"2019-02-01\"&status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested_missing_data(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types!=\"valid\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_case_insensitive(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert 
len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_multi_field(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower().contains(\"tv\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.contains(\"TV\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?report_type.lower().contains(\"vm\")&report_type.lower().contains(\"mi\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_type_check(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date).contains(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_with_select(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)'\n )\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'count': 42}\n mocker.patch.object(context.get('config'), 'always_show_id', True)\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)'\n )\n assert 
resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'_id': r2['_id'], 'count': 42}\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'_id': r2['_id']}\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property_in_object(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(notes.nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_startswith(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.startswith(\"VM\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.startswith(\"in\")&report_type.lower().startswith(\"vm\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?report_type.lower().startswith(\"st\")&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status.startswith(\"valid\")')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = 
app.get(f'/{model}?notes.create_date.startswith(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?notes.note=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?notes.note.lower()=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?notes.create_date=\"2019-03-14\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?notes.create_date>\"2019-04-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?notes.foo.bar=\"baz\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n resp = app.get(f'/{model}?notes.note.contains(\"bar\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_contains(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types.contains(\"lid\")'\n )\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_startswith(model, context, app):\n app.authmodel(model, ['search'])\n r1, r2, r3 = _push_test_data(app, model)\n resp = app.get(f'/{model}?notes.note.startswith(\"fo\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n 
f'/{model}?operating_licenses.license_types.startswith(\"exp\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\ndef ids(resources):\n if isinstance(resources, (requests.models.Response, httpx.Response)):\n resp = resources\n assert resp.status_code == 200, resp.json()\n resources = resp.json()['_data']\n return [r['_id'] for r in resources]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_or(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=42|status.lower()=\"ok\"')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?count<=10|count=13')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_recurse(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(note)=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')\ndef test_search_nested_recurse_multiple_props(model, context, app):\n r1, r2 = ids(_push_test_data(app, model, [{'title': 'Org', 'country':\n 'fi', 'govids': [{'govid': '1', 'country': 'fi'}, {'govid': '2',\n 'country': 'se'}]}, {'title': 'Org', 'country': 'no', 'govids': [{\n 'govid': '3', 'country': 'no'}]}]))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(country)=\"se\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country)=\"fi\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country)=\"no\"')\n assert ids(resp) == [r2]\n\n\n@pytest.mark.models('backends/mongo/recurse', 'backends/postgres/recurse')\ndef test_search_recurse_multiple_props_lower(model, app):\n r1, r2 = ids(_push_test_data(app, model, [{'title': 
'Org', 'country':\n 'fi', 'govids': [{'govid': '1', 'country': 'FI'}, {'govid': '2',\n 'country': 'SE'}]}, {'title': 'Org', 'country': 'no', 'govids': [{\n 'govid': '3', 'country': 'NO'}]}]))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(country).lower()=\"se\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"fi\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"no\"')\n assert ids(resp) == [r2]\n\n\ndef test_search_any(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",count,10,42)')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",count,42)')\n assert ids(resp) == [0, 2]\n\n\ndef test_search_any_in_list(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",notes.note,\"hello\",\"world\")')\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",notes.note,\"foo bar\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_in_list_of_scalars(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?any(\"eq\",operating_licenses.license_types,\"valid\",\"invalid\",\"expired\")'\n )\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(\n f'/{model}?any(\"ne\",operating_licenses.license_types,\"expired\")')\n assert sorted(ids(resp)) == [0]\n\n\ndef test_search_any_recurse(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status),\"OK\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = 
RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status).lower(),\"ok\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_contains(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",status,\"inv\",\"val\",\"lid\")')\n assert sorted(ids(resp)) == [1, 2]\n\n\ndef test_search_any_contains_nested(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",notes.note,\"hel\",\"wor\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_contains_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",recurse(status).lower(),\"o\",\"k\")')\n assert sorted(ids(resp)) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.contains(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"AAAAA\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][:5]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == 
[0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_revision_contains(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_revision.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n\n<mask token>\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_group(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"&status=\"OK\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_select_in_or(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"|status=\"OK\")&select(_id)')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lower_contains(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?report_type.lower().contains(\"st\")')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status=null')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_search_not_null(model, app):\n app.authmodel(model, ['search'])\n ids = 
RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status!=null')\n assert ids(resp) == [0]\n\n\n@pytest.mark.parametrize('backend', ['default', 'mongo'])\ndef test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n request.addfinalizer(context.wipe_all)\n app = create_test_client(context)\n app.authmodel('extrafields', ['insert'])\n resp = app.post('/extrafields', json={'_data': [{'_op': 'insert',\n 'code': 'lt', 'name': 'Lietuva'}, {'_op': 'insert', 'code': 'lv',\n 'name': 'Latvija'}, {'_op': 'insert', 'code': 'ee', 'name': 'Estija'}]}\n )\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('extrafields', ['getall', 'getone'])\n resp = app.get('/extrafields')\n assert listdata(resp, sort=True) == ['Estija', 'Latvija', 'Lietuva']\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/extrafields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'name': 'Lietuva'}\n\n\n@pytest.mark.parametrize('backend', ['mongo'])\ndef test_missing_fields(postgresql, mongo, backend, rc, tmp_path):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app 
= create_test_client(context)\n app.authmodel('missingfields', ['insert'])\n resp = app.post('/missingfields', json={'_data': [{'_op': 'insert',\n 'code': 'lt'}, {'_op': 'insert', 'code': 'lv'}, {'_op': 'insert',\n 'code': 'ee'}]})\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['search', 'getone'])\n resp = app.get('/missingfields?select(_id,code,name)')\n assert listdata(resp, sort=True) == [('ee', None), ('lt', None), ('lv',\n None)]\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/missingfields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'code': 'lt'}\n\n\ndef test_base_select(rc, postgresql, request):\n context = bootstrap_manifest(rc,\n \"\"\"\n d | r | b | m | property | type | ref\n datasets/gov/example/base | |\n | |\n | | | Location | |\n | | | | id | integer |\n | | | | name | string |\n | | | | type | string |\n | |\n | | Location | |\n | | | City | |\n | | | | id | |\n | | | | name | string |\n | | | | population | integer |\n \"\"\"\n , backend=postgresql, request=request)\n app = create_test_client(context)\n app.authorize(['spinta_set_meta_fields'])\n app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])\n app.authmodel('datasets/gov/example/base/City', ['insert', 'delete',\n 'getall', 'search'])\n _id = str(uuid.uuid4())\n app.post('/datasets/gov/example/base/Location', json={'_id': _id, 'id':\n 1, 'name': 'Base location', 'type': 'city'})\n app.post('/datasets/gov/example/base/City', json={'_id': _id, 'name':\n 'City', 'population': 100})\n resp = app.get(\n '/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)'\n )\n assert resp.json()['_data'] == [{'_base': {'name': 'Base 
location',\n 'type': 'city'}, 'id': 1, 'name': 'City', 'population': 100}]\n\n\n@pytest.mark.models('backends/mongo/report', 'backends/postgres/report')\ndef test_select_revision(model, app):\n app.authmodel(model, ['search', 'getone', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision']\n resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')\n assert json.loads(resp.content) == {'_revision': revision}\n",
"step-5": "import uuid\nimport json\n\nimport pytest\nimport requests\nimport httpx\nfrom spinta.testing.manifest import bootstrap_manifest\n\nfrom spinta.utils.data import take\nfrom spinta.testing.utils import error\nfrom spinta.testing.utils import get_error_codes, RowIds\nfrom spinta.testing.context import create_test_context\nfrom spinta.testing.client import create_test_client\nfrom spinta.manifests.tabular.helpers import striptable\nfrom spinta.testing.tabular import create_tabular_manifest\nfrom spinta.testing.data import listdata\n\n\ntest_data = [\n {\n '_type': 'report',\n 'status': 'OK',\n 'report_type': 'STV',\n 'count': 10,\n 'notes': [{\n 'note': 'hello',\n 'note_type': 'simple',\n 'create_date': '2019-03-14',\n }],\n 'operating_licenses': [{\n 'license_types': ['valid', 'invalid'],\n }],\n },\n {\n '_type': 'report',\n 'status': 'invalid',\n 'report_type': 'VMI',\n 'count': 42,\n 'notes': [{\n 'note': 'world',\n 'note_type': 'daily',\n 'create_date': '2019-04-20',\n }],\n 'operating_licenses': [{\n 'license_types': ['expired'],\n }],\n },\n {\n '_type': 'report',\n 'status': 'invalid',\n 'report_type': 'STV',\n 'count': 13,\n 'notes': [{\n 'note': 'foo bar',\n 'note_type': 'important',\n 'create_date': '2019-02-01',\n }],\n },\n]\n\n\ndef _push_test_data(app, model, data=None):\n app.authmodel(model, ['insert'])\n resp = app.post('/', json={'_data': [\n {\n **res,\n '_op': 'insert',\n '_type': model,\n }\n for res in data or test_data\n ]})\n assert resp.status_code == 200, resp.json()\n resp = resp.json()\n assert '_data' in resp, resp\n return resp['_data']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_exact(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?status=\"OK\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == 
r1['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_exact_lower(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_exact_non_string(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search, non string type\n resp = app.get(f'/{model}?count=13')\n data = resp.json()['_data']\n\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # single field fsearch, non string type\n resp = app.get(f'/{model}?count=\"abc\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n # single non-existing field value search\n resp = app.get(f'/{model}?status=\"o\"')\n data = resp.json()['_data']\n assert len(data) == 0\n\n # single non-existing field search\n resp = app.get(f'/{model}?state=\"o\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"FieldNotInResource\"]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_exact_multiple_props(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&report_type.lower()=\"stv\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_exact_same_prop_multiple_times(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&status.lower()=\"ok\"')\n data = 
resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_gt(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?count>40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # search for string value\n resp = app.get(f'/{model}?status>\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count>40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count>40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # test `greater_than` works as expected\n resp = app.get(f'/{model}?count>42')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_gt_with_nested_date(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>\"2019-04-19\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_gte(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?count>=40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # search for string value\n resp = app.get(f'/{model}?status>=\"ok\"')\n assert resp.status_code == 400\n assert 
get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count>=40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count>=40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # test `greater_than` works as expected\n resp = app.get(f'/{model}?count>=42')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ge_with_nested_date(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>=\"2019-04-20\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_lt(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?count<12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n # search for string value\n resp = app.get(f'/{model}?status<\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count<20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count<50&report_type.lower()=\"vmi\"')\n data = 
resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # test `lower_than` works as expected\n resp = app.get(f'/{model}?count<10')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_lt_with_nested_date(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<\"2019-02-02\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_lte(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?count<=12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n # search for string value\n resp = app.get(f'/{model}?status<=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count<=20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count<=50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # test `lower_than` works as expected\n resp = app.get(f'/{model}?count<=10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_le_with_nested_date(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n 
resp = app.get(f'/{model}?recurse(create_date)<=\"2019-02-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n\n # single field search\n resp = app.get(f'/{model}?status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne_lower(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n # single field search, case insensitive\n resp = app.get(f'/{model}?status.lower()!=\"ok\"')\n assert ids(resp) == [1, 2]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne_multiple_props(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count!=10&count!=42')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne_multiple_props_and_logic(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?status.lower()!=\"ok\"&report_type.lower()=\"stv\"')\n assert ids(resp) == [2]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne_nested(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n # test `ne` with nested structure\n resp = app.get(f'/{model}?notes.create_date!=\"2019-02-01\"&status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models(\n 
'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne_nested_missing_data(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n # test `ne` with nested structures and not full data in all resources\n resp = app.get(f'/{model}?operating_licenses.license_types!=\"valid\"')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_contains(model, context, app, mocker):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_contains_case_insensitive(model, context, app, mocker):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n # single field search, case insensitive\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_contains_multi_field(model, context, app, mocker):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?status.contains(\"valid\")&report_type.lower().contains(\"tv\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?status.contains(\"valid\")&report_type.contains(\"TV\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # multi field search\n # test if operators are joined with 
AND logic for same field\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&report_type.lower().contains(\"mi\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?status.contains(\"valid\")&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_contains_type_check(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date).contains(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_contains_with_select(model, context, app, mocker):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n\n # `contains` with select\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)')\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {\n 'count': 42,\n }\n\n # `contains` with select and always_show_id\n mocker.patch.object(context.get('config'), 'always_show_id', True)\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)')\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {\n '_id': r2['_id'],\n 'count': 42,\n }\n\n # `contains` with always_show_id should return just id\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {\n '_id': r2['_id'],\n }\n\n\n@pytest.mark.models(\n 
'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_select_unknown_property(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_select_unknown_property_in_object(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(notes.nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_startswith(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?report_type.startswith(\"VM\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # single field search, case insensitive\n resp = app.get(f'/{model}?report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?status.startswith(\"in\")&report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?report_type.lower().startswith(\"st\")&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n # sanity check that `startswith` searches from the start\n resp = app.get(f'/{model}?status.startswith(\"valid\")')\n data = resp.json()['_data']\n assert len(data) == 0\n\n # `startswith` type check\n resp = 
app.get(f'/{model}?notes.create_date.startswith(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_nested(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # nested `exact` search\n resp = app.get(f'/{model}?notes.note=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # nested `exact` search, case insensitive\n resp = app.get(f'/{model}?notes.note.lower()=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # nested `exact` search with dates\n resp = app.get(f'/{model}?notes.create_date=\"2019-03-14\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n # nested `gt` search\n resp = app.get(f'/{model}?notes.create_date>\"2019-04-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # nested non existant field\n resp = app.get(f'/{model}?notes.foo.bar=\"baz\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"FieldNotInResource\"]\n\n # nested `contains` search\n resp = app.get(f'/{model}?notes.note.contains(\"bar\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_nested_contains(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types.contains(\"lid\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_nested_startswith(model, context, app):\n app.authmodel(model, ['search'])\n r1, r2, r3, = 
_push_test_data(app, model)\n\n # nested `startswith` search\n resp = app.get(f'/{model}?notes.note.startswith(\"fo\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n resp = app.get(f'/{model}?operating_licenses.license_types.startswith(\"exp\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\ndef ids(resources):\n if isinstance(resources, (requests.models.Response, httpx.Response)):\n resp = resources\n assert resp.status_code == 200, resp.json()\n resources = resp.json()['_data']\n return [r['_id'] for r in resources]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_or(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=42|status.lower()=\"ok\"')\n assert ids(resp) == [0, 1]\n\n resp = app.get(f'/{model}?count<=10|count=13')\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_nested_recurse(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(note)=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_nested_recurse_lower(model, context, app):\n r1, r2, r3, = ids(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(status).lower()=\"ok\"')\n assert ids(resp) == [r1]\n\n\n@pytest.mark.models(\n 'backends/mongo/recurse',\n 'backends/postgres/recurse',\n)\ndef test_search_nested_recurse_multiple_props(model, context, app):\n r1, r2, = ids(_push_test_data(app, model, [\n {\n 'title': \"Org\",\n 'country': 'fi',\n 'govids': [\n {'govid': '1', 'country': 'fi'},\n {'govid': '2', 
'country': 'se'},\n ]\n },\n {\n 'title': \"Org\",\n 'country': 'no',\n 'govids': [\n {'govid': '3', 'country': 'no'},\n ]\n },\n ]))\n app.authmodel(model, ['search'])\n\n resp = app.get(f'/{model}?recurse(country)=\"se\"')\n assert ids(resp) == [r1]\n\n resp = app.get(f'/{model}?recurse(country)=\"fi\"')\n assert ids(resp) == [r1]\n\n resp = app.get(f'/{model}?recurse(country)=\"no\"')\n assert ids(resp) == [r2]\n\n\n@pytest.mark.models(\n 'backends/mongo/recurse',\n 'backends/postgres/recurse',\n)\ndef test_search_recurse_multiple_props_lower(model, app):\n r1, r2, = ids(_push_test_data(app, model, [\n {\n 'title': \"Org\",\n 'country': 'fi',\n 'govids': [\n {'govid': '1', 'country': 'FI'},\n {'govid': '2', 'country': 'SE'},\n ]\n },\n {\n 'title': \"Org\",\n 'country': 'no',\n 'govids': [\n {'govid': '3', 'country': 'NO'},\n ]\n },\n ]))\n app.authmodel(model, ['search'])\n\n resp = app.get(f'/{model}?recurse(country).lower()=\"se\"')\n assert ids(resp) == [r1]\n\n resp = app.get(f'/{model}?recurse(country).lower()=\"fi\"')\n assert ids(resp) == [r1]\n\n resp = app.get(f'/{model}?recurse(country).lower()=\"no\"')\n assert ids(resp) == [r2]\n\n\n# TODO: add mongo\ndef test_search_any(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",count,10,42)')\n assert ids(resp) == [0, 1]\n\n resp = app.get(f'/{model}?any(\"ne\",count,42)')\n assert ids(resp) == [0, 2]\n\n\n# TODO: add mongo\ndef test_search_any_in_list(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",notes.note,\"hello\",\"world\")')\n assert sorted(ids(resp)) == [0, 1]\n\n resp = app.get(f'/{model}?any(\"ne\",notes.note,\"foo bar\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\n# TODO: add mongo\ndef test_search_any_in_list_of_scalars(app):\n model = 'backends/postgres/report'\n 
app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",operating_licenses.license_types,\"valid\",\"invalid\",\"expired\")')\n assert sorted(ids(resp)) == [0, 1]\n\n resp = app.get(f'/{model}?any(\"ne\",operating_licenses.license_types,\"expired\")')\n assert sorted(ids(resp)) == [0]\n\n\n# TODO: add mongo\ndef test_search_any_recurse(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status),\"OK\",\"none\")')\n assert ids(resp) == [0]\n\n\n# TODO: add mongo\ndef test_search_any_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status).lower(),\"ok\",\"none\")')\n assert ids(resp) == [0]\n\n\n# TODO: add mongo\ndef test_search_any_contains(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",status,\"inv\",\"val\",\"lid\")')\n assert sorted(ids(resp)) == [1, 2]\n\n\n# TODO: add mongo\ndef test_search_any_contains_nested(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",notes.note,\"hel\",\"wor\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\n# TODO: add mongo\ndef test_search_any_contains_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",recurse(status).lower(),\"o\",\"k\")')\n assert sorted(ids(resp)) == [0]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_id_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = 
RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.contains(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_id_not_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"AAAAA\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_id_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][:5]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_id_not_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == []\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_revision_contains(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_revision.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_revision_startswith(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision'][:5]\n resp = app.get(f'/{model}?_revision.startswith(\"{revision}\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef 
test_search_group(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"&status=\"OK\")')\n assert ids(resp) == [0]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_select_in_or(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"|status=\"OK\")&select(_id)')\n # XXX: Flaky test, some times it gives [2, 0], don't know why.\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_lower_contains(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?report_type.lower().contains(\"st\")')\n # XXX: Flaky test, some times it gives [2, 0], don't know why.\n assert ids(resp) == [0, 2]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [\n {'status': 'OK'},\n {},\n ]))\n resp = app.get(f'/{model}?status=null')\n assert ids(resp) == [1]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_not_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [\n {'status': 'OK'},\n {},\n ]))\n resp = app.get(f'/{model}?status!=null')\n assert ids(resp) == [0]\n\n\n@pytest.mark.parametrize('backend', ['default', 'mongo'])\ndef test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):\n rc = rc.fork({\n 'backends': [backend],\n 'manifests.default': {\n 'type': 'tabular',\n 'path': str(tmp_path / 'manifest.csv'),\n 'backend': backend,\n },\n })\n\n # Create data into a extrafields model with code and name properties.\n create_tabular_manifest(tmp_path 
/ 'manifest.csv', striptable('''\n m | property | type\n extrafields |\n | code | string\n | name | string\n '''))\n context = create_test_context(rc)\n request.addfinalizer(context.wipe_all)\n app = create_test_client(context)\n app.authmodel('extrafields', ['insert'])\n resp = app.post('/extrafields', json={'_data': [\n {'_op': 'insert', 'code': 'lt', 'name': 'Lietuva'},\n {'_op': 'insert', 'code': 'lv', 'name': 'Latvija'},\n {'_op': 'insert', 'code': 'ee', 'name': 'Estija'},\n ]})\n assert resp.status_code == 200, resp.json()\n\n # Now try to read from same model, but loaded with just one property.\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''\n m | property | type\n extrafields |\n | name | string\n '''))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('extrafields', ['getall', 'getone'])\n resp = app.get('/extrafields')\n assert listdata(resp, sort=True) == [\n \"Estija\",\n \"Latvija\",\n \"Lietuva\",\n ]\n\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/extrafields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'name': 'Lietuva'}\n\n\n@pytest.mark.parametrize('backend', ['mongo'])\ndef test_missing_fields(postgresql, mongo, backend, rc, tmp_path):\n rc = rc.fork({\n 'backends': [backend],\n 'manifests.default': {\n 'type': 'tabular',\n 'path': str(tmp_path / 'manifest.csv'),\n 'backend': backend,\n },\n })\n\n # Create data into a extrafields model with code and name properties.\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''\n m | property | type\n missingfields |\n | code | string\n '''))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['insert'])\n resp = app.post('/missingfields', json={'_data': [\n {'_op': 'insert', 'code': 'lt'},\n {'_op': 'insert', 'code': 'lv'},\n {'_op': 'insert', 'code': 'ee'},\n ]})\n assert resp.status_code == 200, resp.json()\n\n # Now try 
to read from same model, but loaded with just one property.\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''\n m | property | type\n missingfields |\n | code | string\n | name | string\n '''))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['search', 'getone'])\n resp = app.get('/missingfields?select(_id,code,name)')\n assert listdata(resp, sort=True) == [\n ('ee', None),\n ('lt', None),\n ('lv', None),\n ]\n\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/missingfields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'code': 'lt'}\n\n\ndef test_base_select(rc, postgresql, request):\n context = bootstrap_manifest(rc, '''\n d | r | b | m | property | type | ref\n datasets/gov/example/base | |\n | |\n | | | Location | |\n | | | | id | integer |\n | | | | name | string |\n | | | | type | string |\n | |\n | | Location | |\n | | | City | |\n | | | | id | |\n | | | | name | string |\n | | | | population | integer |\n ''', backend=postgresql, request=request)\n\n app = create_test_client(context)\n app.authorize(['spinta_set_meta_fields'])\n app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])\n app.authmodel('datasets/gov/example/base/City', ['insert', 'delete', 'getall', 'search'])\n\n _id = str(uuid.uuid4())\n app.post('/datasets/gov/example/base/Location', json={\n '_id': _id,\n 'id': 1,\n 'name': 'Base location',\n 'type': 'city'\n })\n app.post('/datasets/gov/example/base/City', json={\n '_id': _id,\n 'name': 'City',\n 'population': 100\n })\n\n resp = app.get('/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)')\n assert resp.json()['_data'] == [\n {\n '_base': {'name': 'Base location', 'type': 'city'},\n 'id': 1,\n 'name': 'City',\n 'population': 100\n }\n ]\n\n\n@pytest.mark.models(\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_select_revision(model, app):\n 
app.authmodel(model, ['search', 'getone', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision']\n resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')\n assert json.loads(resp.content) == {\n '_revision': revision\n }\n",
"step-ids": [
43,
48,
56,
58,
63
]
}
|
[
43,
48,
56,
58,
63
] |
<|reserved_special_token_0|>
class Ground:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def draw(self, window):
pygame.draw.rect(window, self.color, self.rect)
return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ground:
def __init__(self, space):
self.w = WIDTH - 20
self.h = 25
self.x = 10
self.y = HEIGHT - self.h
self.rect = Rect(self.x, self.y, self.w, self.h)
self.color = Color(100, 6, 107)
self.rigidbody = Body(body_type=Body.STATIC)
self.rigidbody.position = self.x + self.w / 2, self.y
self.hitbox = Poly.create_box(self.rigidbody, (self.w, self.h))
self.hitbox.elasticity = 0
self.hitbox.mass = 1
self.hitbox.friction = 0
space.add(self.rigidbody, self.hitbox)
<|reserved_special_token_0|>
def draw(self, window):
pygame.draw.rect(window, self.color, self.rect)
return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ground:
def __init__(self, space):
self.w = WIDTH - 20
self.h = 25
self.x = 10
self.y = HEIGHT - self.h
self.rect = Rect(self.x, self.y, self.w, self.h)
self.color = Color(100, 6, 107)
self.rigidbody = Body(body_type=Body.STATIC)
self.rigidbody.position = self.x + self.w / 2, self.y
self.hitbox = Poly.create_box(self.rigidbody, (self.w, self.h))
self.hitbox.elasticity = 0
self.hitbox.mass = 1
self.hitbox.friction = 0
space.add(self.rigidbody, self.hitbox)
def update(self, dt):
return
def draw(self, window):
pygame.draw.rect(window, self.color, self.rect)
return
<|reserved_special_token_1|>
import pygame
from pygame import Rect, Color
from pymunk import Body, Poly
from config import WIDTH, HEIGHT
class Ground:
    """Static floor spanning the bottom of the window.

    Couples a pygame drawing rectangle with a static pymunk body/shape so
    the ground is both rendered and present in the physics space.
    """

    def __init__(self, space):
        width, height = WIDTH - 20, 25
        left, top = 10, HEIGHT - height
        self.w, self.h = width, height
        self.x, self.y = left, top
        # Drawing representation.
        self.rect = Rect(left, top, width, height)
        self.color = Color(100, 6, 107)
        # Physics representation: a static body anchored at the rect's
        # horizontal centre, with a box shape matching the rect's size.
        body = Body(body_type=Body.STATIC)
        body.position = left + width / 2, top
        shape = Poly.create_box(body, (width, height))
        shape.elasticity = 0
        shape.mass = 1
        shape.friction = 0
        self.rigidbody = body
        self.hitbox = shape
        space.add(body, shape)

    def update(self, dt):
        """No per-frame behaviour; the ground is static."""
        return

    def draw(self, window):
        """Render the ground rectangle onto *window*."""
        pygame.draw.rect(window, self.color, self.rect)
        return
<|reserved_special_token_1|>
import pygame
from pygame import Rect, Color
from pymunk import Body, Poly
from config import WIDTH, HEIGHT
class Ground:
def __init__ (self, space):
# size
self.w = WIDTH - 20
self.h = 25
# position
self.x = 10
self.y = HEIGHT - self.h
# pygame rectangle
self.rect = Rect (self.x, self.y, self.w, self.h)
self.color = Color (100, 6, 107)
# physics
self.rigidbody = Body (body_type=Body.STATIC)
self.rigidbody.position = self.x + self.w / 2, self.y
self.hitbox = Poly.create_box (self.rigidbody, (self.w, self.h))
self.hitbox.elasticity = 0
self.hitbox.mass = 1
self.hitbox.friction = 0
space.add (self.rigidbody, self.hitbox)
def update (self, dt):
return
def draw (self, window):
pygame.draw.rect (window, self.color, self.rect)
return
|
flexible
|
{
"blob_id": "32fc0db68c32c2e644f9c1c2318fbeff41a0543d",
"index": 5703,
"step-1": "<mask token>\n\n\nclass Ground:\n <mask token>\n <mask token>\n\n def draw(self, window):\n pygame.draw.rect(window, self.color, self.rect)\n return\n",
"step-2": "<mask token>\n\n\nclass Ground:\n\n def __init__(self, space):\n self.w = WIDTH - 20\n self.h = 25\n self.x = 10\n self.y = HEIGHT - self.h\n self.rect = Rect(self.x, self.y, self.w, self.h)\n self.color = Color(100, 6, 107)\n self.rigidbody = Body(body_type=Body.STATIC)\n self.rigidbody.position = self.x + self.w / 2, self.y\n self.hitbox = Poly.create_box(self.rigidbody, (self.w, self.h))\n self.hitbox.elasticity = 0\n self.hitbox.mass = 1\n self.hitbox.friction = 0\n space.add(self.rigidbody, self.hitbox)\n <mask token>\n\n def draw(self, window):\n pygame.draw.rect(window, self.color, self.rect)\n return\n",
"step-3": "<mask token>\n\n\nclass Ground:\n\n def __init__(self, space):\n self.w = WIDTH - 20\n self.h = 25\n self.x = 10\n self.y = HEIGHT - self.h\n self.rect = Rect(self.x, self.y, self.w, self.h)\n self.color = Color(100, 6, 107)\n self.rigidbody = Body(body_type=Body.STATIC)\n self.rigidbody.position = self.x + self.w / 2, self.y\n self.hitbox = Poly.create_box(self.rigidbody, (self.w, self.h))\n self.hitbox.elasticity = 0\n self.hitbox.mass = 1\n self.hitbox.friction = 0\n space.add(self.rigidbody, self.hitbox)\n\n def update(self, dt):\n return\n\n def draw(self, window):\n pygame.draw.rect(window, self.color, self.rect)\n return\n",
"step-4": "import pygame\nfrom pygame import Rect, Color\nfrom pymunk import Body, Poly\nfrom config import WIDTH, HEIGHT\n\n\nclass Ground:\n\n def __init__(self, space):\n self.w = WIDTH - 20\n self.h = 25\n self.x = 10\n self.y = HEIGHT - self.h\n self.rect = Rect(self.x, self.y, self.w, self.h)\n self.color = Color(100, 6, 107)\n self.rigidbody = Body(body_type=Body.STATIC)\n self.rigidbody.position = self.x + self.w / 2, self.y\n self.hitbox = Poly.create_box(self.rigidbody, (self.w, self.h))\n self.hitbox.elasticity = 0\n self.hitbox.mass = 1\n self.hitbox.friction = 0\n space.add(self.rigidbody, self.hitbox)\n\n def update(self, dt):\n return\n\n def draw(self, window):\n pygame.draw.rect(window, self.color, self.rect)\n return\n",
"step-5": "import pygame\nfrom pygame import Rect, Color\n\nfrom pymunk import Body, Poly\n\nfrom config import WIDTH, HEIGHT\n\nclass Ground:\n\n def __init__ (self, space):\n \n # size\n self.w = WIDTH - 20\n self.h = 25\n\n # position\n self.x = 10\n self.y = HEIGHT - self.h\n\n # pygame rectangle\n self.rect = Rect (self.x, self.y, self.w, self.h)\n self.color = Color (100, 6, 107)\n\n # physics\n self.rigidbody = Body (body_type=Body.STATIC)\n self.rigidbody.position = self.x + self.w / 2, self.y\n\n self.hitbox = Poly.create_box (self.rigidbody, (self.w, self.h))\n self.hitbox.elasticity = 0\n self.hitbox.mass = 1\n self.hitbox.friction = 0\n\n space.add (self.rigidbody, self.hitbox)\n\n\n def update (self, dt):\n return\n\n\n\n def draw (self, window):\n \n pygame.draw.rect (window, self.color, self.rect)\n\n return",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import decimal
import math
from typing import List, Set, Tuple, Union

from sqlalchemy import func, text

from .model import BaseMixin
from ..core.db import db
# A sort specification: a list of (column_name, direction) pairs, e.g.
# [("id", "desc")] -- see the ``field, sort = order`` unpacking below.
# BUGFIX: the original ``List[Set(str, Union(str, int, decimal.Decimal))]``
# called the typing constructs as plain functions, raising TypeError at
# import time; subscripted ``Tuple`` matches the actual pair usage.
Orders = List[Tuple[str, str]]
class BaseDBMgr:
    """Generic CRUD helper over SQLAlchemy for ``BaseMixin`` models.

    Models exposing a ``deleted_at`` attribute are treated as soft-deletable:
    every read/update path here filters out rows where ``deleted_at != 0``.
    """

    def get_page(self, cls_: BaseMixin, filters: set, orders: Orders = list(),
                 field: tuple = (), page: int = 1, per_page: int = 10) -> dict:
        """Return one page of rows plus paging metadata.

        :param cls_: model entity class
        :param filters: SQLAlchemy filter expressions
        :param orders: list of ``(column, 'asc'|'desc')`` pairs
        :param field: columns to include per row (all columns when empty)
        :param page: 1-based page number
        :param per_page: number of rows per page
        :return: ``{'page': {...}, 'items': [...]}``
        """
        res = {
            'page': {
                'current_page': page,
                'per_page': per_page,
                'total_page': 0,
                'count': 0,
            },
            'items': [],
        }
        query = db.query(cls_).filter(*filters)
        if hasattr(cls_, 'deleted_at'):
            query = query.filter(cls_.deleted_at == 0)
        res['page']['count'] = query.count()
        res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)
        for order in orders:
            # BUGFIX: unpack into ``col`` -- the original reused ``field`` and
            # clobbered the select-columns parameter for the loop below.
            col, sort = order
            sort = 'desc' if sort not in ['asc', 'desc'] else sort
            query = query.order_by(text(f'{col} {sort}'))
        data = query.offset((page - 1) * per_page).limit(per_page)
        if not field:
            res['items'] = [item.to_dict() for item in data]
        else:
            res['items'] = [item.to_dict(only=field) for item in data]
        return res

    def get_all(self, cls_: BaseMixin, filters: set, orders: Orders = list(),
                field: tuple = (), limit: int = 0) -> list:
        """Return every row matching *filters* as a list of dicts.

        :param cls_: model entity class
        :param filters: SQLAlchemy filter expressions
        :param orders: list of ``(column, 'asc'|'desc')`` pairs
        :param field: columns to include per row (all columns when empty)
        :param limit: maximum number of rows (0 means unlimited)
        :return: list of row dicts
        """
        query = db.query(cls_)
        if filters:
            query = query.filter(*filters)
        if hasattr(cls_, 'deleted_at'):
            query = query.filter(cls_.deleted_at == 0)
        for order in orders:
            # BUGFIX: as in get_page, do not shadow the ``field`` parameter.
            col, sort = order
            sort = 'desc' if sort not in ['asc', 'desc'] else sort
            query = query.order_by(text(f'{col} {sort}'))
        if limit != 0:
            query = query.limit(limit)
        rows = query.all()
        # BUGFIX: the original iterated an undefined name (``for item in
        # items`` before ``items`` existed -- NameError); iterate the
        # fetched rows instead.
        if not field:
            items = [row.to_dict() for row in rows]
        else:
            items = [row.to_dict(only=field) for row in rows]
        return items

    def get_first(self, cls_: BaseMixin, filters: set, orders: Orders = list(),
                  field: tuple = ()) -> dict:
        """Return the first row matching *filters*, or ``None`` when absent."""
        items = self.get_all(cls_, filters, orders, field, limit=1)
        return items[0] if items else None

    def add(self, cls_: BaseMixin, data: dict) -> int:
        """Insert one row built from *data* and return its primary key."""
        item = cls_(**data)
        db.add(item)
        db.flush()
        return item.id

    def update(self, cls_: BaseMixin, data: dict, filters: set) -> int:
        """Update matching rows with *data*; return the affected row count."""
        query = db.query(cls_).filter(*filters)
        if hasattr(cls_, 'deleted_at'):
            query = query.filter(cls_.deleted_at == 0)
        return query.update(data, synchronize_session=False)

    def delete(self, cls_: BaseMixin, filters: set) -> int:
        """Delete matching rows; return the affected row count.

        Models with ``deleted_at`` are removed row by row via each item's
        ``delete()`` (presumably a soft delete defined on BaseMixin --
        confirm); other models are bulk-deleted. Commits in both cases.
        """
        query = db.query(cls_).filter(*filters)
        if hasattr(cls_, 'deleted_at'):
            items = query.filter(cls_.deleted_at == 0).all()
            for item in items:
                item.delete()
            affect_rows = len(items)
        else:
            affect_rows = query.filter(*filters).delete(synchronize_session=False)
        db.commit()
        return affect_rows

    def count(self, cls_: BaseMixin, filters: set, field=None) -> int:
        """Count matching rows, optionally counting a specific column.

        :param cls_: model entity class
        :param filters: SQLAlchemy filter expressions
        :param field: column to count (``None`` counts whole rows)
        :return: number of matching rows
        """
        query = db.query(cls_).filter(*filters)
        if hasattr(cls_, 'deleted_at'):
            query = query.filter(cls_.deleted_at == 0)
        if field is None:
            return query.count()
        # BUGFIX: Query.count() accepts no argument; count a single column
        # (skipping NULLs) with func.count instead.
        return query.with_entities(func.count(field)).scalar()
|
normal
|
{
"blob_id": "2c90c4e0b42a75d6d387b9b2d0118d8e991b5a08",
"index": 39,
"step-1": "<mask token>\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return items\n\n def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(\n ), field: tuple=()) ->dict:\n 
\"\"\"获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n \"\"\"\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n def add(self, cls_: BaseMixin, data: dict) ->int:\n \"\"\"插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n \"\"\"\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n return query.update(data, synchronize_session=False)\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return items\n\n def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(\n ), field: tuple=()) ->dict:\n 
\"\"\"获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n \"\"\"\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n def add(self, cls_: BaseMixin, data: dict) ->int:\n \"\"\"插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n \"\"\"\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n return query.update(data, synchronize_session=False)\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n\n def count(self, cls_: BaseMixin, filters: set, field=None) ->int:\n \"\"\"获取满足条件的总行数\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @param string|None field 统计的字段\n @return int\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n if field is None:\n return query.count()\n else:\n return query.count(field)\n",
"step-4": "<mask token>\nOrders = List[Set(str, Union(str, int, decimal.Decimal))]\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), page: int=1, per_page: int=10) ->dict:\n \"\"\"获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n \"\"\"\n res = {'page': {'current_page': page, 'per_page': per_page,\n 'total_page': 0, 'count': 0}, 'items': []}\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n data = query.offset((page - 1) * per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n return res\n\n def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),\n field: tuple=(), limit: int=0) ->list:\n \"\"\"获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n \"\"\"\n query = db.query(cls_)\n if filters:\n query = query.filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n if limit != 0:\n query = query.limit(limit)\n query = query.all()\n if not field:\n items = [item.to_dict() for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n return items\n\n def get_first(self, cls_: BaseMixin, filters: set, 
orders: Orders=list(\n ), field: tuple=()) ->dict:\n \"\"\"获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n \"\"\"\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n def add(self, cls_: BaseMixin, data: dict) ->int:\n \"\"\"插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n \"\"\"\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n return query.update(data, synchronize_session=False)\n\n def delete(self, cls_: BaseMixin, filters: set) ->int:\n \"\"\"更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at == 0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session\n =False)\n db.commit()\n return affect_rows\n\n def count(self, cls_: BaseMixin, filters: set, field=None) ->int:\n \"\"\"获取满足条件的总行数\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @param string|None field 统计的字段\n @return int\n \"\"\"\n query = db.query(cls_).filter(*filters)\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at == 0)\n if field is None:\n return query.count()\n else:\n return query.count(field)\n",
"step-5": "import math\nimport decimal\nfrom typing import Union, List, Set\n\nfrom sqlalchemy import text\n\nfrom .model import BaseMixin\nfrom ..core.db import db\n\n\nOrders = List[Set(str, Union(str, int, decimal.Decimal))]\n\n\nclass BaseDBMgr:\n\n def get_page(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), page:int=1, per_page:int=10)->dict:\n '''获取分页数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int page 页码\n @param int per_page 每页数据数量\n @return dict\n '''\n res = {\n 'page': {\n 'current_page': page,\n 'per_page': per_page,\n 'total_page': 0,\n 'count': 0,\n },\n 'items': []\n }\n query = db.query(cls_).filter(*filters)\n \n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at==0)\n\n res['page']['count'] = query.count()\n res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)\n\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n\n data = query.offset((page-1)*per_page).limit(per_page)\n if not field:\n res['items'] = [item.to_dict() for item in data]\n else:\n res['items'] = [item.to_dict(only=field) for item in data]\n \n return res\n\n\n def get_all(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), limit:int=0)->list:\n '''获取所有满足条件的数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @param int limit 取数据最大数量\n @return list\n '''\n query = db.query(cls_)\n \n if filters:\n query = query.filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at==0)\n\n for order in orders:\n field, sort = order\n sort = 'desc' if sort not in ['asc', 'desc'] else sort\n query = query.order_by(text(f'{field} {sort}'))\n\n if limit != 0:\n query = query.limit(limit)\n \n query = query.all()\n\n if not field:\n items = [item.to_dict() 
for item in items]\n else:\n items = [item.to_dict(only=field) for item in items]\n \n return items\n\n\n def get_first(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=())->dict:\n '''获取所有满足条件的第一条数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 查询条件\n @param str order 排序\n @param tuple field 返回字段\n @return dict\n '''\n items = self.get_all(cls_, filters, orders, field, limit=1)\n return items[0] if items else None\n\n\n def add(self, cls_:BaseMixin, data:dict)->int:\n '''插入一条数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @return int 插入数据的主键\n '''\n item = cls_(**data)\n db.add(item)\n db.flush()\n return item.id\n\n\n def update(self, cls_:BaseMixin, data:dict, filters:set)->int:\n '''更新数据\n @param BaseMixin cls 数据库模型实体类\n @param dict data 数据\n @param set filters 过滤条件\n @return int 影响的行数\n '''\n query = db.query(cls_).filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at==0)\n\n return query.update(data, synchronize_session=False)\n\n\n def delete(self, cls_:BaseMixin, filters:set)->int:\n '''更新数据\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @return int 影响的行数\n '''\n query = db.query(cls_).filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n items = query.filter(cls_.deleted_at==0).all()\n for item in items:\n item.delete()\n affect_rows = len(items)\n else:\n affect_rows = query.filter(*filters).delete(synchronize_session=False)\n db.commit()\n return affect_rows\n\n\n def count(self, cls_:BaseMixin, filters:set, field=None)->int:\n '''获取满足条件的总行数\n @param BaseMixin cls 数据库模型实体类\n @param set filters 过滤条件\n @param string|None field 统计的字段\n @return int\n '''\n query = db.query(cls_).filter(*filters)\n\n if hasattr(cls_, 'deleted_at'):\n query = query.filter(cls_.deleted_at==0)\n \n if field is None:\n return query.count()\n else:\n return query.count(field)\n",
"step-ids": [
3,
7,
8,
9,
11
]
}
|
[
3,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in l:
card[i - a] += 1
for j in v:
if (j >= a) & (j <= b):
print(card[j - a], end=' ')
else:
print(0, end=' ')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
n = int(sys.stdin.readline().rstrip())
l = list(map(int, sys.stdin.readline().rstrip().split()))
m = int(sys.stdin.readline().rstrip())
v = list(map(int, sys.stdin.readline().rstrip().split()))
card = [0] * (max(l) - min(l) + 1)
a = min(l)
b = max(l)
for i in l:
card[i - a] += 1
for j in v:
if (j >= a) & (j <= b):
print(card[j - a], end=' ')
else:
print(0, end=' ')
<|reserved_special_token_1|>
import sys

# Read n card values, then answer m queries: for each queried value, print
# how many cards carry it, each answer followed by a single space.
n = int(sys.stdin.readline().rstrip())
l = list(map(int, sys.stdin.readline().rstrip().split()))
m = int(sys.stdin.readline().rstrip())
v = list(map(int, sys.stdin.readline().rstrip().split()))

# Tally occurrences in a dict keyed by value (instead of the original dense
# min..max array); absent keys naturally yield 0 for out-of-range queries.
counts = {}
for value in l:
    counts[value] = counts.get(value, 0) + 1

for query in v:
    print(counts.get(query, 0), end=' ')
<|reserved_special_token_1|>
import sys
n = int(sys.stdin.readline().rstrip())
l = list(map(int,sys.stdin.readline().rstrip().split()))
m = int(sys.stdin.readline().rstrip())
v = list(map(int,sys.stdin.readline().rstrip().split()))
card = [0] * (max(l)-min(l)+1)
a = min(l)
b = max(l)
for i in l:
card[i-a]+=1
for j in v:
if ((j>=a)&(j<=b)):
print(card[j-a],end = " ")
else:
print(0, end = " ")
|
flexible
|
{
"blob_id": "6b0081e829f9252e44fa7b81fbfcdd4115856373",
"index": 3748,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in l:\n card[i - a] += 1\nfor j in v:\n if (j >= a) & (j <= b):\n print(card[j - a], end=' ')\n else:\n print(0, end=' ')\n",
"step-3": "<mask token>\nn = int(sys.stdin.readline().rstrip())\nl = list(map(int, sys.stdin.readline().rstrip().split()))\nm = int(sys.stdin.readline().rstrip())\nv = list(map(int, sys.stdin.readline().rstrip().split()))\ncard = [0] * (max(l) - min(l) + 1)\na = min(l)\nb = max(l)\nfor i in l:\n card[i - a] += 1\nfor j in v:\n if (j >= a) & (j <= b):\n print(card[j - a], end=' ')\n else:\n print(0, end=' ')\n",
"step-4": "import sys\nn = int(sys.stdin.readline().rstrip())\nl = list(map(int, sys.stdin.readline().rstrip().split()))\nm = int(sys.stdin.readline().rstrip())\nv = list(map(int, sys.stdin.readline().rstrip().split()))\ncard = [0] * (max(l) - min(l) + 1)\na = min(l)\nb = max(l)\nfor i in l:\n card[i - a] += 1\nfor j in v:\n if (j >= a) & (j <= b):\n print(card[j - a], end=' ')\n else:\n print(0, end=' ')\n",
"step-5": "import sys\nn = int(sys.stdin.readline().rstrip())\nl = list(map(int,sys.stdin.readline().rstrip().split()))\n\nm = int(sys.stdin.readline().rstrip())\nv = list(map(int,sys.stdin.readline().rstrip().split()))\n\ncard = [0] * (max(l)-min(l)+1)\n\na = min(l)\nb = max(l)\n\nfor i in l:\n card[i-a]+=1\n\nfor j in v:\n if ((j>=a)&(j<=b)):\n print(card[j-a],end = \" \")\n else:\n print(0, end = \" \")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
VENV = os.environ[VENV_NAME]
if VENV == '':
print("Environment variable '%s' is empty" % VENV_NAME)
print('Please activate your virtualenv first')
sys.exit(3)
if not os.path.isdir(VENV):
print("Virtual environment '%s' does not exist" % VENV)
print('Please activate a valid virtualenv first')
sys.exit(2)
except KeyError:
print('No virtualenv defined')
print('Please activate a virtualenv (with mkvirtualenv, workon, or pyenv)')
sys.exit(1)
if os.system('git config diff.userdata.textconv $PWD/userdata_decode.py'):
print('Problem configuring Git diff filter for userdata')
if os.system('pre-commit --version'):
os.system('pip install pre-commit')
if os.system('pre-commit install'):
print(
'Error setting up pre-commit hooks, try updating with pip install -U pre-commit'
)
sys.exit(4)
if os.system('pre-commit run --all-files'):
print('Problem running pre-commit hooks, check .pre-commit-config.yaml')
sys.exit(5)
sys.exit(0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
VENV_NAME = 'VIRTUAL_ENV'
VENV = ''
try:
VENV = os.environ[VENV_NAME]
if VENV == '':
print("Environment variable '%s' is empty" % VENV_NAME)
print('Please activate your virtualenv first')
sys.exit(3)
if not os.path.isdir(VENV):
print("Virtual environment '%s' does not exist" % VENV)
print('Please activate a valid virtualenv first')
sys.exit(2)
except KeyError:
print('No virtualenv defined')
print('Please activate a virtualenv (with mkvirtualenv, workon, or pyenv)')
sys.exit(1)
if os.system('git config diff.userdata.textconv $PWD/userdata_decode.py'):
print('Problem configuring Git diff filter for userdata')
if os.system('pre-commit --version'):
os.system('pip install pre-commit')
if os.system('pre-commit install'):
print(
'Error setting up pre-commit hooks, try updating with pip install -U pre-commit'
)
sys.exit(4)
if os.system('pre-commit run --all-files'):
print('Problem running pre-commit hooks, check .pre-commit-config.yaml')
sys.exit(5)
sys.exit(0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
"""Install and activate pre-commit and its Git hooks inside a virtualenv.

Exit codes: 0 success, 1 no virtualenv active, 2 virtualenv path missing,
3 VIRTUAL_ENV set but empty, 4 hook installation failed, 5 hook run failed.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
# Name of the environment variable an active virtualenv exports.
VENV_NAME = 'VIRTUAL_ENV'
VENV = ''
try:
    VENV = os.environ[VENV_NAME]
    if VENV == '':
        print("Environment variable '%s' is empty" % VENV_NAME)
        print('Please activate your virtualenv first')
        sys.exit(3)
    if not os.path.isdir(VENV):
        print("Virtual environment '%s' does not exist" % VENV)
        print('Please activate a valid virtualenv first')
        sys.exit(2)
except KeyError:
    # VIRTUAL_ENV not set at all: no virtualenv is active.
    print('No virtualenv defined')
    print('Please activate a virtualenv (with mkvirtualenv, workon, or pyenv)')
    sys.exit(1)
# Register a Git diff filter so userdata blobs render readably in diffs.
# os.system returns non-zero on failure, hence the truthiness checks below.
if os.system('git config diff.userdata.textconv $PWD/userdata_decode.py'):
    print('Problem configuring Git diff filter for userdata')
# Install pre-commit on demand (version probe fails when it is absent),
# then wire its hooks into this repository.
if os.system('pre-commit --version'):
    os.system('pip install pre-commit')
if os.system('pre-commit install'):
    print(
        'Error setting up pre-commit hooks, try updating with pip install -U pre-commit'
    )
    sys.exit(4)
# Run the hooks once over the whole tree to surface existing violations.
if os.system('pre-commit run --all-files'):
    print('Problem running pre-commit hooks, check .pre-commit-config.yaml')
    sys.exit(5)
sys.exit(0)
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Install and activate pre-commit and its hooks into virtual environment."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
# if sys.version_info[0] > 2 or sys.version_info[1] < 7:
# print("Python 2.7 required")
# sys.exit(1)
VENV_NAME = 'VIRTUAL_ENV'
VENV = ''
try:
VENV = os.environ[VENV_NAME]
if VENV == '':
print("Environment variable '%s' is empty" % VENV_NAME)
print('Please activate your virtualenv first')
sys.exit(3)
if not os.path.isdir(VENV):
print("Virtual environment '%s' does not exist" % VENV)
print('Please activate a valid virtualenv first')
sys.exit(2)
except KeyError:
print('No virtualenv defined')
print('Please activate a virtualenv (with mkvirtualenv, workon, or pyenv)')
sys.exit(1)
if os.system('git config diff.userdata.textconv $PWD/userdata_decode.py'):
print('Problem configuring Git diff filter for userdata')
if os.system('pre-commit --version'):
os.system('pip install pre-commit')
if os.system('pre-commit install'):
print('Error setting up pre-commit hooks, try updating with '
'pip install -U pre-commit')
sys.exit(4)
if os.system('pre-commit run --all-files'):
print('Problem running pre-commit hooks, check .pre-commit-config.yaml')
sys.exit(5)
sys.exit(0)
|
flexible
|
{
"blob_id": "210d1a184d338d77d4c41327d0a9e2a5a56eb2ae",
"index": 2724,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n VENV = os.environ[VENV_NAME]\n if VENV == '':\n print(\"Environment variable '%s' is empty\" % VENV_NAME)\n print('Please activate your virtualenv first')\n sys.exit(3)\n if not os.path.isdir(VENV):\n print(\"Virtual environment '%s' does not exist\" % VENV)\n print('Please activate a valid virtualenv first')\n sys.exit(2)\nexcept KeyError:\n print('No virtualenv defined')\n print('Please activate a virtualenv (with mkvirtualenv, workon, or pyenv)')\n sys.exit(1)\nif os.system('git config diff.userdata.textconv $PWD/userdata_decode.py'):\n print('Problem configuring Git diff filter for userdata')\nif os.system('pre-commit --version'):\n os.system('pip install pre-commit')\nif os.system('pre-commit install'):\n print(\n 'Error setting up pre-commit hooks, try updating with pip install -U pre-commit'\n )\n sys.exit(4)\nif os.system('pre-commit run --all-files'):\n print('Problem running pre-commit hooks, check .pre-commit-config.yaml')\n sys.exit(5)\nsys.exit(0)\n",
"step-3": "<mask token>\nVENV_NAME = 'VIRTUAL_ENV'\nVENV = ''\ntry:\n VENV = os.environ[VENV_NAME]\n if VENV == '':\n print(\"Environment variable '%s' is empty\" % VENV_NAME)\n print('Please activate your virtualenv first')\n sys.exit(3)\n if not os.path.isdir(VENV):\n print(\"Virtual environment '%s' does not exist\" % VENV)\n print('Please activate a valid virtualenv first')\n sys.exit(2)\nexcept KeyError:\n print('No virtualenv defined')\n print('Please activate a virtualenv (with mkvirtualenv, workon, or pyenv)')\n sys.exit(1)\nif os.system('git config diff.userdata.textconv $PWD/userdata_decode.py'):\n print('Problem configuring Git diff filter for userdata')\nif os.system('pre-commit --version'):\n os.system('pip install pre-commit')\nif os.system('pre-commit install'):\n print(\n 'Error setting up pre-commit hooks, try updating with pip install -U pre-commit'\n )\n sys.exit(4)\nif os.system('pre-commit run --all-files'):\n print('Problem running pre-commit hooks, check .pre-commit-config.yaml')\n sys.exit(5)\nsys.exit(0)\n",
"step-4": "<mask token>\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport os\nimport sys\nVENV_NAME = 'VIRTUAL_ENV'\nVENV = ''\ntry:\n VENV = os.environ[VENV_NAME]\n if VENV == '':\n print(\"Environment variable '%s' is empty\" % VENV_NAME)\n print('Please activate your virtualenv first')\n sys.exit(3)\n if not os.path.isdir(VENV):\n print(\"Virtual environment '%s' does not exist\" % VENV)\n print('Please activate a valid virtualenv first')\n sys.exit(2)\nexcept KeyError:\n print('No virtualenv defined')\n print('Please activate a virtualenv (with mkvirtualenv, workon, or pyenv)')\n sys.exit(1)\nif os.system('git config diff.userdata.textconv $PWD/userdata_decode.py'):\n print('Problem configuring Git diff filter for userdata')\nif os.system('pre-commit --version'):\n os.system('pip install pre-commit')\nif os.system('pre-commit install'):\n print(\n 'Error setting up pre-commit hooks, try updating with pip install -U pre-commit'\n )\n sys.exit(4)\nif os.system('pre-commit run --all-files'):\n print('Problem running pre-commit hooks, check .pre-commit-config.yaml')\n sys.exit(5)\nsys.exit(0)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Install and activate pre-commit and its hooks into virtual environment.\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport os\nimport sys\n\n# if sys.version_info[0] > 2 or sys.version_info[1] < 7:\n# print(\"Python 2.7 required\")\n# sys.exit(1)\n\nVENV_NAME = 'VIRTUAL_ENV'\nVENV = ''\ntry:\n VENV = os.environ[VENV_NAME]\n if VENV == '':\n print(\"Environment variable '%s' is empty\" % VENV_NAME)\n print('Please activate your virtualenv first')\n sys.exit(3)\n if not os.path.isdir(VENV):\n print(\"Virtual environment '%s' does not exist\" % VENV)\n print('Please activate a valid virtualenv first')\n sys.exit(2)\n\nexcept KeyError:\n print('No virtualenv defined')\n print('Please activate a virtualenv (with mkvirtualenv, workon, or pyenv)')\n sys.exit(1)\n\nif os.system('git config diff.userdata.textconv $PWD/userdata_decode.py'):\n print('Problem configuring Git diff filter for userdata')\n\nif os.system('pre-commit --version'):\n os.system('pip install pre-commit')\n\nif os.system('pre-commit install'):\n print('Error setting up pre-commit hooks, try updating with '\n 'pip install -U pre-commit')\n sys.exit(4)\n\nif os.system('pre-commit run --all-files'):\n print('Problem running pre-commit hooks, check .pre-commit-config.yaml')\n sys.exit(5)\n\nsys.exit(0)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Sorting components: peak waveform features."""
import numpy as np
from spikeinterface.core.job_tools import fix_job_kwargs
from spikeinterface.core import get_channel_distances
from spikeinterface.sortingcomponents.peak_localization import LocalizeCenterOfMass, LocalizeMonopolarTriangulation
from spikeinterface.sortingcomponents.peak_pipeline import run_peak_pipeline, PipelineNode, ExtractDenseWaveforms
def compute_features_from_peaks(
    recording,
    peaks,
    feature_list=("ptp",),
    feature_params=None,
    ms_before=1.,
    ms_after=1.,
    **job_kwargs,
):
    """Extract features on the fly from the recording given a list of peaks.

    Parameters
    ----------
    recording: RecordingExtractor
        The recording extractor object.
    peaks: array
        Peaks array, as returned by detect_peaks() in "compact_numpy" way.
    feature_list: sequence of str
        Names of the features to compute; must be keys of ``_features_class``
        (e.g. "amplitude", "ptp", "center_of_mass", "energy").
    feature_params: dict or None
        Optional per-feature keyword arguments, keyed by feature name.
    ms_before: float
        The duration in ms before the peak for extracting the features (default 1 ms)
    ms_after: float
        The duration in ms after the peak for extracting the features (default 1 ms)

    {}

    Returns
    -------
    A tuple of features. Even if there is one feature.
    Every feature have shape[0] == peaks.shape[0].
    dtype and other dim depends on features.
    """
    job_kwargs = fix_job_kwargs(job_kwargs)

    # Immutable/None defaults replace the previous mutable default arguments
    # (feature_list=[...], feature_params={}), which are shared across calls.
    if feature_params is None:
        feature_params = {}

    # Every feature node consumes the same dense waveforms, extracted once.
    extract_dense_waveforms = ExtractDenseWaveforms(
        recording, ms_before=ms_before, ms_after=ms_after, return_output=False
    )

    nodes = [
        extract_dense_waveforms,
    ]
    for feature_name in feature_list:
        Class = _features_class[feature_name]
        # Copy so the caller's params dict is never mutated downstream.
        params = feature_params.get(feature_name, {}).copy()
        node = Class(recording, parents=[extract_dense_waveforms], **params)
        nodes.append(node)

    features = run_peak_pipeline(recording, peaks, nodes, job_kwargs,
                                 job_name='features_from_peaks', squeeze_output=False)

    return features
class AmplitudeFeature(PipelineNode):
    """Peak amplitude feature, per channel or as one scalar per peak.

    Parameters
    ----------
    all_channels: bool
        If True, return one amplitude per channel (reduce over the sample
        axis only); otherwise one scalar per peak (reduce over samples and
        channels).
    peak_sign: 'neg' | 'pos' | 'both'
        Which extremum to take: min, max, or max of the absolute value.
    """

    def __init__(self, recording, name='amplitude_feature', return_output=True, parents=None,
                 all_channels=False, peak_sign='neg'):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)

        self.all_channels = all_channels
        self.peak_sign = peak_sign
        self._kwargs.update(dict(all_channels=all_channels, peak_sign=peak_sign))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        # waveforms is indexed below as (peaks, samples, channels) -- consistent
        # with the other feature nodes in this module.
        if self.all_channels:
            if self.peak_sign == 'neg':
                amplitudes = np.min(waveforms, axis=1)
            elif self.peak_sign == 'pos':
                amplitudes = np.max(waveforms, axis=1)
            elif self.peak_sign == 'both':
                # BUG FIX: 'axis' belongs to np.max, not np.abs -- np.abs is a
                # ufunc and does not accept an axis argument, so the previous
                # np.max(np.abs(waveforms, axis=1)) raised a TypeError.
                amplitudes = np.max(np.abs(waveforms), axis=1)
        else:
            if self.peak_sign == 'neg':
                amplitudes = np.min(waveforms, axis=(1, 2))
            elif self.peak_sign == 'pos':
                amplitudes = np.max(waveforms, axis=(1, 2))
            elif self.peak_sign == 'both':
                amplitudes = np.max(np.abs(waveforms), axis=(1, 2))
        return amplitudes
class PeakToPeakFeature(PipelineNode):
    """Peak-to-peak amplitude of the waveforms.

    With ``all_channels=True`` one ptp value is returned per channel;
    otherwise one scalar per peak: the largest ptp among the channels within
    ``local_radius_um`` of the peak's main channel.
    """

    def __init__(self, recording, name='ptp_feature', return_output=True, parents=None,
                 local_radius_um=150., all_channels=True):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)

        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        # Boolean (num_channels, num_channels) neighbourhood mask.
        self.neighbours_mask = self.channel_distance < local_radius_um
        self.all_channels = all_channels
        self._kwargs.update(dict(local_radius_um=local_radius_um, all_channels=all_channels))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        if self.all_channels:
            # ptp per channel, reducing over the sample axis.
            all_ptps = np.ptp(waveforms, axis=1)
        else:
            all_ptps = np.zeros(peaks.size)
            for main_chan in np.unique(peaks['channel_ind']):
                idx, = np.nonzero(peaks['channel_ind'] == main_chan)
                chan_inds, = np.nonzero(self.neighbours_mask[main_chan])
                wfs = waveforms[idx][:, :, chan_inds]
                # BUG FIX: reduce over the channel axis only. The previous
                # np.max(...) without an axis collapsed everything to a single
                # scalar shared by every peak in the group; sibling classes
                # (e.g. StdPeakToPeakFeature) reduce with axis=1 per peak.
                all_ptps[idx] = np.max(np.ptp(wfs, axis=1), axis=1)
        return all_ptps
class PeakToPeakLagsFeature(PipelineNode):
    """Sample lag between the maximum and the minimum of each waveform.

    With ``all_channels=True`` the lag is returned for every channel;
    otherwise one lag per peak, taken on the neighbour channel that has the
    largest peak-to-peak amplitude.
    """

    def __init__(self, recording, name='ptp_lag_feature', return_output=True, parents=None,
                 local_radius_um=150., all_channels=True):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)

        self.all_channels = all_channels
        self.local_radius_um = local_radius_um

        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um

        self._kwargs.update(dict(local_radius_um=local_radius_um, all_channels=all_channels))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        if self.all_channels:
            # Lag per channel: argmax minus argmin along the sample axis.
            all_lags = waveforms.argmax(axis=1) - waveforms.argmin(axis=1)
        else:
            all_lags = np.zeros(peaks.size)
            for main_chan in np.unique(peaks['channel_ind']):
                peak_rows, = np.nonzero(peaks['channel_ind'] == main_chan)
                neighbour_chans, = np.nonzero(self.neighbours_mask[main_chan])
                local_wfs = waveforms[peak_rows][:, :, neighbour_chans]
                local_lags = local_wfs.argmax(axis=1) - local_wfs.argmin(axis=1)
                # Keep only the lag of the channel with the largest ptp.
                best_chan = np.argmax(np.ptp(local_wfs, axis=1), axis=1)
                all_lags[peak_rows] = local_lags[np.arange(len(peak_rows)), best_chan]
        return all_lags
class RandomProjectionsFeature(PipelineNode):
    """Project each peak's per-channel ptp profile onto fixed random vectors.

    The ptp over the neighbour channels acts as a weight vector which is
    (optionally sharpened by ``min_values``) projected through the
    ``projections`` matrix and normalised by the total weight.
    """

    def __init__(self, recording, name='random_projections_feature', return_output=True, parents=None,
                 projections=None, local_radius_um=150., min_values=None):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)

        self.projections = projections
        self.local_radius_um = local_radius_um
        self.min_values = min_values

        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um

        self._kwargs.update(dict(projections=projections, local_radius_um=local_radius_um,
                                 min_values=min_values))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        all_projections = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)

        for main_chan in np.unique(peaks['channel_ind']):
            peak_rows, = np.nonzero(peaks['channel_ind'] == main_chan)
            neighbour_chans, = np.nonzero(self.neighbours_mask[main_chan])
            basis = self.projections[neighbour_chans, :]

            # One ptp weight per neighbour channel, per peak.
            weights = np.ptp(waveforms[peak_rows][:, :, neighbour_chans], axis=1)
            if self.min_values is not None:
                # Sharpen the profile relative to the per-channel floor.
                weights = (weights / self.min_values[neighbour_chans]) ** 4

            totals = np.sum(weights, axis=1)
            valid = totals != 0
            all_projections[peak_rows[valid]] = (
                np.dot(weights[valid], basis) / totals[valid][:, np.newaxis]
            )
        return all_projections
class RandomProjectionsEnergyFeature(PipelineNode):
    """Project each peak's per-channel energy profile onto fixed random vectors.

    Same scheme as RandomProjectionsFeature but the per-channel weight is the
    L2 norm over samples instead of the peak-to-peak amplitude.
    """

    def __init__(self, recording, name='random_projections_energy_feature', return_output=True, parents=None,
                 projections=None, local_radius_um=150., min_values=None):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)

        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um

        self.projections = projections
        self.min_values = min_values
        self.local_radius_um = local_radius_um

        self._kwargs.update(dict(projections=projections, min_values=min_values,
                                 local_radius_um=local_radius_um))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        all_projections = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)

        for main_chan in np.unique(peaks['channel_ind']):
            peak_rows, = np.nonzero(peaks['channel_ind'] == main_chan)
            neighbour_chans, = np.nonzero(self.neighbours_mask[main_chan])
            basis = self.projections[neighbour_chans, :]

            # L2 norm over the sample axis gives one energy per channel.
            weights = np.linalg.norm(waveforms[peak_rows][:, :, neighbour_chans], axis=1)
            if self.min_values is not None:
                weights = (weights / self.min_values[neighbour_chans]) ** 4

            totals = np.sum(weights, axis=1)
            valid = totals != 0
            all_projections[peak_rows[valid]] = (
                np.dot(weights[valid], basis) / totals[valid][:, np.newaxis]
            )
        return all_projections
class StdPeakToPeakFeature(PipelineNode):
    """Standard deviation of the ptp across each peak's neighbour channels."""

    def __init__(self, recording, name='std_ptp_feature', return_output=True, parents=None,
                 local_radius_um=150.):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)

        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(dict(local_radius_um=local_radius_um))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        all_ptps = np.zeros(peaks.size)
        for main_chan in np.unique(peaks['channel_ind']):
            peak_rows, = np.nonzero(peaks['channel_ind'] == main_chan)
            neighbour_chans, = np.nonzero(self.neighbours_mask[main_chan])
            local_wfs = waveforms[peak_rows][:, :, neighbour_chans]
            # Spread of the per-channel ptp over the neighbourhood, per peak.
            all_ptps[peak_rows] = np.ptp(local_wfs, axis=1).std(axis=1)
        return all_ptps
class GlobalPeakToPeakFeature(PipelineNode):
    """Global max-minus-min over samples and neighbour channels, per peak."""

    def __init__(self, recording, name='global_ptp_feature', return_output=True, parents=None,
                 local_radius_um=150.):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)

        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(dict(local_radius_um=local_radius_um))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        all_ptps = np.zeros(peaks.size)
        for main_chan in np.unique(peaks['channel_ind']):
            peak_rows, = np.nonzero(peaks['channel_ind'] == main_chan)
            neighbour_chans, = np.nonzero(self.neighbours_mask[main_chan])
            local_wfs = waveforms[peak_rows][:, :, neighbour_chans]
            # ptp over both axes == max - min over samples and channels.
            all_ptps[peak_rows] = np.ptp(local_wfs, axis=(1, 2))
        return all_ptps
class KurtosisPeakToPeakFeature(PipelineNode):
    """Kurtosis of the ptp across each peak's neighbour channels."""

    def __init__(self, recording, name='kurtosis_ptp_feature', return_output=True, parents=None,
                 local_radius_um=150.):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)

        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um
        self._kwargs.update(dict(local_radius_um=local_radius_um))
        self._dtype = recording.get_dtype()

    def get_dtype(self):
        return self._dtype

    def compute(self, traces, peaks, waveforms):
        all_ptps = np.zeros(peaks.size)
        # Kept as a local import so scipy is only required when this feature
        # is used. BUG FIX: import the submodule explicitly -- a bare
        # 'import scipy' does not guarantee that scipy.stats is loaded.
        import scipy.stats
        for main_chan in np.unique(peaks['channel_ind']):
            idx, = np.nonzero(peaks['channel_ind'] == main_chan)
            chan_inds, = np.nonzero(self.neighbours_mask[main_chan])
            wfs = waveforms[idx][:, :, chan_inds]
            all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)
        return all_ptps
class EnergyFeature(PipelineNode):
    """L2 norm of each peak over samples and neighbour channels, divided by
    the number of neighbour channels."""

    def __init__(self, recording, name='energy_feature', return_output=True, parents=None,
                 local_radius_um=50.):
        PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)

        self.contact_locations = recording.get_channel_locations()
        self.channel_distance = get_channel_distances(recording)
        self.neighbours_mask = self.channel_distance < local_radius_um

        self._kwargs.update(dict(local_radius_um=local_radius_um))

    def get_dtype(self):
        return np.dtype('float32')

    def compute(self, traces, peaks, waveforms):
        energy = np.zeros(peaks.size, dtype='float32')
        for main_chan in np.unique(peaks['channel_ind']):
            peak_rows, = np.nonzero(peaks['channel_ind'] == main_chan)
            neighbour_chans, = np.nonzero(self.neighbours_mask[main_chan])
            local_wfs = waveforms[peak_rows][:, :, neighbour_chans]
            # Frobenius norm over (samples, channels), scaled by channel count.
            energy[peak_rows] = np.linalg.norm(local_wfs, axis=(1, 2)) / neighbour_chans.size
        return energy
# Registry mapping the feature names accepted by compute_features_from_peaks
# to their PipelineNode implementations.
_features_class = {
    'amplitude': AmplitudeFeature,
    'ptp': PeakToPeakFeature,
    'center_of_mass': LocalizeCenterOfMass,
    'monopolar_triangulation': LocalizeMonopolarTriangulation,
    'energy': EnergyFeature,
    'std_ptp': StdPeakToPeakFeature,
    'kurtosis_ptp': KurtosisPeakToPeakFeature,
    'random_projections_ptp': RandomProjectionsFeature,
    'random_projections_energy': RandomProjectionsEnergyFeature,
    'ptp_lag': PeakToPeakLagsFeature,
    'global_ptp': GlobalPeakToPeakFeature,
}
|
normal
|
{
"blob_id": "6fe22b3f98bff1a9b775fce631ae94a4ee22b04c",
"index": 4371,
"step-1": "<mask token>\n\n\nclass RandomProjectionsFeature(PipelineNode):\n <mask token>\n\n def get_dtype(self):\n return self._dtype\n <mask token>\n\n\nclass RandomProjectionsEnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_energy_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.projections = projections\n self.min_values = min_values\n self.local_radius_um = local_radius_um\n self._kwargs.update(dict(projections=projections, min_values=\n min_values, local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1)\n if self.min_values is not None:\n energies = (energies / self.min_values[chan_inds]) ** 4\n denom = np.sum(energies, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(energies[mask],\n local_projections) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass StdPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='std_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n 
self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass GlobalPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='global_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2))\n return all_ptps\n\n\nclass KurtosisPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='kurtosis_ptp_feature',\n return_output=True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n 
self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n import scipy\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass EnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='energy_feature', return_output=True,\n parents=None, local_radius_um=50.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n def get_dtype(self):\n return np.dtype('float32')\n\n def compute(self, traces, peaks, waveforms):\n energy = np.zeros(peaks.size, dtype='float32')\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size\n return energy\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RandomProjectionsFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.projections = projections\n self.local_radius_um = local_radius_um\n self.min_values = min_values\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(projections=projections, local_radius_um=\n local_radius_um, min_values=min_values))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n wf_ptp = waveforms[idx][:, :, chan_inds].ptp(axis=1)\n if self.min_values is not None:\n wf_ptp = (wf_ptp / self.min_values[chan_inds]) ** 4\n denom = np.sum(wf_ptp, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections\n ) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass RandomProjectionsEnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_energy_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < 
local_radius_um\n self.projections = projections\n self.min_values = min_values\n self.local_radius_um = local_radius_um\n self._kwargs.update(dict(projections=projections, min_values=\n min_values, local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1)\n if self.min_values is not None:\n energies = (energies / self.min_values[chan_inds]) ** 4\n denom = np.sum(energies, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(energies[mask],\n local_projections) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass StdPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='std_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass 
GlobalPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='global_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2))\n return all_ptps\n\n\nclass KurtosisPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='kurtosis_ptp_feature',\n return_output=True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n import scipy\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass EnergyFeature(PipelineNode):\n\n def 
__init__(self, recording, name='energy_feature', return_output=True,\n parents=None, local_radius_um=50.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n def get_dtype(self):\n return np.dtype('float32')\n\n def compute(self, traces, peaks, waveforms):\n energy = np.zeros(peaks.size, dtype='float32')\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size\n return energy\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PeakToPeakLagsFeature(PipelineNode):\n\n def __init__(self, recording, name='ptp_lag_feature', return_output=\n True, parents=None, local_radius_um=150.0, all_channels=True):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.all_channels = all_channels\n self.local_radius_um = local_radius_um\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um,\n all_channels=all_channels))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n all_maxs = np.argmax(waveforms, axis=1)\n all_mins = np.argmin(waveforms, axis=1)\n all_lags = all_maxs - all_mins\n else:\n all_lags = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n maxs = np.argmax(wfs, axis=1)\n mins = np.argmin(wfs, axis=1)\n lags = maxs - mins\n ptps = np.argmax(np.ptp(wfs, axis=1), axis=1)\n all_lags[idx] = lags[np.arange(len(idx)), ptps]\n return all_lags\n\n\nclass RandomProjectionsFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.projections = projections\n self.local_radius_um = local_radius_um\n self.min_values = min_values\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n 
self._kwargs.update(dict(projections=projections, local_radius_um=\n local_radius_um, min_values=min_values))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n wf_ptp = waveforms[idx][:, :, chan_inds].ptp(axis=1)\n if self.min_values is not None:\n wf_ptp = (wf_ptp / self.min_values[chan_inds]) ** 4\n denom = np.sum(wf_ptp, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections\n ) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass RandomProjectionsEnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_energy_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.projections = projections\n self.min_values = min_values\n self.local_radius_um = local_radius_um\n self._kwargs.update(dict(projections=projections, min_values=\n min_values, local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n 
local_projections = self.projections[chan_inds, :]\n energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1)\n if self.min_values is not None:\n energies = (energies / self.min_values[chan_inds]) ** 4\n denom = np.sum(energies, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(energies[mask],\n local_projections) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass StdPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='std_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass GlobalPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='global_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = 
np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2))\n return all_ptps\n\n\nclass KurtosisPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='kurtosis_ptp_feature',\n return_output=True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n import scipy\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass EnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='energy_feature', return_output=True,\n parents=None, local_radius_um=50.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n def get_dtype(self):\n return np.dtype('float32')\n\n def compute(self, traces, peaks, waveforms):\n energy = np.zeros(peaks.size, dtype='float32')\n for main_chan in np.unique(peaks['channel_ind']):\n 
idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size\n return energy\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass PeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='ptp_feature', return_output=True,\n parents=None, local_radius_um=150.0, all_channels=True):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.all_channels = all_channels\n self._kwargs.update(dict(local_radius_um=local_radius_um,\n all_channels=all_channels))\n self._dtype = recording.get_dtype()\n <mask token>\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n all_ptps = np.ptp(waveforms, axis=1)\n else:\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(np.ptp(wfs, axis=1))\n return all_ptps\n\n\nclass PeakToPeakLagsFeature(PipelineNode):\n\n def __init__(self, recording, name='ptp_lag_feature', return_output=\n True, parents=None, local_radius_um=150.0, all_channels=True):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.all_channels = all_channels\n self.local_radius_um = local_radius_um\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um,\n all_channels=all_channels))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n all_maxs = np.argmax(waveforms, axis=1)\n all_mins = np.argmin(waveforms, axis=1)\n all_lags = all_maxs - all_mins\n else:\n all_lags = 
np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n maxs = np.argmax(wfs, axis=1)\n mins = np.argmin(wfs, axis=1)\n lags = maxs - mins\n ptps = np.argmax(np.ptp(wfs, axis=1), axis=1)\n all_lags[idx] = lags[np.arange(len(idx)), ptps]\n return all_lags\n\n\nclass RandomProjectionsFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.projections = projections\n self.local_radius_um = local_radius_um\n self.min_values = min_values\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(projections=projections, local_radius_um=\n local_radius_um, min_values=min_values))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n wf_ptp = waveforms[idx][:, :, chan_inds].ptp(axis=1)\n if self.min_values is not None:\n wf_ptp = (wf_ptp / self.min_values[chan_inds]) ** 4\n denom = np.sum(wf_ptp, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections\n ) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass RandomProjectionsEnergyFeature(PipelineNode):\n\n def __init__(self, recording, 
name='random_projections_energy_feature',\n return_output=True, parents=None, projections=None, local_radius_um\n =150.0, min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.projections = projections\n self.min_values = min_values\n self.local_radius_um = local_radius_um\n self._kwargs.update(dict(projections=projections, min_values=\n min_values, local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]),\n dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1)\n if self.min_values is not None:\n energies = (energies / self.min_values[chan_inds]) ** 4\n denom = np.sum(energies, axis=1)\n mask = denom != 0\n all_projections[idx[mask]] = np.dot(energies[mask],\n local_projections) / denom[mask][:, np.newaxis]\n return all_projections\n\n\nclass StdPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='std_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return 
self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass GlobalPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='global_ptp_feature', return_output=\n True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2))\n return all_ptps\n\n\nclass KurtosisPeakToPeakFeature(PipelineNode):\n\n def __init__(self, recording, name='kurtosis_ptp_feature',\n return_output=True, parents=None, local_radius_um=150.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = 
np.zeros(peaks.size)\n import scipy\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass EnergyFeature(PipelineNode):\n\n def __init__(self, recording, name='energy_feature', return_output=True,\n parents=None, local_radius_um=50.0):\n PipelineNode.__init__(self, recording, return_output=return_output,\n parents=parents)\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n def get_dtype(self):\n return np.dtype('float32')\n\n def compute(self, traces, peaks, waveforms):\n energy = np.zeros(peaks.size, dtype='float32')\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size\n return energy\n\n\n<mask token>\n",
"step-5": "\"\"\"Sorting components: peak waveform features.\"\"\"\nimport numpy as np\n\nfrom spikeinterface.core.job_tools import fix_job_kwargs\nfrom spikeinterface.core import get_channel_distances\nfrom spikeinterface.sortingcomponents.peak_localization import LocalizeCenterOfMass, LocalizeMonopolarTriangulation\nfrom spikeinterface.sortingcomponents.peak_pipeline import run_peak_pipeline, PipelineNode, ExtractDenseWaveforms\n\n\n\ndef compute_features_from_peaks(\n recording,\n peaks,\n feature_list=[\"ptp\", ],\n feature_params={},\n ms_before=1.,\n ms_after=1.,\n **job_kwargs,\n):\n \"\"\"Extract features on the fly from the recording given a list of peaks. \n\n Parameters\n ----------\n recording: RecordingExtractor\n The recording extractor object.\n peaks: array\n Peaks array, as returned by detect_peaks() in \"compact_numpy\" way.\n feature_list: List of features to be computed.\n - amplitude\n - ptp\n - com\n - energy\n ms_before: float\n The duration in ms before the peak for extracting the features (default 1 ms)\n ms_after: float\n The duration in ms after the peakfor extracting the features (default 1 ms)\n\n {}\n\n Returns\n -------\n A tuple of features. 
Even if there is one feature.\n Every feature have shape[0] == peaks.shape[0].\n dtype and other dim depends on features.\n\n \"\"\"\n job_kwargs = fix_job_kwargs(job_kwargs)\n\n extract_dense_waveforms = ExtractDenseWaveforms(recording, ms_before=ms_before, ms_after=ms_after, return_output=False)\n nodes = [\n extract_dense_waveforms,\n ]\n for feature_name in feature_list:\n Class = _features_class[feature_name]\n params = feature_params.get(feature_name, {}).copy()\n node = Class(recording, parents=[extract_dense_waveforms], **params)\n nodes.append(node)\n\n features = run_peak_pipeline(recording, peaks, nodes, job_kwargs, job_name='features_from_peaks', squeeze_output=False)\n\n return features\n\n\nclass AmplitudeFeature(PipelineNode):\n def __init__(self, recording, name='amplitude_feature', return_output=True, parents=None, \n all_channels=False, peak_sign='neg'):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.all_channels = all_channels\n self.peak_sign = peak_sign\n self._kwargs.update(dict(all_channels=all_channels, peak_sign=peak_sign))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n if self.peak_sign == 'neg':\n amplitudes = np.min(waveforms, axis=1)\n elif self.peak_sign == 'pos':\n amplitudes = np.max(waveforms, axis=1)\n elif self.peak_sign == 'both':\n amplitudes = np.max(np.abs(waveforms, axis=1))\n else:\n if self.peak_sign == 'neg':\n amplitudes = np.min(waveforms, axis=(1, 2))\n elif self.peak_sign == 'pos':\n amplitudes = np.max(waveforms, axis=(1, 2))\n elif self.peak_sign == 'both':\n amplitudes = np.max(np.abs(waveforms), axis=(1, 2))\n return amplitudes\n\n\nclass PeakToPeakFeature(PipelineNode):\n def __init__(self, recording, name='ptp_feature', return_output=True, parents=None,\n local_radius_um=150., all_channels=True):\n PipelineNode.__init__(self, recording, 
return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n self.all_channels = all_channels\n self._kwargs.update(dict(local_radius_um=local_radius_um, all_channels=all_channels))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n all_ptps = np.ptp(waveforms, axis=1)\n else:\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(np.ptp(wfs, axis=1))\n return all_ptps\n\n\nclass PeakToPeakLagsFeature(PipelineNode):\n def __init__(self, recording, name='ptp_lag_feature', return_output=True, parents=None,\n local_radius_um=150., all_channels=True):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.all_channels = all_channels\n self.local_radius_um = local_radius_um\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(local_radius_um=local_radius_um, all_channels=all_channels))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n if self.all_channels:\n all_maxs = np.argmax(waveforms, axis=1)\n all_mins = np.argmin(waveforms, axis=1)\n all_lags = all_maxs - all_mins\n else:\n all_lags = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, 
chan_inds]\n maxs = np.argmax(wfs, axis=1)\n mins = np.argmin(wfs, axis=1)\n lags = maxs - mins\n ptps = np.argmax(np.ptp(wfs, axis=1), axis=1)\n all_lags[idx] = lags[np.arange(len(idx)), ptps]\n return all_lags\n\n\nclass RandomProjectionsFeature(PipelineNode):\n\n def __init__(self, recording, name='random_projections_feature', return_output=True, parents=None,\n projections=None, local_radius_um=150., min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.projections = projections\n self.local_radius_um = local_radius_um\n self.min_values = min_values\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(projections=projections, local_radius_um=local_radius_um, min_values=min_values))\n \n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n wf_ptp = (waveforms[idx][:, :, chan_inds]).ptp(axis=1)\n\n if self.min_values is not None:\n wf_ptp = (wf_ptp/self.min_values[chan_inds])**4\n\n denom = np.sum(wf_ptp, axis=1)\n mask = denom != 0\n\n all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections)/(denom[mask][:, np.newaxis])\n return all_projections\n\n\nclass RandomProjectionsEnergyFeature(PipelineNode):\n def __init__(self, recording, name='random_projections_energy_feature', return_output=True, parents=None,\n projections=None, local_radius_um=150., min_values=None):\n PipelineNode.__init__(self, recording, return_output=return_output, 
parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n\n self.projections = projections\n self.min_values = min_values\n self.local_radius_um = local_radius_um\n self._kwargs.update(dict(projections=projections, min_values=min_values, local_radius_um=local_radius_um))\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_projections = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n local_projections = self.projections[chan_inds, :]\n energies = np.linalg.norm(waveforms[idx][:, :, chan_inds], axis=1)\n\n if self.min_values is not None:\n energies = (energies/self.min_values[chan_inds])**4\n\n denom = np.sum(energies, axis=1)\n mask = denom != 0\n\n all_projections[idx[mask]] = np.dot(energies[mask], local_projections)/(denom[mask][:, np.newaxis])\n return all_projections\n\n\nclass StdPeakToPeakFeature(PipelineNode):\n def __init__(self, recording, name='std_ptp_feature', return_output=True, parents=None,\n local_radius_um=150.):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = 
np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.std(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass GlobalPeakToPeakFeature(PipelineNode):\n def __init__(self, recording, name='global_ptp_feature', return_output=True, parents=None,\n local_radius_um=150.):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = np.max(wfs, axis=(1, 2)) - np.min(wfs, axis=(1, 2))\n return all_ptps\n\nclass KurtosisPeakToPeakFeature(PipelineNode):\n def __init__(self, recording, name='kurtosis_ptp_feature', return_output=True, parents=None,\n local_radius_um=150.):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n self._dtype = recording.get_dtype()\n\n def get_dtype(self):\n return self._dtype\n\n def compute(self, traces, peaks, waveforms):\n all_ptps = np.zeros(peaks.size)\n import scipy\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n wfs = 
waveforms[idx][:, :, chan_inds]\n all_ptps[idx] = scipy.stats.kurtosis(np.ptp(wfs, axis=1), axis=1)\n return all_ptps\n\n\nclass EnergyFeature(PipelineNode):\n def __init__(self, recording, name='energy_feature', return_output=True, parents=None,\n local_radius_um=50.):\n PipelineNode.__init__(self, recording, return_output=return_output, parents=parents)\n\n self.contact_locations = recording.get_channel_locations()\n self.channel_distance = get_channel_distances(recording)\n self.neighbours_mask = self.channel_distance < local_radius_um\n \n self._kwargs.update(dict(local_radius_um=local_radius_um))\n\n def get_dtype(self):\n return np.dtype('float32')\n\n def compute(self, traces, peaks, waveforms):\n energy = np.zeros(peaks.size, dtype='float32')\n for main_chan in np.unique(peaks['channel_ind']):\n idx, = np.nonzero(peaks['channel_ind'] == main_chan)\n chan_inds, = np.nonzero(self.neighbours_mask[main_chan])\n\n wfs = waveforms[idx][:, :, chan_inds]\n energy[idx] = np.linalg.norm(wfs, axis=(1, 2)) / chan_inds.size\n return energy\n\n\n_features_class = {\n 'amplitude': AmplitudeFeature,\n 'ptp' : PeakToPeakFeature,\n 'center_of_mass' : LocalizeCenterOfMass,\n 'monopolar_triangulation' : LocalizeMonopolarTriangulation,\n 'energy' : EnergyFeature,\n 'std_ptp' : StdPeakToPeakFeature,\n 'kurtosis_ptp' : KurtosisPeakToPeakFeature,\n 'random_projections_ptp' : RandomProjectionsFeature,\n 'random_projections_energy' : RandomProjectionsEnergyFeature,\n 'ptp_lag' : PeakToPeakLagsFeature,\n 'global_ptp' : GlobalPeakToPeakFeature\n}",
"step-ids": [
22,
24,
28,
31,
40
]
}
|
[
22,
24,
28,
31,
40
] |
# created by RomaOkorosso at 21.03.2021
# models.py
from datetime import datetime
from sqlalchemy import (
Column,
Integer,
String,
Boolean,
DateTime,
ForeignKey,
Date
)
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import relationship
from Database.database import Base
class Publisher(Base):
    """ORM model for the ``publishers`` table: one row per book publisher.

    Referenced (by id) from ``Book.publisher_id``.
    """
    __tablename__ = "publishers"
    id = Column(Integer, primary_key=True)
    name = Column(String)  # publisher display name
class Author(Base):
    """ORM model for the ``authors`` table: one row per book author.

    Referenced (by id) from the ``Book.authors_id`` integer array.
    """
    __tablename__ = "authors"
    id = Column(Integer, primary_key=True)
    full_name = Column(String)  # author's full name
    # Counter starting at 0; presumably how many times this author's books
    # were borrowed — confirm against the code that increments it.
    taken_count = Column(Integer, default=0)
class Book(Base):
    """ORM model for the ``books`` table: one row per title held by the library.

    Author links are stored denormalized as a Postgres integer array
    (``authors_id``) rather than through an association table.
    """
    __tablename__ = "books"
    id = Column(Integer, primary_key=True)
    title = Column(String)
    count = Column(Integer)  # total copies owned
    # Default to 0 so new rows start with no copies taken, consistent with
    # Author.taken_count (previously no default, i.e. NULL for new rows).
    taken_count = Column(Integer, default=0)
    # Ids of Author rows; None when authors are unknown.
    authors_id = Column(postgresql.ARRAY(Integer), default=None)
    # Declare the reference to publishers explicitly, matching the ForeignKey
    # style already used by TakenBook (previously a bare Integer column).
    publisher_id = Column(Integer, ForeignKey("publishers.id"))
    publishing_year = Column(Integer)
class Client(Base):
    """ORM model for the ``clients`` table: one row per library client.

    ``taken_books_now_id`` holds ids of currently borrowed books;
    ``all_taken_books_id`` holds ids of every book ever borrowed.
    """
    __tablename__ = "clients"
    id = Column(Integer, primary_key=True)
    # Client category — presumably e.g. student/teacher; confirm against callers.
    type = Column(String)
    full_name = Column(String)
    # Use the ``list`` callable rather than a literal ``[]``: SQLAlchemy
    # recommends callable defaults for mutable values so every insert gets a
    # fresh list instead of reusing one shared object.
    taken_books_now_id = Column(postgresql.ARRAY(Integer), default=list)
    all_taken_books_id = Column(postgresql.ARRAY(Integer), default=list)
class TakenBook(Base):
__tablename__ = "taken_books"
id = Column(Integer, primary_key=True)
book_id = Column(Integer, ForeignKey("books.id"))
client_id = Column(Integer, ForeignKey("clients.id"))
taken_date = Column(Date)
return_date = Column(Date, default=None)
|
normal
|
{
"blob_id": "a288e66e64d386afd13bfc7b5b13d4a47d15cd6d",
"index": 1316,
"step-1": "<mask token>\n\n\nclass Client(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TakenBook(Base):\n __tablename__ = 'taken_books'\n id = Column(Integer, primary_key=True)\n book_id = Column(Integer, ForeignKey('books.id'))\n client_id = Column(Integer, ForeignKey('clients.id'))\n taken_date = Column(Date)\n return_date = Column(Date, default=None)\n",
"step-2": "<mask token>\n\n\nclass Author(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Book(Base):\n __tablename__ = 'books'\n id = Column(Integer, primary_key=True)\n title = Column(String)\n count = Column(Integer)\n taken_count = Column(Integer)\n authors_id = Column(postgresql.ARRAY(Integer), default=None)\n publisher_id = Column(Integer)\n publishing_year = Column(Integer)\n\n\nclass Client(Base):\n __tablename__ = 'clients'\n id = Column(Integer, primary_key=True)\n type = Column(String)\n full_name = Column(String)\n taken_books_now_id = Column(postgresql.ARRAY(Integer), default=[])\n all_taken_books_id = Column(postgresql.ARRAY(Integer), default=[])\n\n\nclass TakenBook(Base):\n __tablename__ = 'taken_books'\n id = Column(Integer, primary_key=True)\n book_id = Column(Integer, ForeignKey('books.id'))\n client_id = Column(Integer, ForeignKey('clients.id'))\n taken_date = Column(Date)\n return_date = Column(Date, default=None)\n",
"step-3": "<mask token>\n\n\nclass Author(Base):\n __tablename__ = 'authors'\n id = Column(Integer, primary_key=True)\n full_name = Column(String)\n taken_count = Column(Integer, default=0)\n\n\nclass Book(Base):\n __tablename__ = 'books'\n id = Column(Integer, primary_key=True)\n title = Column(String)\n count = Column(Integer)\n taken_count = Column(Integer)\n authors_id = Column(postgresql.ARRAY(Integer), default=None)\n publisher_id = Column(Integer)\n publishing_year = Column(Integer)\n\n\nclass Client(Base):\n __tablename__ = 'clients'\n id = Column(Integer, primary_key=True)\n type = Column(String)\n full_name = Column(String)\n taken_books_now_id = Column(postgresql.ARRAY(Integer), default=[])\n all_taken_books_id = Column(postgresql.ARRAY(Integer), default=[])\n\n\nclass TakenBook(Base):\n __tablename__ = 'taken_books'\n id = Column(Integer, primary_key=True)\n book_id = Column(Integer, ForeignKey('books.id'))\n client_id = Column(Integer, ForeignKey('clients.id'))\n taken_date = Column(Date)\n return_date = Column(Date, default=None)\n",
"step-4": "<mask token>\n\n\nclass Publisher(Base):\n __tablename__ = 'publishers'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n\n\nclass Author(Base):\n __tablename__ = 'authors'\n id = Column(Integer, primary_key=True)\n full_name = Column(String)\n taken_count = Column(Integer, default=0)\n\n\nclass Book(Base):\n __tablename__ = 'books'\n id = Column(Integer, primary_key=True)\n title = Column(String)\n count = Column(Integer)\n taken_count = Column(Integer)\n authors_id = Column(postgresql.ARRAY(Integer), default=None)\n publisher_id = Column(Integer)\n publishing_year = Column(Integer)\n\n\nclass Client(Base):\n __tablename__ = 'clients'\n id = Column(Integer, primary_key=True)\n type = Column(String)\n full_name = Column(String)\n taken_books_now_id = Column(postgresql.ARRAY(Integer), default=[])\n all_taken_books_id = Column(postgresql.ARRAY(Integer), default=[])\n\n\nclass TakenBook(Base):\n __tablename__ = 'taken_books'\n id = Column(Integer, primary_key=True)\n book_id = Column(Integer, ForeignKey('books.id'))\n client_id = Column(Integer, ForeignKey('clients.id'))\n taken_date = Column(Date)\n return_date = Column(Date, default=None)\n",
"step-5": "# created by RomaOkorosso at 21.03.2021\n# models.py\n\nfrom datetime import datetime\n\nfrom sqlalchemy import (\n Column,\n Integer,\n String,\n Boolean,\n DateTime,\n ForeignKey,\n Date\n)\n\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy.orm import relationship\n\nfrom Database.database import Base\n\n\nclass Publisher(Base):\n __tablename__ = \"publishers\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n\n\nclass Author(Base):\n __tablename__ = \"authors\"\n\n id = Column(Integer, primary_key=True)\n full_name = Column(String)\n taken_count = Column(Integer, default=0)\n\n\nclass Book(Base):\n __tablename__ = \"books\"\n\n id = Column(Integer, primary_key=True)\n title = Column(String)\n count = Column(Integer)\n taken_count = Column(Integer)\n authors_id = Column(postgresql.ARRAY(Integer), default=None)\n publisher_id = Column(Integer)\n publishing_year = Column(Integer)\n\n\nclass Client(Base):\n __tablename__ = \"clients\"\n\n id = Column(Integer, primary_key=True)\n type = Column(String)\n full_name = Column(String)\n taken_books_now_id = Column(postgresql.ARRAY(Integer), default=[])\n all_taken_books_id = Column(postgresql.ARRAY(Integer), default=[])\n\n\nclass TakenBook(Base):\n __tablename__ = \"taken_books\"\n\n id = Column(Integer, primary_key=True)\n book_id = Column(Integer, ForeignKey(\"books.id\"))\n client_id = Column(Integer, ForeignKey(\"clients.id\"))\n taken_date = Column(Date)\n return_date = Column(Date, default=None)\n",
"step-ids": [
3,
7,
8,
10,
12
]
}
|
[
3,
7,
8,
10,
12
] |
<|reserved_special_token_0|>
class TextDocPrintout(wx.Printout):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class PrintFrameworkSample(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, size=(640, 480), title=
'Print Framework Sample')
self.CreateStatusBar()
self.tc = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE | wx.
TE_DONTWRAP)
self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.
FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
filename = os.path.join(os.path.dirname(__file__), 'sample-text.txt')
with open(filename) as fid:
self.tc.SetValue(fid.read())
self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)
wx.CallAfter(self.tc.SetInsertionPoint, 0)
menu = wx.Menu()
item = menu.Append(-1, 'Page Setup...\tF5',
'Set up page margins and etc.')
self.Bind(wx.EVT_MENU, self.OnPageSetup, item)
item = menu.Append(-1, 'Print Preview...\tF6',
'View the printout on-screen')
self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)
item = menu.Append(-1, 'Print...\tF7', 'Print the document')
self.Bind(wx.EVT_MENU, self.OnPrint, item)
menu.AppendSeparator()
item = menu.Append(wx.ID_ABOUT, 'About', 'About this application')
self.Bind(wx.EVT_MENU, self.OnAbout, item)
item = menu.Append(wx.ID_EXIT, 'E&xit\tCtrl-Q',
'Close this application')
self.Bind(wx.EVT_MENU, self.OnExit, item)
menubar = wx.MenuBar()
menubar.Append(menu, '&File')
self.SetMenuBar(menubar)
self.pdata = wx.PrintData()
self.pdata.SetPaperId(wx.PAPER_LETTER)
self.pdata.SetOrientation(wx.PORTRAIT)
self.margins = wx.Point(15, 15), wx.Point(15, 15)
def OnExit(self, evt):
self.Close()
def OnAbout(self, evt):
wx.MessageBox(
'Print framework sample application\n\nUsing wxPython %s' % wx.
version(), 'About')
def OnClearSelection(self, evt):
evt.Skip()
wx.CallAfter(self.tc.SetInsertionPoint, self.tc.GetInsertionPoint())
def OnPageSetup(self, evt):
data = wx.PageSetupDialogData()
data.SetPrintData(self.pdata)
data.SetDefaultMinMargins(True)
data.SetMarginTopLeft(self.margins[0])
data.SetMarginBottomRight(self.margins[1])
dlg = wx.PageSetupDialog(self, data)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetPageSetupData()
self.pdata = wx.PrintData(data.GetPrintData())
self.pdata.SetPaperId(data.GetPaperId())
self.margins = data.GetMarginTopLeft(), data.GetMarginBottomRight()
dlg.Destroy()
def OnPrintPreview(self, evt):
data = wx.PrintDialogData(self.pdata)
text = self.tc.GetValue()
printout1 = TextDocPrintout(text, 'title', self.margins)
printout2 = TextDocPrintout(text, 'title', self.margins)
preview = wx.PrintPreview(printout1, printout2, data)
if not preview:
wx.MessageBox('Unable to create PrintPreview!', 'Error')
else:
frame = wx.PreviewFrame(preview, self, 'Print Preview', pos=
self.GetPosition(), size=self.GetSize())
frame.Initialize()
frame.Show()
def OnPrint(self, evt):
data = wx.PrintDialogData(self.pdata)
printer = wx.Printer(data)
text = self.tc.GetValue()
printout = TextDocPrintout(text, 'title', self.margins)
useSetupDialog = True
if not printer.Print(self, printout, useSetupDialog
) and printer.GetLastError() == wx.PRINTER_ERROR:
wx.MessageBox(
"""There was a problem printing.
Perhaps your current printer is not set correctly?"""
, 'Printing Error', wx.OK)
else:
data = printer.GetPrintDialogData()
self.pdata = wx.PrintData(data.GetPrintData())
printout.Destroy()
def OnPrintTest(self, evt):
data = wx.PrintDialogData(self.pdata)
dlg = wx.PrintDialog(self, data)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetPrintDialogData()
print_()
print_('GetFromPage:', data.GetFromPage())
print_('GetToPage:', data.GetToPage())
print_('GetMinPage:', data.GetMinPage())
print_('GetMaxPage:', data.GetMaxPage())
print_('GetNoCopies:', data.GetNoCopies())
print_('GetAllPages:', data.GetAllPages())
print_('GetSelection:', data.GetSelection())
print_('GetCollate:', data.GetCollate())
print_('GetPrintToFile:', data.GetPrintToFile())
self.pdata = wx.PrintData(data.GetPrintData())
print_()
print_('GetPrinterName:', self.pdata.GetPrinterName())
dlg.Destroy()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TextDocPrintout(wx.Printout):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def GetPageInfo(self):
return 1, self.numPages, 1, self.numPages
def CalculateScale(self, dc):
ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()
ppiScreenX, ppiScreenY = self.GetPPIScreen()
logScale = float(ppiPrinterX) / float(ppiScreenX)
pw, ph = self.GetPageSizePixels()
dw, dh = dc.GetSize()
scale = logScale * float(dw) / float(pw)
dc.SetUserScale(scale, scale)
self.logUnitsMM = float(ppiPrinterX) / (logScale * 25.4)
def CalculateLayout(self, dc):
topLeft, bottomRight = self.margins
dw, dh = dc.GetSize()
self.x1 = topLeft.x * self.logUnitsMM
self.y1 = topLeft.y * self.logUnitsMM
self.x2 = dc.DeviceToLogicalXRel(dw) - bottomRight.x * self.logUnitsMM
self.y2 = dc.DeviceToLogicalYRel(dh) - bottomRight.y * self.logUnitsMM
self.pageHeight = self.y2 - self.y1 - 2 * self.logUnitsMM
font = wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.
FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
dc.SetFont(font)
self.lineHeight = dc.GetCharHeight()
self.linesPerPage = int(self.pageHeight / self.lineHeight)
<|reserved_special_token_0|>
def OnPrintPage(self, page):
dc = self.GetDC()
self.CalculateScale(dc)
self.CalculateLayout(dc)
dc.SetPen(wx.Pen('black', 0))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
r = wx.Rect(wx.Point(self.x1, self.y1), wx.Point(self.x2, self.y2))
dc.DrawRectangle(r)
dc.SetClippingRegion(r)
line = (page - 1) * self.linesPerPage
x = self.x1 + self.logUnitsMM
y = self.y1 + self.logUnitsMM
while line < page * self.linesPerPage:
dc.DrawText(self.lines[line], x, y)
y += self.lineHeight
line += 1
if line >= len(self.lines):
break
return True
class PrintFrameworkSample(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, size=(640, 480), title=
'Print Framework Sample')
self.CreateStatusBar()
self.tc = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE | wx.
TE_DONTWRAP)
self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.
FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
filename = os.path.join(os.path.dirname(__file__), 'sample-text.txt')
with open(filename) as fid:
self.tc.SetValue(fid.read())
self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)
wx.CallAfter(self.tc.SetInsertionPoint, 0)
menu = wx.Menu()
item = menu.Append(-1, 'Page Setup...\tF5',
'Set up page margins and etc.')
self.Bind(wx.EVT_MENU, self.OnPageSetup, item)
item = menu.Append(-1, 'Print Preview...\tF6',
'View the printout on-screen')
self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)
item = menu.Append(-1, 'Print...\tF7', 'Print the document')
self.Bind(wx.EVT_MENU, self.OnPrint, item)
menu.AppendSeparator()
item = menu.Append(wx.ID_ABOUT, 'About', 'About this application')
self.Bind(wx.EVT_MENU, self.OnAbout, item)
item = menu.Append(wx.ID_EXIT, 'E&xit\tCtrl-Q',
'Close this application')
self.Bind(wx.EVT_MENU, self.OnExit, item)
menubar = wx.MenuBar()
menubar.Append(menu, '&File')
self.SetMenuBar(menubar)
self.pdata = wx.PrintData()
self.pdata.SetPaperId(wx.PAPER_LETTER)
self.pdata.SetOrientation(wx.PORTRAIT)
self.margins = wx.Point(15, 15), wx.Point(15, 15)
def OnExit(self, evt):
self.Close()
def OnAbout(self, evt):
wx.MessageBox(
'Print framework sample application\n\nUsing wxPython %s' % wx.
version(), 'About')
def OnClearSelection(self, evt):
evt.Skip()
wx.CallAfter(self.tc.SetInsertionPoint, self.tc.GetInsertionPoint())
def OnPageSetup(self, evt):
data = wx.PageSetupDialogData()
data.SetPrintData(self.pdata)
data.SetDefaultMinMargins(True)
data.SetMarginTopLeft(self.margins[0])
data.SetMarginBottomRight(self.margins[1])
dlg = wx.PageSetupDialog(self, data)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetPageSetupData()
self.pdata = wx.PrintData(data.GetPrintData())
self.pdata.SetPaperId(data.GetPaperId())
self.margins = data.GetMarginTopLeft(), data.GetMarginBottomRight()
dlg.Destroy()
def OnPrintPreview(self, evt):
data = wx.PrintDialogData(self.pdata)
text = self.tc.GetValue()
printout1 = TextDocPrintout(text, 'title', self.margins)
printout2 = TextDocPrintout(text, 'title', self.margins)
preview = wx.PrintPreview(printout1, printout2, data)
if not preview:
wx.MessageBox('Unable to create PrintPreview!', 'Error')
else:
frame = wx.PreviewFrame(preview, self, 'Print Preview', pos=
self.GetPosition(), size=self.GetSize())
frame.Initialize()
frame.Show()
def OnPrint(self, evt):
data = wx.PrintDialogData(self.pdata)
printer = wx.Printer(data)
text = self.tc.GetValue()
printout = TextDocPrintout(text, 'title', self.margins)
useSetupDialog = True
if not printer.Print(self, printout, useSetupDialog
) and printer.GetLastError() == wx.PRINTER_ERROR:
wx.MessageBox(
"""There was a problem printing.
Perhaps your current printer is not set correctly?"""
, 'Printing Error', wx.OK)
else:
data = printer.GetPrintDialogData()
self.pdata = wx.PrintData(data.GetPrintData())
printout.Destroy()
def OnPrintTest(self, evt):
data = wx.PrintDialogData(self.pdata)
dlg = wx.PrintDialog(self, data)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetPrintDialogData()
print_()
print_('GetFromPage:', data.GetFromPage())
print_('GetToPage:', data.GetToPage())
print_('GetMinPage:', data.GetMinPage())
print_('GetMaxPage:', data.GetMaxPage())
print_('GetNoCopies:', data.GetNoCopies())
print_('GetAllPages:', data.GetAllPages())
print_('GetSelection:', data.GetSelection())
print_('GetCollate:', data.GetCollate())
print_('GetPrintToFile:', data.GetPrintToFile())
self.pdata = wx.PrintData(data.GetPrintData())
print_()
print_('GetPrinterName:', self.pdata.GetPrinterName())
dlg.Destroy()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TextDocPrintout(wx.Printout):
<|reserved_special_token_0|>
def __init__(self, text, title, margins):
wx.Printout.__init__(self, title)
self.lines = text.split('\n')
self.margins = margins
def HasPage(self, page):
return page <= self.numPages
def GetPageInfo(self):
return 1, self.numPages, 1, self.numPages
def CalculateScale(self, dc):
ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()
ppiScreenX, ppiScreenY = self.GetPPIScreen()
logScale = float(ppiPrinterX) / float(ppiScreenX)
pw, ph = self.GetPageSizePixels()
dw, dh = dc.GetSize()
scale = logScale * float(dw) / float(pw)
dc.SetUserScale(scale, scale)
self.logUnitsMM = float(ppiPrinterX) / (logScale * 25.4)
def CalculateLayout(self, dc):
topLeft, bottomRight = self.margins
dw, dh = dc.GetSize()
self.x1 = topLeft.x * self.logUnitsMM
self.y1 = topLeft.y * self.logUnitsMM
self.x2 = dc.DeviceToLogicalXRel(dw) - bottomRight.x * self.logUnitsMM
self.y2 = dc.DeviceToLogicalYRel(dh) - bottomRight.y * self.logUnitsMM
self.pageHeight = self.y2 - self.y1 - 2 * self.logUnitsMM
font = wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.
FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
dc.SetFont(font)
self.lineHeight = dc.GetCharHeight()
self.linesPerPage = int(self.pageHeight / self.lineHeight)
<|reserved_special_token_0|>
def OnPrintPage(self, page):
dc = self.GetDC()
self.CalculateScale(dc)
self.CalculateLayout(dc)
dc.SetPen(wx.Pen('black', 0))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
r = wx.Rect(wx.Point(self.x1, self.y1), wx.Point(self.x2, self.y2))
dc.DrawRectangle(r)
dc.SetClippingRegion(r)
line = (page - 1) * self.linesPerPage
x = self.x1 + self.logUnitsMM
y = self.y1 + self.logUnitsMM
while line < page * self.linesPerPage:
dc.DrawText(self.lines[line], x, y)
y += self.lineHeight
line += 1
if line >= len(self.lines):
break
return True
class PrintFrameworkSample(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, size=(640, 480), title=
'Print Framework Sample')
self.CreateStatusBar()
self.tc = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE | wx.
TE_DONTWRAP)
self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.
FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
filename = os.path.join(os.path.dirname(__file__), 'sample-text.txt')
with open(filename) as fid:
self.tc.SetValue(fid.read())
self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)
wx.CallAfter(self.tc.SetInsertionPoint, 0)
menu = wx.Menu()
item = menu.Append(-1, 'Page Setup...\tF5',
'Set up page margins and etc.')
self.Bind(wx.EVT_MENU, self.OnPageSetup, item)
item = menu.Append(-1, 'Print Preview...\tF6',
'View the printout on-screen')
self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)
item = menu.Append(-1, 'Print...\tF7', 'Print the document')
self.Bind(wx.EVT_MENU, self.OnPrint, item)
menu.AppendSeparator()
item = menu.Append(wx.ID_ABOUT, 'About', 'About this application')
self.Bind(wx.EVT_MENU, self.OnAbout, item)
item = menu.Append(wx.ID_EXIT, 'E&xit\tCtrl-Q',
'Close this application')
self.Bind(wx.EVT_MENU, self.OnExit, item)
menubar = wx.MenuBar()
menubar.Append(menu, '&File')
self.SetMenuBar(menubar)
self.pdata = wx.PrintData()
self.pdata.SetPaperId(wx.PAPER_LETTER)
self.pdata.SetOrientation(wx.PORTRAIT)
self.margins = wx.Point(15, 15), wx.Point(15, 15)
def OnExit(self, evt):
self.Close()
def OnAbout(self, evt):
wx.MessageBox(
'Print framework sample application\n\nUsing wxPython %s' % wx.
version(), 'About')
def OnClearSelection(self, evt):
evt.Skip()
wx.CallAfter(self.tc.SetInsertionPoint, self.tc.GetInsertionPoint())
def OnPageSetup(self, evt):
data = wx.PageSetupDialogData()
data.SetPrintData(self.pdata)
data.SetDefaultMinMargins(True)
data.SetMarginTopLeft(self.margins[0])
data.SetMarginBottomRight(self.margins[1])
dlg = wx.PageSetupDialog(self, data)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetPageSetupData()
self.pdata = wx.PrintData(data.GetPrintData())
self.pdata.SetPaperId(data.GetPaperId())
self.margins = data.GetMarginTopLeft(), data.GetMarginBottomRight()
dlg.Destroy()
def OnPrintPreview(self, evt):
data = wx.PrintDialogData(self.pdata)
text = self.tc.GetValue()
printout1 = TextDocPrintout(text, 'title', self.margins)
printout2 = TextDocPrintout(text, 'title', self.margins)
preview = wx.PrintPreview(printout1, printout2, data)
if not preview:
wx.MessageBox('Unable to create PrintPreview!', 'Error')
else:
frame = wx.PreviewFrame(preview, self, 'Print Preview', pos=
self.GetPosition(), size=self.GetSize())
frame.Initialize()
frame.Show()
def OnPrint(self, evt):
data = wx.PrintDialogData(self.pdata)
printer = wx.Printer(data)
text = self.tc.GetValue()
printout = TextDocPrintout(text, 'title', self.margins)
useSetupDialog = True
if not printer.Print(self, printout, useSetupDialog
) and printer.GetLastError() == wx.PRINTER_ERROR:
wx.MessageBox(
"""There was a problem printing.
Perhaps your current printer is not set correctly?"""
, 'Printing Error', wx.OK)
else:
data = printer.GetPrintDialogData()
self.pdata = wx.PrintData(data.GetPrintData())
printout.Destroy()
def OnPrintTest(self, evt):
data = wx.PrintDialogData(self.pdata)
dlg = wx.PrintDialog(self, data)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetPrintDialogData()
print_()
print_('GetFromPage:', data.GetFromPage())
print_('GetToPage:', data.GetToPage())
print_('GetMinPage:', data.GetMinPage())
print_('GetMaxPage:', data.GetMaxPage())
print_('GetNoCopies:', data.GetNoCopies())
print_('GetAllPages:', data.GetAllPages())
print_('GetSelection:', data.GetSelection())
print_('GetCollate:', data.GetCollate())
print_('GetPrintToFile:', data.GetPrintToFile())
self.pdata = wx.PrintData(data.GetPrintData())
print_()
print_('GetPrinterName:', self.pdata.GetPrinterName())
dlg.Destroy()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TextDocPrintout(wx.Printout):
"""
A printout class that is able to print simple text documents.
Does not handle page numbers or titles, and it assumes that no
lines are longer than what will fit within the page width. Those
features are left as an exercise for the reader. ;-)
"""
def __init__(self, text, title, margins):
wx.Printout.__init__(self, title)
self.lines = text.split('\n')
self.margins = margins
def HasPage(self, page):
return page <= self.numPages
def GetPageInfo(self):
return 1, self.numPages, 1, self.numPages
def CalculateScale(self, dc):
ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()
ppiScreenX, ppiScreenY = self.GetPPIScreen()
logScale = float(ppiPrinterX) / float(ppiScreenX)
pw, ph = self.GetPageSizePixels()
dw, dh = dc.GetSize()
scale = logScale * float(dw) / float(pw)
dc.SetUserScale(scale, scale)
self.logUnitsMM = float(ppiPrinterX) / (logScale * 25.4)
def CalculateLayout(self, dc):
topLeft, bottomRight = self.margins
dw, dh = dc.GetSize()
self.x1 = topLeft.x * self.logUnitsMM
self.y1 = topLeft.y * self.logUnitsMM
self.x2 = dc.DeviceToLogicalXRel(dw) - bottomRight.x * self.logUnitsMM
self.y2 = dc.DeviceToLogicalYRel(dh) - bottomRight.y * self.logUnitsMM
self.pageHeight = self.y2 - self.y1 - 2 * self.logUnitsMM
font = wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.
FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
dc.SetFont(font)
self.lineHeight = dc.GetCharHeight()
self.linesPerPage = int(self.pageHeight / self.lineHeight)
def OnPreparePrinting(self):
dc = self.GetDC()
self.CalculateScale(dc)
self.CalculateLayout(dc)
self.numPages = len(self.lines) / self.linesPerPage
if len(self.lines) % self.linesPerPage != 0:
self.numPages += 1
def OnPrintPage(self, page):
dc = self.GetDC()
self.CalculateScale(dc)
self.CalculateLayout(dc)
dc.SetPen(wx.Pen('black', 0))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
r = wx.Rect(wx.Point(self.x1, self.y1), wx.Point(self.x2, self.y2))
dc.DrawRectangle(r)
dc.SetClippingRegion(r)
line = (page - 1) * self.linesPerPage
x = self.x1 + self.logUnitsMM
y = self.y1 + self.logUnitsMM
while line < page * self.linesPerPage:
dc.DrawText(self.lines[line], x, y)
y += self.lineHeight
line += 1
if line >= len(self.lines):
break
return True
class PrintFrameworkSample(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, size=(640, 480), title=
'Print Framework Sample')
self.CreateStatusBar()
self.tc = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE | wx.
TE_DONTWRAP)
self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.
FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
filename = os.path.join(os.path.dirname(__file__), 'sample-text.txt')
with open(filename) as fid:
self.tc.SetValue(fid.read())
self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)
wx.CallAfter(self.tc.SetInsertionPoint, 0)
menu = wx.Menu()
item = menu.Append(-1, 'Page Setup...\tF5',
'Set up page margins and etc.')
self.Bind(wx.EVT_MENU, self.OnPageSetup, item)
item = menu.Append(-1, 'Print Preview...\tF6',
'View the printout on-screen')
self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)
item = menu.Append(-1, 'Print...\tF7', 'Print the document')
self.Bind(wx.EVT_MENU, self.OnPrint, item)
menu.AppendSeparator()
item = menu.Append(wx.ID_ABOUT, 'About', 'About this application')
self.Bind(wx.EVT_MENU, self.OnAbout, item)
item = menu.Append(wx.ID_EXIT, 'E&xit\tCtrl-Q',
'Close this application')
self.Bind(wx.EVT_MENU, self.OnExit, item)
menubar = wx.MenuBar()
menubar.Append(menu, '&File')
self.SetMenuBar(menubar)
self.pdata = wx.PrintData()
self.pdata.SetPaperId(wx.PAPER_LETTER)
self.pdata.SetOrientation(wx.PORTRAIT)
self.margins = wx.Point(15, 15), wx.Point(15, 15)
def OnExit(self, evt):
self.Close()
def OnAbout(self, evt):
wx.MessageBox(
'Print framework sample application\n\nUsing wxPython %s' % wx.
version(), 'About')
def OnClearSelection(self, evt):
evt.Skip()
wx.CallAfter(self.tc.SetInsertionPoint, self.tc.GetInsertionPoint())
def OnPageSetup(self, evt):
data = wx.PageSetupDialogData()
data.SetPrintData(self.pdata)
data.SetDefaultMinMargins(True)
data.SetMarginTopLeft(self.margins[0])
data.SetMarginBottomRight(self.margins[1])
dlg = wx.PageSetupDialog(self, data)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetPageSetupData()
self.pdata = wx.PrintData(data.GetPrintData())
self.pdata.SetPaperId(data.GetPaperId())
self.margins = data.GetMarginTopLeft(), data.GetMarginBottomRight()
dlg.Destroy()
def OnPrintPreview(self, evt):
data = wx.PrintDialogData(self.pdata)
text = self.tc.GetValue()
printout1 = TextDocPrintout(text, 'title', self.margins)
printout2 = TextDocPrintout(text, 'title', self.margins)
preview = wx.PrintPreview(printout1, printout2, data)
if not preview:
wx.MessageBox('Unable to create PrintPreview!', 'Error')
else:
frame = wx.PreviewFrame(preview, self, 'Print Preview', pos=
self.GetPosition(), size=self.GetSize())
frame.Initialize()
frame.Show()
def OnPrint(self, evt):
data = wx.PrintDialogData(self.pdata)
printer = wx.Printer(data)
text = self.tc.GetValue()
printout = TextDocPrintout(text, 'title', self.margins)
useSetupDialog = True
if not printer.Print(self, printout, useSetupDialog
) and printer.GetLastError() == wx.PRINTER_ERROR:
wx.MessageBox(
"""There was a problem printing.
Perhaps your current printer is not set correctly?"""
, 'Printing Error', wx.OK)
else:
data = printer.GetPrintDialogData()
self.pdata = wx.PrintData(data.GetPrintData())
printout.Destroy()
def OnPrintTest(self, evt):
data = wx.PrintDialogData(self.pdata)
dlg = wx.PrintDialog(self, data)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetPrintDialogData()
print_()
print_('GetFromPage:', data.GetFromPage())
print_('GetToPage:', data.GetToPage())
print_('GetMinPage:', data.GetMinPage())
print_('GetMaxPage:', data.GetMaxPage())
print_('GetNoCopies:', data.GetNoCopies())
print_('GetAllPages:', data.GetAllPages())
print_('GetSelection:', data.GetSelection())
print_('GetCollate:', data.GetCollate())
print_('GetPrintToFile:', data.GetPrintToFile())
self.pdata = wx.PrintData(data.GetPrintData())
print_()
print_('GetPrinterName:', self.pdata.GetPrinterName())
dlg.Destroy()
<|reserved_special_token_0|>
frm.Show()
app.MainLoop()
<|reserved_special_token_1|>
import wx
from six import print_
import os
FONTSIZE = 10
class TextDocPrintout(wx.Printout):
"""
A printout class that is able to print simple text documents.
Does not handle page numbers or titles, and it assumes that no
lines are longer than what will fit within the page width. Those
features are left as an exercise for the reader. ;-)
"""
def __init__(self, text, title, margins):
wx.Printout.__init__(self, title)
self.lines = text.split('\n')
self.margins = margins
def HasPage(self, page):
return page <= self.numPages
def GetPageInfo(self):
return (1, self.numPages, 1, self.numPages)
def CalculateScale(self, dc):
# Scale the DC such that the printout is roughly the same as
# the screen scaling.
ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()
ppiScreenX, ppiScreenY = self.GetPPIScreen()
logScale = float(ppiPrinterX)/float(ppiScreenX)
# Now adjust if the real page size is reduced (such as when
# drawing on a scaled wx.MemoryDC in the Print Preview.) If
# page width == DC width then nothing changes, otherwise we
# scale down for the DC.
pw, ph = self.GetPageSizePixels()
dw, dh = dc.GetSize()
scale = logScale * float(dw)/float(pw)
# Set the DC's scale.
dc.SetUserScale(scale, scale)
# Find the logical units per millimeter (for calculating the
# margins)
self.logUnitsMM = float(ppiPrinterX)/(logScale*25.4)
def CalculateLayout(self, dc):
# Determine the position of the margins and the
# page/line height
topLeft, bottomRight = self.margins
dw, dh = dc.GetSize()
self.x1 = topLeft.x * self.logUnitsMM
self.y1 = topLeft.y * self.logUnitsMM
self.x2 = dc.DeviceToLogicalXRel(dw) - bottomRight.x * self.logUnitsMM
self.y2 = dc.DeviceToLogicalYRel(dh) - bottomRight.y * self.logUnitsMM
# use a 1mm buffer around the inside of the box, and a few
# pixels between each line
self.pageHeight = self.y2 - self.y1 - 2*self.logUnitsMM
font = wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE,
wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
dc.SetFont(font)
self.lineHeight = dc.GetCharHeight()
self.linesPerPage = int(self.pageHeight/self.lineHeight)
def OnPreparePrinting(self):
# calculate the number of pages
dc = self.GetDC()
self.CalculateScale(dc)
self.CalculateLayout(dc)
self.numPages = len(self.lines) / self.linesPerPage
if len(self.lines) % self.linesPerPage != 0:
self.numPages += 1
def OnPrintPage(self, page):
dc = self.GetDC()
self.CalculateScale(dc)
self.CalculateLayout(dc)
# draw a page outline at the margin points
dc.SetPen(wx.Pen("black", 0))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
r = wx.Rect(wx.Point(self.x1, self.y1), wx.Point(self.x2, self.y2))
dc.DrawRectangle(r)
dc.SetClippingRegion(r)
# Draw the text lines for this page
line = (page-1) * self.linesPerPage
x = self.x1 + self.logUnitsMM
y = self.y1 + self.logUnitsMM
while line < (page * self.linesPerPage):
dc.DrawText(self.lines[line], x, y)
y += self.lineHeight
line += 1
if line >= len(self.lines):
break
return True
class PrintFrameworkSample(wx.Frame):
    """Top-level demo frame for the wx printing framework.

    Hosts an editable text control showing a sample document, plus a File
    menu wired to page setup, print preview, and printing.  All print
    operations share one wx.PrintData instance (self.pdata) and a pair of
    page margins in millimetres (self.margins).
    """
    def __init__(self):
        """Build the UI, the File menu, and the default print settings."""
        wx.Frame.__init__(self, None, size=(640, 480),
                          title="Print Framework Sample")
        self.CreateStatusBar()
        # A text widget to display the doc and let it be edited
        self.tc = wx.TextCtrl(self, -1, "",
                              style=wx.TE_MULTILINE|wx.TE_DONTWRAP)
        self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE,
                                wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
        # Load the sample document that ships alongside this script.
        filename = os.path.join(os.path.dirname(__file__), "sample-text.txt")
        with open(filename) as fid:
            self.tc.SetValue(fid.read())
        self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)
        wx.CallAfter(self.tc.SetInsertionPoint, 0)
        # Create the menu and menubar
        menu = wx.Menu()
        item = menu.Append(-1, "Page Setup...\tF5",
                           "Set up page margins and etc.")
        self.Bind(wx.EVT_MENU, self.OnPageSetup, item)
        item = menu.Append(-1, "Print Preview...\tF6",
                           "View the printout on-screen")
        self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)
        item = menu.Append(-1, "Print...\tF7", "Print the document")
        self.Bind(wx.EVT_MENU, self.OnPrint, item)
        menu.AppendSeparator()
        ## item = menu.Append(-1, "Test other stuff...\tF9", "")
        ## self.Bind(wx.EVT_MENU, self.OnPrintTest, item)
        ## menu.AppendSeparator()
        item = menu.Append(wx.ID_ABOUT, "About", "About this application")
        self.Bind(wx.EVT_MENU, self.OnAbout, item)
        item = menu.Append(wx.ID_EXIT, "E&xit\tCtrl-Q", "Close this application")
        self.Bind(wx.EVT_MENU, self.OnExit, item)
        menubar = wx.MenuBar()
        menubar.Append(menu, "&File")
        self.SetMenuBar(menubar)
        # initialize the print data and set some default values
        self.pdata = wx.PrintData()
        self.pdata.SetPaperId(wx.PAPER_LETTER)
        self.pdata.SetOrientation(wx.PORTRAIT)
        # (top-left, bottom-right) margins in millimetres.
        self.margins = (wx.Point(15,15), wx.Point(15,15))
    def OnExit(self, evt):
        """Close the frame (File -> Exit)."""
        self.Close()
    def OnAbout(self, evt):
        """Show a simple About message box."""
        wx.MessageBox('Print framework sample application\n'
                      '\n'
                      'Using wxPython %s' % wx.version(),
                      'About')
    def OnClearSelection(self, evt):
        """On focus, re-set the insertion point to collapse any selection.

        Uses wx.CallAfter so the reset runs after the default focus
        handling (evt.Skip()) has completed.
        """
        evt.Skip()
        wx.CallAfter(self.tc.SetInsertionPoint,
                     self.tc.GetInsertionPoint())
    def OnPageSetup(self, evt):
        """Show the page-setup dialog and store the chosen settings.

        On OK, copies the dialog's print data into self.pdata and the
        chosen margins into self.margins for later print/preview calls.
        """
        data = wx.PageSetupDialogData()
        data.SetPrintData(self.pdata)
        data.SetDefaultMinMargins(True)
        data.SetMarginTopLeft(self.margins[0])
        data.SetMarginBottomRight(self.margins[1])
        dlg = wx.PageSetupDialog(self, data)
        if dlg.ShowModal() == wx.ID_OK:
            data = dlg.GetPageSetupData()
            self.pdata = wx.PrintData(data.GetPrintData()) # force a copy
            self.pdata.SetPaperId(data.GetPaperId())
            #print_("paperID %r, paperSize %r" % (self.pdata.GetPaperId(), self.pdata.GetPaperSize()))
            self.margins = (data.GetMarginTopLeft(),
                            data.GetMarginBottomRight())
        dlg.Destroy()
    def OnPrintPreview(self, evt):
        """Open an on-screen print preview of the current document.

        wx.PrintPreview needs two printout objects: one for the preview
        rendering and one kept for an actual print started from the
        preview window.
        """
        data = wx.PrintDialogData(self.pdata)
        text = self.tc.GetValue()
        printout1 = TextDocPrintout(text, "title", self.margins)
        printout2 = TextDocPrintout(text, "title", self.margins)
        preview = wx.PrintPreview(printout1, printout2, data)
        if not preview:
            wx.MessageBox("Unable to create PrintPreview!", "Error")
        else:
            # create the preview frame such that it overlays the app frame
            frame = wx.PreviewFrame(preview, self, "Print Preview",
                                    pos=self.GetPosition(),
                                    size=self.GetSize())
            frame.Initialize()
            frame.Show()
    def OnPrint(self, evt):
        """Print the current document via the system print dialog.

        On success (or user cancel) the dialog's print data is copied back
        into self.pdata so later operations reuse the chosen settings.
        """
        data = wx.PrintDialogData(self.pdata)
        printer = wx.Printer(data)
        text = self.tc.GetValue()
        printout = TextDocPrintout(text, "title", self.margins)
        useSetupDialog = True
        if not printer.Print(self, printout, useSetupDialog) \
           and printer.GetLastError() == wx.PRINTER_ERROR:
            wx.MessageBox(
                "There was a problem printing.\n"
                "Perhaps your current printer is not set correctly?",
                "Printing Error", wx.OK)
        else:
            data = printer.GetPrintDialogData()
            self.pdata = wx.PrintData(data.GetPrintData()) # force a copy
        printout.Destroy()
    def OnPrintTest(self, evt):
        """Debug helper: show a print dialog and dump its settings.

        Not reachable from the UI by default; its menu binding is
        commented out in __init__.
        """
        data = wx.PrintDialogData(self.pdata)
        dlg = wx.PrintDialog(self, data)
        if dlg.ShowModal() == wx.ID_OK:
            data = dlg.GetPrintDialogData()
            print_()
            print_("GetFromPage:", data.GetFromPage())
            print_("GetToPage:", data.GetToPage())
            print_("GetMinPage:", data.GetMinPage())
            print_("GetMaxPage:", data.GetMaxPage())
            print_("GetNoCopies:", data.GetNoCopies())
            print_("GetAllPages:", data.GetAllPages())
            print_("GetSelection:", data.GetSelection())
            print_("GetCollate:", data.GetCollate())
            print_("GetPrintToFile:", data.GetPrintToFile())
            self.pdata = wx.PrintData(data.GetPrintData())
            print_()
            print_("GetPrinterName:", self.pdata.GetPrinterName())
        dlg.Destroy()
# Script entry point: create the wx application, show the sample frame,
# and run the event loop until the frame is closed.
app = wx.App()
frm = PrintFrameworkSample()
frm.Show()
app.MainLoop()
|
flexible
|
{
"blob_id": "2790bd80949bafe4e98ab9aca9cf80a6a0f31490",
"index": 6200,
"step-1": "<mask token>\n\n\nclass TextDocPrintout(wx.Printout):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass PrintFrameworkSample(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, None, size=(640, 480), title=\n 'Print Framework Sample')\n self.CreateStatusBar()\n self.tc = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE | wx.\n TE_DONTWRAP)\n self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.\n FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))\n filename = os.path.join(os.path.dirname(__file__), 'sample-text.txt')\n with open(filename) as fid:\n self.tc.SetValue(fid.read())\n self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)\n wx.CallAfter(self.tc.SetInsertionPoint, 0)\n menu = wx.Menu()\n item = menu.Append(-1, 'Page Setup...\\tF5',\n 'Set up page margins and etc.')\n self.Bind(wx.EVT_MENU, self.OnPageSetup, item)\n item = menu.Append(-1, 'Print Preview...\\tF6',\n 'View the printout on-screen')\n self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)\n item = menu.Append(-1, 'Print...\\tF7', 'Print the document')\n self.Bind(wx.EVT_MENU, self.OnPrint, item)\n menu.AppendSeparator()\n item = menu.Append(wx.ID_ABOUT, 'About', 'About this application')\n self.Bind(wx.EVT_MENU, self.OnAbout, item)\n item = menu.Append(wx.ID_EXIT, 'E&xit\\tCtrl-Q',\n 'Close this application')\n self.Bind(wx.EVT_MENU, self.OnExit, item)\n menubar = wx.MenuBar()\n menubar.Append(menu, '&File')\n self.SetMenuBar(menubar)\n self.pdata = wx.PrintData()\n self.pdata.SetPaperId(wx.PAPER_LETTER)\n self.pdata.SetOrientation(wx.PORTRAIT)\n self.margins = wx.Point(15, 15), wx.Point(15, 15)\n\n def OnExit(self, evt):\n self.Close()\n\n def OnAbout(self, evt):\n wx.MessageBox(\n 'Print framework sample application\\n\\nUsing wxPython %s' % wx.\n version(), 'About')\n\n def OnClearSelection(self, evt):\n evt.Skip()\n wx.CallAfter(self.tc.SetInsertionPoint, self.tc.GetInsertionPoint())\n\n def 
OnPageSetup(self, evt):\n data = wx.PageSetupDialogData()\n data.SetPrintData(self.pdata)\n data.SetDefaultMinMargins(True)\n data.SetMarginTopLeft(self.margins[0])\n data.SetMarginBottomRight(self.margins[1])\n dlg = wx.PageSetupDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPageSetupData()\n self.pdata = wx.PrintData(data.GetPrintData())\n self.pdata.SetPaperId(data.GetPaperId())\n self.margins = data.GetMarginTopLeft(), data.GetMarginBottomRight()\n dlg.Destroy()\n\n def OnPrintPreview(self, evt):\n data = wx.PrintDialogData(self.pdata)\n text = self.tc.GetValue()\n printout1 = TextDocPrintout(text, 'title', self.margins)\n printout2 = TextDocPrintout(text, 'title', self.margins)\n preview = wx.PrintPreview(printout1, printout2, data)\n if not preview:\n wx.MessageBox('Unable to create PrintPreview!', 'Error')\n else:\n frame = wx.PreviewFrame(preview, self, 'Print Preview', pos=\n self.GetPosition(), size=self.GetSize())\n frame.Initialize()\n frame.Show()\n\n def OnPrint(self, evt):\n data = wx.PrintDialogData(self.pdata)\n printer = wx.Printer(data)\n text = self.tc.GetValue()\n printout = TextDocPrintout(text, 'title', self.margins)\n useSetupDialog = True\n if not printer.Print(self, printout, useSetupDialog\n ) and printer.GetLastError() == wx.PRINTER_ERROR:\n wx.MessageBox(\n \"\"\"There was a problem printing.\nPerhaps your current printer is not set correctly?\"\"\"\n , 'Printing Error', wx.OK)\n else:\n data = printer.GetPrintDialogData()\n self.pdata = wx.PrintData(data.GetPrintData())\n printout.Destroy()\n\n def OnPrintTest(self, evt):\n data = wx.PrintDialogData(self.pdata)\n dlg = wx.PrintDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPrintDialogData()\n print_()\n print_('GetFromPage:', data.GetFromPage())\n print_('GetToPage:', data.GetToPage())\n print_('GetMinPage:', data.GetMinPage())\n print_('GetMaxPage:', data.GetMaxPage())\n print_('GetNoCopies:', data.GetNoCopies())\n print_('GetAllPages:', 
data.GetAllPages())\n print_('GetSelection:', data.GetSelection())\n print_('GetCollate:', data.GetCollate())\n print_('GetPrintToFile:', data.GetPrintToFile())\n self.pdata = wx.PrintData(data.GetPrintData())\n print_()\n print_('GetPrinterName:', self.pdata.GetPrinterName())\n dlg.Destroy()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TextDocPrintout(wx.Printout):\n <mask token>\n <mask token>\n <mask token>\n\n def GetPageInfo(self):\n return 1, self.numPages, 1, self.numPages\n\n def CalculateScale(self, dc):\n ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()\n ppiScreenX, ppiScreenY = self.GetPPIScreen()\n logScale = float(ppiPrinterX) / float(ppiScreenX)\n pw, ph = self.GetPageSizePixels()\n dw, dh = dc.GetSize()\n scale = logScale * float(dw) / float(pw)\n dc.SetUserScale(scale, scale)\n self.logUnitsMM = float(ppiPrinterX) / (logScale * 25.4)\n\n def CalculateLayout(self, dc):\n topLeft, bottomRight = self.margins\n dw, dh = dc.GetSize()\n self.x1 = topLeft.x * self.logUnitsMM\n self.y1 = topLeft.y * self.logUnitsMM\n self.x2 = dc.DeviceToLogicalXRel(dw) - bottomRight.x * self.logUnitsMM\n self.y2 = dc.DeviceToLogicalYRel(dh) - bottomRight.y * self.logUnitsMM\n self.pageHeight = self.y2 - self.y1 - 2 * self.logUnitsMM\n font = wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.\n FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)\n dc.SetFont(font)\n self.lineHeight = dc.GetCharHeight()\n self.linesPerPage = int(self.pageHeight / self.lineHeight)\n <mask token>\n\n def OnPrintPage(self, page):\n dc = self.GetDC()\n self.CalculateScale(dc)\n self.CalculateLayout(dc)\n dc.SetPen(wx.Pen('black', 0))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n r = wx.Rect(wx.Point(self.x1, self.y1), wx.Point(self.x2, self.y2))\n dc.DrawRectangle(r)\n dc.SetClippingRegion(r)\n line = (page - 1) * self.linesPerPage\n x = self.x1 + self.logUnitsMM\n y = self.y1 + self.logUnitsMM\n while line < page * self.linesPerPage:\n dc.DrawText(self.lines[line], x, y)\n y += self.lineHeight\n line += 1\n if line >= len(self.lines):\n break\n return True\n\n\nclass PrintFrameworkSample(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, None, size=(640, 480), title=\n 'Print Framework Sample')\n self.CreateStatusBar()\n self.tc = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE | wx.\n TE_DONTWRAP)\n 
self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.\n FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))\n filename = os.path.join(os.path.dirname(__file__), 'sample-text.txt')\n with open(filename) as fid:\n self.tc.SetValue(fid.read())\n self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)\n wx.CallAfter(self.tc.SetInsertionPoint, 0)\n menu = wx.Menu()\n item = menu.Append(-1, 'Page Setup...\\tF5',\n 'Set up page margins and etc.')\n self.Bind(wx.EVT_MENU, self.OnPageSetup, item)\n item = menu.Append(-1, 'Print Preview...\\tF6',\n 'View the printout on-screen')\n self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)\n item = menu.Append(-1, 'Print...\\tF7', 'Print the document')\n self.Bind(wx.EVT_MENU, self.OnPrint, item)\n menu.AppendSeparator()\n item = menu.Append(wx.ID_ABOUT, 'About', 'About this application')\n self.Bind(wx.EVT_MENU, self.OnAbout, item)\n item = menu.Append(wx.ID_EXIT, 'E&xit\\tCtrl-Q',\n 'Close this application')\n self.Bind(wx.EVT_MENU, self.OnExit, item)\n menubar = wx.MenuBar()\n menubar.Append(menu, '&File')\n self.SetMenuBar(menubar)\n self.pdata = wx.PrintData()\n self.pdata.SetPaperId(wx.PAPER_LETTER)\n self.pdata.SetOrientation(wx.PORTRAIT)\n self.margins = wx.Point(15, 15), wx.Point(15, 15)\n\n def OnExit(self, evt):\n self.Close()\n\n def OnAbout(self, evt):\n wx.MessageBox(\n 'Print framework sample application\\n\\nUsing wxPython %s' % wx.\n version(), 'About')\n\n def OnClearSelection(self, evt):\n evt.Skip()\n wx.CallAfter(self.tc.SetInsertionPoint, self.tc.GetInsertionPoint())\n\n def OnPageSetup(self, evt):\n data = wx.PageSetupDialogData()\n data.SetPrintData(self.pdata)\n data.SetDefaultMinMargins(True)\n data.SetMarginTopLeft(self.margins[0])\n data.SetMarginBottomRight(self.margins[1])\n dlg = wx.PageSetupDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPageSetupData()\n self.pdata = wx.PrintData(data.GetPrintData())\n self.pdata.SetPaperId(data.GetPaperId())\n self.margins = data.GetMarginTopLeft(), 
data.GetMarginBottomRight()\n dlg.Destroy()\n\n def OnPrintPreview(self, evt):\n data = wx.PrintDialogData(self.pdata)\n text = self.tc.GetValue()\n printout1 = TextDocPrintout(text, 'title', self.margins)\n printout2 = TextDocPrintout(text, 'title', self.margins)\n preview = wx.PrintPreview(printout1, printout2, data)\n if not preview:\n wx.MessageBox('Unable to create PrintPreview!', 'Error')\n else:\n frame = wx.PreviewFrame(preview, self, 'Print Preview', pos=\n self.GetPosition(), size=self.GetSize())\n frame.Initialize()\n frame.Show()\n\n def OnPrint(self, evt):\n data = wx.PrintDialogData(self.pdata)\n printer = wx.Printer(data)\n text = self.tc.GetValue()\n printout = TextDocPrintout(text, 'title', self.margins)\n useSetupDialog = True\n if not printer.Print(self, printout, useSetupDialog\n ) and printer.GetLastError() == wx.PRINTER_ERROR:\n wx.MessageBox(\n \"\"\"There was a problem printing.\nPerhaps your current printer is not set correctly?\"\"\"\n , 'Printing Error', wx.OK)\n else:\n data = printer.GetPrintDialogData()\n self.pdata = wx.PrintData(data.GetPrintData())\n printout.Destroy()\n\n def OnPrintTest(self, evt):\n data = wx.PrintDialogData(self.pdata)\n dlg = wx.PrintDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPrintDialogData()\n print_()\n print_('GetFromPage:', data.GetFromPage())\n print_('GetToPage:', data.GetToPage())\n print_('GetMinPage:', data.GetMinPage())\n print_('GetMaxPage:', data.GetMaxPage())\n print_('GetNoCopies:', data.GetNoCopies())\n print_('GetAllPages:', data.GetAllPages())\n print_('GetSelection:', data.GetSelection())\n print_('GetCollate:', data.GetCollate())\n print_('GetPrintToFile:', data.GetPrintToFile())\n self.pdata = wx.PrintData(data.GetPrintData())\n print_()\n print_('GetPrinterName:', self.pdata.GetPrinterName())\n dlg.Destroy()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TextDocPrintout(wx.Printout):\n <mask token>\n\n def __init__(self, text, title, margins):\n wx.Printout.__init__(self, title)\n self.lines = text.split('\\n')\n self.margins = margins\n\n def HasPage(self, page):\n return page <= self.numPages\n\n def GetPageInfo(self):\n return 1, self.numPages, 1, self.numPages\n\n def CalculateScale(self, dc):\n ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()\n ppiScreenX, ppiScreenY = self.GetPPIScreen()\n logScale = float(ppiPrinterX) / float(ppiScreenX)\n pw, ph = self.GetPageSizePixels()\n dw, dh = dc.GetSize()\n scale = logScale * float(dw) / float(pw)\n dc.SetUserScale(scale, scale)\n self.logUnitsMM = float(ppiPrinterX) / (logScale * 25.4)\n\n def CalculateLayout(self, dc):\n topLeft, bottomRight = self.margins\n dw, dh = dc.GetSize()\n self.x1 = topLeft.x * self.logUnitsMM\n self.y1 = topLeft.y * self.logUnitsMM\n self.x2 = dc.DeviceToLogicalXRel(dw) - bottomRight.x * self.logUnitsMM\n self.y2 = dc.DeviceToLogicalYRel(dh) - bottomRight.y * self.logUnitsMM\n self.pageHeight = self.y2 - self.y1 - 2 * self.logUnitsMM\n font = wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.\n FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)\n dc.SetFont(font)\n self.lineHeight = dc.GetCharHeight()\n self.linesPerPage = int(self.pageHeight / self.lineHeight)\n <mask token>\n\n def OnPrintPage(self, page):\n dc = self.GetDC()\n self.CalculateScale(dc)\n self.CalculateLayout(dc)\n dc.SetPen(wx.Pen('black', 0))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n r = wx.Rect(wx.Point(self.x1, self.y1), wx.Point(self.x2, self.y2))\n dc.DrawRectangle(r)\n dc.SetClippingRegion(r)\n line = (page - 1) * self.linesPerPage\n x = self.x1 + self.logUnitsMM\n y = self.y1 + self.logUnitsMM\n while line < page * self.linesPerPage:\n dc.DrawText(self.lines[line], x, y)\n y += self.lineHeight\n line += 1\n if line >= len(self.lines):\n break\n return True\n\n\nclass PrintFrameworkSample(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, 
None, size=(640, 480), title=\n 'Print Framework Sample')\n self.CreateStatusBar()\n self.tc = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE | wx.\n TE_DONTWRAP)\n self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.\n FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))\n filename = os.path.join(os.path.dirname(__file__), 'sample-text.txt')\n with open(filename) as fid:\n self.tc.SetValue(fid.read())\n self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)\n wx.CallAfter(self.tc.SetInsertionPoint, 0)\n menu = wx.Menu()\n item = menu.Append(-1, 'Page Setup...\\tF5',\n 'Set up page margins and etc.')\n self.Bind(wx.EVT_MENU, self.OnPageSetup, item)\n item = menu.Append(-1, 'Print Preview...\\tF6',\n 'View the printout on-screen')\n self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)\n item = menu.Append(-1, 'Print...\\tF7', 'Print the document')\n self.Bind(wx.EVT_MENU, self.OnPrint, item)\n menu.AppendSeparator()\n item = menu.Append(wx.ID_ABOUT, 'About', 'About this application')\n self.Bind(wx.EVT_MENU, self.OnAbout, item)\n item = menu.Append(wx.ID_EXIT, 'E&xit\\tCtrl-Q',\n 'Close this application')\n self.Bind(wx.EVT_MENU, self.OnExit, item)\n menubar = wx.MenuBar()\n menubar.Append(menu, '&File')\n self.SetMenuBar(menubar)\n self.pdata = wx.PrintData()\n self.pdata.SetPaperId(wx.PAPER_LETTER)\n self.pdata.SetOrientation(wx.PORTRAIT)\n self.margins = wx.Point(15, 15), wx.Point(15, 15)\n\n def OnExit(self, evt):\n self.Close()\n\n def OnAbout(self, evt):\n wx.MessageBox(\n 'Print framework sample application\\n\\nUsing wxPython %s' % wx.\n version(), 'About')\n\n def OnClearSelection(self, evt):\n evt.Skip()\n wx.CallAfter(self.tc.SetInsertionPoint, self.tc.GetInsertionPoint())\n\n def OnPageSetup(self, evt):\n data = wx.PageSetupDialogData()\n data.SetPrintData(self.pdata)\n data.SetDefaultMinMargins(True)\n data.SetMarginTopLeft(self.margins[0])\n data.SetMarginBottomRight(self.margins[1])\n dlg = wx.PageSetupDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n 
data = dlg.GetPageSetupData()\n self.pdata = wx.PrintData(data.GetPrintData())\n self.pdata.SetPaperId(data.GetPaperId())\n self.margins = data.GetMarginTopLeft(), data.GetMarginBottomRight()\n dlg.Destroy()\n\n def OnPrintPreview(self, evt):\n data = wx.PrintDialogData(self.pdata)\n text = self.tc.GetValue()\n printout1 = TextDocPrintout(text, 'title', self.margins)\n printout2 = TextDocPrintout(text, 'title', self.margins)\n preview = wx.PrintPreview(printout1, printout2, data)\n if not preview:\n wx.MessageBox('Unable to create PrintPreview!', 'Error')\n else:\n frame = wx.PreviewFrame(preview, self, 'Print Preview', pos=\n self.GetPosition(), size=self.GetSize())\n frame.Initialize()\n frame.Show()\n\n def OnPrint(self, evt):\n data = wx.PrintDialogData(self.pdata)\n printer = wx.Printer(data)\n text = self.tc.GetValue()\n printout = TextDocPrintout(text, 'title', self.margins)\n useSetupDialog = True\n if not printer.Print(self, printout, useSetupDialog\n ) and printer.GetLastError() == wx.PRINTER_ERROR:\n wx.MessageBox(\n \"\"\"There was a problem printing.\nPerhaps your current printer is not set correctly?\"\"\"\n , 'Printing Error', wx.OK)\n else:\n data = printer.GetPrintDialogData()\n self.pdata = wx.PrintData(data.GetPrintData())\n printout.Destroy()\n\n def OnPrintTest(self, evt):\n data = wx.PrintDialogData(self.pdata)\n dlg = wx.PrintDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPrintDialogData()\n print_()\n print_('GetFromPage:', data.GetFromPage())\n print_('GetToPage:', data.GetToPage())\n print_('GetMinPage:', data.GetMinPage())\n print_('GetMaxPage:', data.GetMaxPage())\n print_('GetNoCopies:', data.GetNoCopies())\n print_('GetAllPages:', data.GetAllPages())\n print_('GetSelection:', data.GetSelection())\n print_('GetCollate:', data.GetCollate())\n print_('GetPrintToFile:', data.GetPrintToFile())\n self.pdata = wx.PrintData(data.GetPrintData())\n print_()\n print_('GetPrinterName:', self.pdata.GetPrinterName())\n 
dlg.Destroy()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TextDocPrintout(wx.Printout):\n \"\"\"\n A printout class that is able to print simple text documents.\n Does not handle page numbers or titles, and it assumes that no\n lines are longer than what will fit within the page width. Those\n features are left as an exercise for the reader. ;-)\n \"\"\"\n\n def __init__(self, text, title, margins):\n wx.Printout.__init__(self, title)\n self.lines = text.split('\\n')\n self.margins = margins\n\n def HasPage(self, page):\n return page <= self.numPages\n\n def GetPageInfo(self):\n return 1, self.numPages, 1, self.numPages\n\n def CalculateScale(self, dc):\n ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()\n ppiScreenX, ppiScreenY = self.GetPPIScreen()\n logScale = float(ppiPrinterX) / float(ppiScreenX)\n pw, ph = self.GetPageSizePixels()\n dw, dh = dc.GetSize()\n scale = logScale * float(dw) / float(pw)\n dc.SetUserScale(scale, scale)\n self.logUnitsMM = float(ppiPrinterX) / (logScale * 25.4)\n\n def CalculateLayout(self, dc):\n topLeft, bottomRight = self.margins\n dw, dh = dc.GetSize()\n self.x1 = topLeft.x * self.logUnitsMM\n self.y1 = topLeft.y * self.logUnitsMM\n self.x2 = dc.DeviceToLogicalXRel(dw) - bottomRight.x * self.logUnitsMM\n self.y2 = dc.DeviceToLogicalYRel(dh) - bottomRight.y * self.logUnitsMM\n self.pageHeight = self.y2 - self.y1 - 2 * self.logUnitsMM\n font = wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.\n FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)\n dc.SetFont(font)\n self.lineHeight = dc.GetCharHeight()\n self.linesPerPage = int(self.pageHeight / self.lineHeight)\n\n def OnPreparePrinting(self):\n dc = self.GetDC()\n self.CalculateScale(dc)\n self.CalculateLayout(dc)\n self.numPages = len(self.lines) / self.linesPerPage\n if len(self.lines) % self.linesPerPage != 0:\n self.numPages += 1\n\n def OnPrintPage(self, page):\n dc = self.GetDC()\n self.CalculateScale(dc)\n self.CalculateLayout(dc)\n dc.SetPen(wx.Pen('black', 0))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n r = 
wx.Rect(wx.Point(self.x1, self.y1), wx.Point(self.x2, self.y2))\n dc.DrawRectangle(r)\n dc.SetClippingRegion(r)\n line = (page - 1) * self.linesPerPage\n x = self.x1 + self.logUnitsMM\n y = self.y1 + self.logUnitsMM\n while line < page * self.linesPerPage:\n dc.DrawText(self.lines[line], x, y)\n y += self.lineHeight\n line += 1\n if line >= len(self.lines):\n break\n return True\n\n\nclass PrintFrameworkSample(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, None, size=(640, 480), title=\n 'Print Framework Sample')\n self.CreateStatusBar()\n self.tc = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE | wx.\n TE_DONTWRAP)\n self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.\n FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))\n filename = os.path.join(os.path.dirname(__file__), 'sample-text.txt')\n with open(filename) as fid:\n self.tc.SetValue(fid.read())\n self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)\n wx.CallAfter(self.tc.SetInsertionPoint, 0)\n menu = wx.Menu()\n item = menu.Append(-1, 'Page Setup...\\tF5',\n 'Set up page margins and etc.')\n self.Bind(wx.EVT_MENU, self.OnPageSetup, item)\n item = menu.Append(-1, 'Print Preview...\\tF6',\n 'View the printout on-screen')\n self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)\n item = menu.Append(-1, 'Print...\\tF7', 'Print the document')\n self.Bind(wx.EVT_MENU, self.OnPrint, item)\n menu.AppendSeparator()\n item = menu.Append(wx.ID_ABOUT, 'About', 'About this application')\n self.Bind(wx.EVT_MENU, self.OnAbout, item)\n item = menu.Append(wx.ID_EXIT, 'E&xit\\tCtrl-Q',\n 'Close this application')\n self.Bind(wx.EVT_MENU, self.OnExit, item)\n menubar = wx.MenuBar()\n menubar.Append(menu, '&File')\n self.SetMenuBar(menubar)\n self.pdata = wx.PrintData()\n self.pdata.SetPaperId(wx.PAPER_LETTER)\n self.pdata.SetOrientation(wx.PORTRAIT)\n self.margins = wx.Point(15, 15), wx.Point(15, 15)\n\n def OnExit(self, evt):\n self.Close()\n\n def OnAbout(self, evt):\n wx.MessageBox(\n 'Print framework sample 
application\\n\\nUsing wxPython %s' % wx.\n version(), 'About')\n\n def OnClearSelection(self, evt):\n evt.Skip()\n wx.CallAfter(self.tc.SetInsertionPoint, self.tc.GetInsertionPoint())\n\n def OnPageSetup(self, evt):\n data = wx.PageSetupDialogData()\n data.SetPrintData(self.pdata)\n data.SetDefaultMinMargins(True)\n data.SetMarginTopLeft(self.margins[0])\n data.SetMarginBottomRight(self.margins[1])\n dlg = wx.PageSetupDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPageSetupData()\n self.pdata = wx.PrintData(data.GetPrintData())\n self.pdata.SetPaperId(data.GetPaperId())\n self.margins = data.GetMarginTopLeft(), data.GetMarginBottomRight()\n dlg.Destroy()\n\n def OnPrintPreview(self, evt):\n data = wx.PrintDialogData(self.pdata)\n text = self.tc.GetValue()\n printout1 = TextDocPrintout(text, 'title', self.margins)\n printout2 = TextDocPrintout(text, 'title', self.margins)\n preview = wx.PrintPreview(printout1, printout2, data)\n if not preview:\n wx.MessageBox('Unable to create PrintPreview!', 'Error')\n else:\n frame = wx.PreviewFrame(preview, self, 'Print Preview', pos=\n self.GetPosition(), size=self.GetSize())\n frame.Initialize()\n frame.Show()\n\n def OnPrint(self, evt):\n data = wx.PrintDialogData(self.pdata)\n printer = wx.Printer(data)\n text = self.tc.GetValue()\n printout = TextDocPrintout(text, 'title', self.margins)\n useSetupDialog = True\n if not printer.Print(self, printout, useSetupDialog\n ) and printer.GetLastError() == wx.PRINTER_ERROR:\n wx.MessageBox(\n \"\"\"There was a problem printing.\nPerhaps your current printer is not set correctly?\"\"\"\n , 'Printing Error', wx.OK)\n else:\n data = printer.GetPrintDialogData()\n self.pdata = wx.PrintData(data.GetPrintData())\n printout.Destroy()\n\n def OnPrintTest(self, evt):\n data = wx.PrintDialogData(self.pdata)\n dlg = wx.PrintDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPrintDialogData()\n print_()\n print_('GetFromPage:', data.GetFromPage())\n 
print_('GetToPage:', data.GetToPage())\n print_('GetMinPage:', data.GetMinPage())\n print_('GetMaxPage:', data.GetMaxPage())\n print_('GetNoCopies:', data.GetNoCopies())\n print_('GetAllPages:', data.GetAllPages())\n print_('GetSelection:', data.GetSelection())\n print_('GetCollate:', data.GetCollate())\n print_('GetPrintToFile:', data.GetPrintToFile())\n self.pdata = wx.PrintData(data.GetPrintData())\n print_()\n print_('GetPrinterName:', self.pdata.GetPrinterName())\n dlg.Destroy()\n\n\n<mask token>\nfrm.Show()\napp.MainLoop()\n",
"step-5": "import wx\nfrom six import print_\nimport os\n\nFONTSIZE = 10\n\nclass TextDocPrintout(wx.Printout):\n \"\"\"\n A printout class that is able to print simple text documents.\n Does not handle page numbers or titles, and it assumes that no\n lines are longer than what will fit within the page width. Those\n features are left as an exercise for the reader. ;-)\n \"\"\"\n def __init__(self, text, title, margins):\n wx.Printout.__init__(self, title)\n self.lines = text.split('\\n')\n self.margins = margins\n\n\n def HasPage(self, page):\n return page <= self.numPages\n\n def GetPageInfo(self):\n return (1, self.numPages, 1, self.numPages)\n\n\n def CalculateScale(self, dc):\n # Scale the DC such that the printout is roughly the same as\n # the screen scaling.\n ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()\n ppiScreenX, ppiScreenY = self.GetPPIScreen()\n logScale = float(ppiPrinterX)/float(ppiScreenX)\n\n # Now adjust if the real page size is reduced (such as when\n # drawing on a scaled wx.MemoryDC in the Print Preview.) 
If\n # page width == DC width then nothing changes, otherwise we\n # scale down for the DC.\n pw, ph = self.GetPageSizePixels()\n dw, dh = dc.GetSize()\n scale = logScale * float(dw)/float(pw)\n\n # Set the DC's scale.\n dc.SetUserScale(scale, scale)\n\n # Find the logical units per millimeter (for calculating the\n # margins)\n self.logUnitsMM = float(ppiPrinterX)/(logScale*25.4)\n\n\n def CalculateLayout(self, dc):\n # Determine the position of the margins and the\n # page/line height\n topLeft, bottomRight = self.margins\n dw, dh = dc.GetSize()\n self.x1 = topLeft.x * self.logUnitsMM\n self.y1 = topLeft.y * self.logUnitsMM\n self.x2 = dc.DeviceToLogicalXRel(dw) - bottomRight.x * self.logUnitsMM\n self.y2 = dc.DeviceToLogicalYRel(dh) - bottomRight.y * self.logUnitsMM\n\n # use a 1mm buffer around the inside of the box, and a few\n # pixels between each line\n self.pageHeight = self.y2 - self.y1 - 2*self.logUnitsMM\n font = wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE,\n wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)\n dc.SetFont(font)\n self.lineHeight = dc.GetCharHeight()\n self.linesPerPage = int(self.pageHeight/self.lineHeight)\n\n\n def OnPreparePrinting(self):\n # calculate the number of pages\n dc = self.GetDC()\n self.CalculateScale(dc)\n self.CalculateLayout(dc)\n self.numPages = len(self.lines) / self.linesPerPage\n if len(self.lines) % self.linesPerPage != 0:\n self.numPages += 1\n\n\n def OnPrintPage(self, page):\n dc = self.GetDC()\n self.CalculateScale(dc)\n self.CalculateLayout(dc)\n\n # draw a page outline at the margin points\n dc.SetPen(wx.Pen(\"black\", 0))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n r = wx.Rect(wx.Point(self.x1, self.y1), wx.Point(self.x2, self.y2))\n dc.DrawRectangle(r)\n dc.SetClippingRegion(r)\n\n # Draw the text lines for this page\n line = (page-1) * self.linesPerPage\n x = self.x1 + self.logUnitsMM\n y = self.y1 + self.logUnitsMM\n while line < (page * self.linesPerPage):\n dc.DrawText(self.lines[line], x, y)\n y += self.lineHeight\n 
line += 1\n if line >= len(self.lines):\n break\n return True\n\n\n\nclass PrintFrameworkSample(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self, None, size=(640, 480),\n title=\"Print Framework Sample\")\n self.CreateStatusBar()\n\n # A text widget to display the doc and let it be edited\n self.tc = wx.TextCtrl(self, -1, \"\",\n style=wx.TE_MULTILINE|wx.TE_DONTWRAP)\n self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE,\n wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))\n filename = os.path.join(os.path.dirname(__file__), \"sample-text.txt\")\n with open(filename) as fid:\n self.tc.SetValue(fid.read())\n self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)\n wx.CallAfter(self.tc.SetInsertionPoint, 0)\n\n # Create the menu and menubar\n menu = wx.Menu()\n item = menu.Append(-1, \"Page Setup...\\tF5\",\n \"Set up page margins and etc.\")\n self.Bind(wx.EVT_MENU, self.OnPageSetup, item)\n item = menu.Append(-1, \"Print Preview...\\tF6\",\n \"View the printout on-screen\")\n self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)\n item = menu.Append(-1, \"Print...\\tF7\", \"Print the document\")\n self.Bind(wx.EVT_MENU, self.OnPrint, item)\n menu.AppendSeparator()\n## item = menu.Append(-1, \"Test other stuff...\\tF9\", \"\")\n## self.Bind(wx.EVT_MENU, self.OnPrintTest, item)\n## menu.AppendSeparator()\n\n item = menu.Append(wx.ID_ABOUT, \"About\", \"About this application\")\n self.Bind(wx.EVT_MENU, self.OnAbout, item)\n item = menu.Append(wx.ID_EXIT, \"E&xit\\tCtrl-Q\", \"Close this application\")\n self.Bind(wx.EVT_MENU, self.OnExit, item)\n\n menubar = wx.MenuBar()\n menubar.Append(menu, \"&File\")\n self.SetMenuBar(menubar)\n\n # initialize the print data and set some default values\n self.pdata = wx.PrintData()\n self.pdata.SetPaperId(wx.PAPER_LETTER)\n self.pdata.SetOrientation(wx.PORTRAIT)\n self.margins = (wx.Point(15,15), wx.Point(15,15))\n\n\n def OnExit(self, evt):\n self.Close()\n\n\n def OnAbout(self, evt):\n wx.MessageBox('Print framework sample 
application\\n'\n '\\n'\n 'Using wxPython %s' % wx.version(),\n 'About')\n\n def OnClearSelection(self, evt):\n evt.Skip()\n wx.CallAfter(self.tc.SetInsertionPoint,\n self.tc.GetInsertionPoint())\n\n\n def OnPageSetup(self, evt):\n data = wx.PageSetupDialogData()\n data.SetPrintData(self.pdata)\n\n data.SetDefaultMinMargins(True)\n data.SetMarginTopLeft(self.margins[0])\n data.SetMarginBottomRight(self.margins[1])\n\n dlg = wx.PageSetupDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPageSetupData()\n self.pdata = wx.PrintData(data.GetPrintData()) # force a copy\n self.pdata.SetPaperId(data.GetPaperId())\n #print_(\"paperID %r, paperSize %r\" % (self.pdata.GetPaperId(), self.pdata.GetPaperSize()))\n self.margins = (data.GetMarginTopLeft(),\n data.GetMarginBottomRight())\n dlg.Destroy()\n\n\n def OnPrintPreview(self, evt):\n data = wx.PrintDialogData(self.pdata)\n text = self.tc.GetValue()\n printout1 = TextDocPrintout(text, \"title\", self.margins)\n printout2 = TextDocPrintout(text, \"title\", self.margins)\n preview = wx.PrintPreview(printout1, printout2, data)\n if not preview:\n wx.MessageBox(\"Unable to create PrintPreview!\", \"Error\")\n else:\n # create the preview frame such that it overlays the app frame\n frame = wx.PreviewFrame(preview, self, \"Print Preview\",\n pos=self.GetPosition(),\n size=self.GetSize())\n frame.Initialize()\n frame.Show()\n\n\n def OnPrint(self, evt):\n data = wx.PrintDialogData(self.pdata)\n printer = wx.Printer(data)\n text = self.tc.GetValue()\n printout = TextDocPrintout(text, \"title\", self.margins)\n useSetupDialog = True\n if not printer.Print(self, printout, useSetupDialog) \\\n and printer.GetLastError() == wx.PRINTER_ERROR:\n wx.MessageBox(\n \"There was a problem printing.\\n\"\n \"Perhaps your current printer is not set correctly?\",\n \"Printing Error\", wx.OK)\n else:\n data = printer.GetPrintDialogData()\n self.pdata = wx.PrintData(data.GetPrintData()) # force a copy\n printout.Destroy()\n\n\n 
def OnPrintTest(self, evt):\n data = wx.PrintDialogData(self.pdata)\n dlg = wx.PrintDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPrintDialogData()\n print_()\n print_(\"GetFromPage:\", data.GetFromPage())\n print_(\"GetToPage:\", data.GetToPage())\n print_(\"GetMinPage:\", data.GetMinPage())\n print_(\"GetMaxPage:\", data.GetMaxPage())\n print_(\"GetNoCopies:\", data.GetNoCopies())\n print_(\"GetAllPages:\", data.GetAllPages())\n print_(\"GetSelection:\", data.GetSelection())\n print_(\"GetCollate:\", data.GetCollate())\n print_(\"GetPrintToFile:\", data.GetPrintToFile())\n\n self.pdata = wx.PrintData(data.GetPrintData())\n print_()\n print_(\"GetPrinterName:\", self.pdata.GetPrinterName())\n\n dlg.Destroy()\n\n\napp = wx.App()\nfrm = PrintFrameworkSample()\nfrm.Show()\napp.MainLoop()\n",
"step-ids": [
10,
14,
16,
19,
22
]
}
|
[
10,
14,
16,
19,
22
] |
import pandas as pd
from pandas import Series, DataFrame
def load_excel(data_path, data_name, episode_Num):
    """Read ``<data_path><data_name><episode_Num>.xlsx`` into a DataFrame.

    The sheet's first column is used as the DataFrame index.
    """
    file_name = ''.join([data_name, str(episode_Num), '.xlsx'])
    return pd.read_excel(data_path + file_name, index_col=0)
def dataframe_to_numpy(dataframe):
    """Return the underlying values of *dataframe* as a NumPy array."""
    return dataframe.to_numpy()
def numpy_to_tensor(numpy_array):
    """Convert a NumPy array to a ``torch.Tensor``.

    :param numpy_array: array to wrap; the tensor shares its memory.
    :return: ``torch.Tensor`` view of *numpy_array*.
    """
    # ``torch`` was never imported at module level in this file, so calling
    # this function raised NameError; import locally to keep the module's
    # top-level imports untouched.
    import torch
    tensor = torch.from_numpy(numpy_array)
    return tensor
def transform(data, data_path, data_name, episode_Num):
    """Load one episode's excel data and return it as a torch tensor.

    NOTE(review): the incoming ``data`` argument is immediately overwritten
    and never read -- confirm whether any caller relies on passing it.
    """
    loaded = load_excel(data_path, data_name, episode_Num)
    return numpy_to_tensor(dataframe_to_numpy(loaded))
def data_slice(data, num_of_data):
    """Return columns ``1 .. num_of_data`` (inclusive) of the 2-D array
    *data*, dropping column 0."""
    stop = num_of_data + 1
    return data[:, 1:stop]
|
normal
|
{
"blob_id": "b63dc8b9aa2f0593a4a7eb52a722a9c4da6c9e08",
"index": 7804,
"step-1": "<mask token>\n\n\ndef dataframe_to_numpy(dataframe):\n numpy_array = dataframe.to_numpy()\n return numpy_array\n\n\n<mask token>\n\n\ndef data_slice(data, num_of_data):\n data = data[:, 1:num_of_data + 1]\n return data\n",
"step-2": "<mask token>\n\n\ndef load_excel(data_path, data_name, episode_Num):\n data_name = data_name + str(episode_Num) + '.xlsx'\n dataframe = pd.read_excel(data_path + data_name, index_col=0)\n return dataframe\n\n\ndef dataframe_to_numpy(dataframe):\n numpy_array = dataframe.to_numpy()\n return numpy_array\n\n\n<mask token>\n\n\ndef transform(data, data_path, data_name, episode_Num):\n data = load_excel(data_path, data_name, episode_Num)\n data = dataframe_to_numpy(data)\n data = numpy_to_tensor(data)\n return data\n\n\ndef data_slice(data, num_of_data):\n data = data[:, 1:num_of_data + 1]\n return data\n",
"step-3": "<mask token>\n\n\ndef load_excel(data_path, data_name, episode_Num):\n data_name = data_name + str(episode_Num) + '.xlsx'\n dataframe = pd.read_excel(data_path + data_name, index_col=0)\n return dataframe\n\n\ndef dataframe_to_numpy(dataframe):\n numpy_array = dataframe.to_numpy()\n return numpy_array\n\n\ndef numpy_to_tensor(numpy_array):\n tensor = torch.from_numpy(numpy_array)\n return tensor\n\n\ndef transform(data, data_path, data_name, episode_Num):\n data = load_excel(data_path, data_name, episode_Num)\n data = dataframe_to_numpy(data)\n data = numpy_to_tensor(data)\n return data\n\n\ndef data_slice(data, num_of_data):\n data = data[:, 1:num_of_data + 1]\n return data\n",
"step-4": "import pandas as pd\nfrom pandas import Series, DataFrame\n\n\ndef load_excel(data_path, data_name, episode_Num):\n data_name = data_name + str(episode_Num) + '.xlsx'\n dataframe = pd.read_excel(data_path + data_name, index_col=0)\n return dataframe\n\n\ndef dataframe_to_numpy(dataframe):\n numpy_array = dataframe.to_numpy()\n return numpy_array\n\n\ndef numpy_to_tensor(numpy_array):\n tensor = torch.from_numpy(numpy_array)\n return tensor\n\n\ndef transform(data, data_path, data_name, episode_Num):\n data = load_excel(data_path, data_name, episode_Num)\n data = dataframe_to_numpy(data)\n data = numpy_to_tensor(data)\n return data\n\n\ndef data_slice(data, num_of_data):\n data = data[:, 1:num_of_data + 1]\n return data\n",
"step-5": null,
"step-ids": [
2,
4,
5,
6
]
}
|
[
2,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [url('^$', show_articles, name='index'), url('^article/$',
show_article, name='article'), url('^export/$', export_db, name='article')]
<|reserved_special_token_1|>
from django.conf.urls import url
from .views.show import show_article, show_articles, export_db
urlpatterns = [url('^$', show_articles, name='index'), url('^article/$',
show_article, name='article'), url('^export/$', export_db, name='article')]
<|reserved_special_token_1|>
# URL routes for the articles app.
from django.conf.urls import url
from .views.show import show_article, show_articles, export_db
urlpatterns = [
    url(r'^$', show_articles, name='index'),
    url(r'^article/$', show_article, name='article'),
    # NOTE(review): this route reuses name='article' (same as the pattern
    # above), so reverse('article') resolves to only one of them --
    # 'export' was probably intended. Confirm callers before renaming.
    url(r'^export/$', export_db, name='article'),
]
|
flexible
|
{
"blob_id": "9fdc7c1eb68a92451d41313861164a915b85fcee",
"index": 8988,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^$', show_articles, name='index'), url('^article/$',\n show_article, name='article'), url('^export/$', export_db, name='article')]\n",
"step-3": "from django.conf.urls import url\nfrom .views.show import show_article, show_articles, export_db\nurlpatterns = [url('^$', show_articles, name='index'), url('^article/$',\n show_article, name='article'), url('^export/$', export_db, name='article')]\n",
"step-4": "from django.conf.urls import url\nfrom .views.show import show_article, show_articles, export_db\n\nurlpatterns = [\n url(r'^$', show_articles, name='index'),\n url(r'^article/$', show_article, name='article'),\n url(r'^export/$', export_db, name='article'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('START')
time.sleep(30)
print('END')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
conf = SparkConf().setAppName('myTestCopyApp')
sc = SparkContext(conf=conf)
print('START')
time.sleep(30)
print('END')
<|reserved_special_token_1|>
from pyspark import SparkContext, SparkConf
import time
conf = SparkConf().setAppName('myTestCopyApp')
sc = SparkContext(conf=conf)
print('START')
time.sleep(30)
print('END')
<|reserved_special_token_1|>
from pyspark import SparkContext, SparkConf
import time
# Create a basic configuration
conf = SparkConf().setAppName("myTestCopyApp")
# Create a SparkContext using the configuration
sc = SparkContext(conf=conf)
print("START")
# Keep the application alive for 30 seconds -- presumably so it stays
# visible in the cluster UI while running; confirm intent.
time.sleep(30)
print("END")
|
flexible
|
{
"blob_id": "4b773fbf45d15dff27dc7bd51d6636c5f783477b",
"index": 9183,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('START')\ntime.sleep(30)\nprint('END')\n",
"step-3": "<mask token>\nconf = SparkConf().setAppName('myTestCopyApp')\nsc = SparkContext(conf=conf)\nprint('START')\ntime.sleep(30)\nprint('END')\n",
"step-4": "from pyspark import SparkContext, SparkConf\nimport time\nconf = SparkConf().setAppName('myTestCopyApp')\nsc = SparkContext(conf=conf)\nprint('START')\ntime.sleep(30)\nprint('END')\n",
"step-5": "\n\nfrom pyspark import SparkContext, SparkConf\nimport time \n\n# Create a basic configuration\nconf = SparkConf().setAppName(\"myTestCopyApp\")\n\n# Create a SparkContext using the configuration\nsc = SparkContext(conf=conf)\n\nprint(\"START\")\n\ntime.sleep(30)\n\nprint(\"END\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import simplejson as json

# SQuASH pipeline output files to reformat in place.
json_list = [
    "/content/squash-generation/squash/final/Custom.json",
    "/content/squash-generation/squash/temp/Custom/final_qa_set.json",
    "/content/squash-generation/squash/temp/Custom/generated_questions.json",
    "/content/squash-generation/squash/temp/Custom/nbest_predictions.json",
    "/content/squash-generation/squash/temp/Custom/null_odds.json",
    "/content/squash-generation/squash/temp/Custom/predictions.json",
]

# Rewrite each file with pretty-printed, key-sorted JSON.
for i in json_list:
    with open(i) as f:
        obj = json.load(f)
    # Use a context manager for the output too: the original opened the file
    # manually, so an exception in dumps() would have leaked the handle.
    with open(i, "w") as outfile:
        outfile.write(json.dumps(obj, indent=4, sort_keys=True))
|
normal
|
{
"blob_id": "f37d016dc49820239eb42198ca922e8681a2e0a6",
"index": 6929,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in json_list:\n with open(i) as f:\n obj = json.load(f)\n f.close()\n outfile = open(i, 'w')\n outfile.write(json.dumps(obj, indent=4, sort_keys=True))\n outfile.close()\n",
"step-3": "<mask token>\njson_list = ['/content/squash-generation/squash/final/Custom.json',\n '/content/squash-generation/squash/temp/Custom/final_qa_set.json',\n '/content/squash-generation/squash/temp/Custom/generated_questions.json',\n '/content/squash-generation/squash/temp/Custom/nbest_predictions.json',\n '/content/squash-generation/squash/temp/Custom/null_odds.json',\n '/content/squash-generation/squash/temp/Custom/predictions.json']\nfor i in json_list:\n with open(i) as f:\n obj = json.load(f)\n f.close()\n outfile = open(i, 'w')\n outfile.write(json.dumps(obj, indent=4, sort_keys=True))\n outfile.close()\n",
"step-4": "import simplejson as json\njson_list = ['/content/squash-generation/squash/final/Custom.json',\n '/content/squash-generation/squash/temp/Custom/final_qa_set.json',\n '/content/squash-generation/squash/temp/Custom/generated_questions.json',\n '/content/squash-generation/squash/temp/Custom/nbest_predictions.json',\n '/content/squash-generation/squash/temp/Custom/null_odds.json',\n '/content/squash-generation/squash/temp/Custom/predictions.json']\nfor i in json_list:\n with open(i) as f:\n obj = json.load(f)\n f.close()\n outfile = open(i, 'w')\n outfile.write(json.dumps(obj, indent=4, sort_keys=True))\n outfile.close()\n",
"step-5": "import simplejson as json\n\njson_list = [ \"/content/squash-generation/squash/final/Custom.json\",\n \"/content/squash-generation/squash/temp/Custom/final_qa_set.json\", \n \"/content/squash-generation/squash/temp/Custom/generated_questions.json\",\n \"/content/squash-generation/squash/temp/Custom/nbest_predictions.json\", \n \"/content/squash-generation/squash/temp/Custom/null_odds.json\",\n \"/content/squash-generation/squash/temp/Custom/predictions.json\" ]\n\nfor i in json_list:\n with open(i,) as f:\n obj = json.load(f)\n f.close() \n outfile = open(i, \"w\")\n outfile.write(json.dumps(obj, indent=4, sort_keys=True))\n outfile.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class ReshapedDistribution(TorchDistribution):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, base_dist, sample_shape=torch.Size(),
reinterpreted_batch_ndims=0):
sample_shape = torch.Size(sample_shape)
if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape
):
raise ValueError(
'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'
.format(reinterpreted_batch_ndims, len(sample_shape +
base_dist.batch_shape)))
self.base_dist = base_dist
self.sample_shape = sample_shape
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
shape = sample_shape + base_dist.batch_shape + base_dist.event_shape
batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.
event_shape)
batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]
super(ReshapedDistribution, self).__init__(batch_shape, event_shape)
def expand_by(self, sample_shape):
base_dist = self.base_dist
sample_shape = torch.Size(sample_shape) + self.sample_shape
reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
return ReshapedDistribution(base_dist, sample_shape,
reinterpreted_batch_ndims)
def independent(self, reinterpreted_batch_ndims=None):
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = len(self.batch_shape)
base_dist = self.base_dist
sample_shape = self.sample_shape
reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +
reinterpreted_batch_ndims)
return ReshapedDistribution(base_dist, sample_shape,
reinterpreted_batch_ndims)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape + self.sample_shape)
<|reserved_special_token_0|>
def log_prob(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
self.event_dim])
return sum_rightmost(self.base_dist.log_prob(value), self.
reinterpreted_batch_ndims).expand(shape)
def score_parts(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
self.event_dim])
log_prob, score_function, entropy_term = self.base_dist.score_parts(
value)
log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims
).expand(shape)
if not isinstance(score_function, numbers.Number):
score_function = sum_rightmost(score_function, self.
reinterpreted_batch_ndims).expand(shape)
if not isinstance(entropy_term, numbers.Number):
entropy_term = sum_rightmost(entropy_term, self.
reinterpreted_batch_ndims).expand(shape)
return ScoreParts(log_prob, score_function, entropy_term)
def enumerate_support(self):
if self.reinterpreted_batch_ndims:
raise NotImplementedError(
'Pyro does not enumerate over cartesian products')
samples = self.base_dist.enumerate_support()
if not self.sample_shape:
return samples
enum_shape, base_shape = samples.shape[:1], samples.shape[1:]
samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape
) + base_shape)
samples = samples.expand(enum_shape + self.sample_shape + base_shape)
return samples
@property
def mean(self):
return self.base_dist.mean.expand(self.batch_shape + self.event_shape)
@property
def variance(self):
return self.base_dist.variance.expand(self.batch_shape + self.
event_shape)
class MaskedDistribution(TorchDistribution):
    """
    Masks a distribution by a zero-one tensor that is broadcastable to the
    distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
    :param torch.Tensor mask: A zero-one valued float tensor.
    """
    arg_constraints = {}
    def __init__(self, base_dist, mask):
        # The mask may be smaller than batch_shape (it broadcasts), but must
        # not introduce dims beyond it -- the broadcast result must equal
        # base_dist.batch_shape exactly.
        if broadcast_shape(mask.shape, base_dist.batch_shape
            ) != base_dist.batch_shape:
            raise ValueError(
                'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'
                .format(mask.shape, base_dist.batch_shape))
        self.base_dist = base_dist
        self._mask = mask
        super(MaskedDistribution, self).__init__(base_dist.batch_shape,
            base_dist.event_shape)
    @property
    def has_rsample(self):
        # Sampling is delegated untouched; only scoring is masked.
        return self.base_dist.has_rsample
    @property
    def has_enumerate_support(self):
        return self.base_dist.has_enumerate_support
    @constraints.dependent_property
    def support(self):
        return self.base_dist.support
    def sample(self, sample_shape=torch.Size()):
        return self.base_dist.sample(sample_shape)
    def rsample(self, sample_shape=torch.Size()):
        return self.base_dist.rsample(sample_shape)
    def log_prob(self, value):
        # Zero mask entries zero out the corresponding log-density,
        # effectively excluding those batch elements from inference.
        return self.base_dist.log_prob(value) * self._mask
    def score_parts(self, value):
        # Relies on ScoreParts supporting elementwise scaling of its terms.
        return self.base_dist.score_parts(value) * self._mask
    def enumerate_support(self):
        return self.base_dist.enumerate_support()
    @property
    def mean(self):
        return self.base_dist.mean
    @property
    def variance(self):
        return self.base_dist.variance
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TorchDistributionMixin(Distribution):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def shape(self, sample_shape=torch.Size()):
"""
The tensor shape of samples from this distribution.
Samples are of shape::
d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
:param sample_shape: the size of the iid batch to be drawn from the
distribution.
:type sample_shape: torch.Size
:return: Tensor shape of samples.
:rtype: torch.Size
"""
return sample_shape + self.batch_shape + self.event_shape
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def reshape(self, sample_shape=None, extra_event_dims=None):
raise Exception(
"""
.reshape(sample_shape=s, extra_event_dims=n) was renamed and split into
.expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n)."""
)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TorchDistribution(torch.distributions.Distribution,
TorchDistributionMixin):
"""
Base class for PyTorch-compatible distributions with Pyro support.
This should be the base class for almost all new Pyro distributions.
.. note::
Parameters and data should be of type :class:`~torch.Tensor`
and all methods return type :class:`~torch.Tensor` unless
otherwise noted.
**Tensor Shapes**:
TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::
x = d.sample(sample_shape)
assert x.shape == d.shape(sample_shape)
Pyro follows the same distribution shape semantics as PyTorch. It distinguishes
between three different roles for tensor shapes of samples:
- *sample shape* corresponds to the shape of the iid samples drawn from the distribution.
This is taken as an argument by the distribution's `sample` method.
- *batch shape* corresponds to non-identical (independent) parameterizations of
the distribution, inferred from the distribution's parameter shapes. This is
fixed for a distribution instance.
- *event shape* corresponds to the event dimensions of the distribution, which
is fixed for a distribution class. These are collapsed when we try to score
a sample from the distribution via `d.log_prob(x)`.
These shapes are related by the equation::
assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
Distributions provide a vectorized
:meth`~torch.distributions.distribution.Distribution.log_prob` method that
evaluates the log probability density of each event in a batch
independently, returning a tensor of shape
``sample_shape + d.batch_shape``::
x = d.sample(sample_shape)
assert x.shape == d.shape(sample_shape)
log_p = d.log_prob(x)
assert log_p.shape == sample_shape + d.batch_shape
**Implementing New Distributions**:
Derived classes must implement the methods
:meth:`~torch.distributions.distribution.Distribution.sample`
(or :meth:`~torch.distributions.distribution.Distribution.rsample` if
``.has_rsample == True``) and
:meth:`~torch.distributions.distribution.Distribution.log_prob`, and must
implement the properties
:attr:`~torch.distributions.distribution.Distribution.batch_shape`,
and :attr:`~torch.distributions.distribution.Distribution.event_shape`.
Discrete classes may also implement the
:meth:`~torch.distributions.distribution.Distribution.enumerate_support`
method to improve gradient estimates and set
``.has_enumerate_support = True``.
"""
pass
class ReshapedDistribution(TorchDistribution):
    """
    Reshapes a distribution by adding ``sample_shape`` to its total shape
    and adding ``reinterpreted_batch_ndims`` to its
    :attr:`~torch.distributions.distribution.Distribution.event_shape`.
    :param torch.Size sample_shape: The size of the iid batch to be drawn from
        the distribution.
    :param int reinterpreted_batch_ndims: The number of extra event dimensions that will
        be considered dependent.
    """
    arg_constraints = {}
    def __init__(self, base_dist, sample_shape=torch.Size(),
        reinterpreted_batch_ndims=0):
        # Normalize so the shape arithmetic below concatenates torch.Sizes.
        sample_shape = torch.Size(sample_shape)
        # Can only reinterpret dims that exist in the combined
        # sample + batch shape.
        if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape
            ):
            raise ValueError(
                'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'
                .format(reinterpreted_batch_ndims, len(sample_shape +
                base_dist.batch_shape)))
        self.base_dist = base_dist
        self.sample_shape = sample_shape
        self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
        # Split the total shape: the rightmost
        # reinterpreted_batch_ndims + len(base_dist.event_shape) dims become
        # the event shape; everything to the left remains batch shape.
        shape = sample_shape + base_dist.batch_shape + base_dist.event_shape
        batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.
            event_shape)
        batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]
        super(ReshapedDistribution, self).__init__(batch_shape, event_shape)
    def expand_by(self, sample_shape):
        # Prepend new iid dims; reinterpreted dims are unchanged.
        base_dist = self.base_dist
        sample_shape = torch.Size(sample_shape) + self.sample_shape
        reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
        return ReshapedDistribution(base_dist, sample_shape,
            reinterpreted_batch_ndims)
    def independent(self, reinterpreted_batch_ndims=None):
        # Default: reinterpret the entire current batch shape as event dims.
        if reinterpreted_batch_ndims is None:
            reinterpreted_batch_ndims = len(self.batch_shape)
        base_dist = self.base_dist
        sample_shape = self.sample_shape
        reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +
            reinterpreted_batch_ndims)
        return ReshapedDistribution(base_dist, sample_shape,
            reinterpreted_batch_ndims)
    @property
    def has_rsample(self):
        # Delegated to the wrapped distribution.
        return self.base_dist.has_rsample
    @property
    def has_enumerate_support(self):
        return self.base_dist.has_enumerate_support
    @constraints.dependent_property
    def support(self):
        return self.base_dist.support
    def sample(self, sample_shape=torch.Size()):
        # Fold this wrapper's sample_shape into the draw from base_dist.
        return self.base_dist.sample(sample_shape + self.sample_shape)
    def rsample(self, sample_shape=torch.Size()):
        return self.base_dist.rsample(sample_shape + self.sample_shape)
    def log_prob(self, value):
        # Sum out the reinterpreted dims, then broadcast to the batch shape
        # implied jointly by self and value.
        shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
            self.event_dim])
        return sum_rightmost(self.base_dist.log_prob(value), self.
            reinterpreted_batch_ndims).expand(shape)
    def score_parts(self, value):
        shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
            self.event_dim])
        log_prob, score_function, entropy_term = self.base_dist.score_parts(
            value)
        log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims
            ).expand(shape)
        # score_function / entropy_term may be plain numeric constants
        # (e.g. 0 for reparameterized terms); only tensors need
        # summing/expanding.
        if not isinstance(score_function, numbers.Number):
            score_function = sum_rightmost(score_function, self.
                reinterpreted_batch_ndims).expand(shape)
        if not isinstance(entropy_term, numbers.Number):
            entropy_term = sum_rightmost(entropy_term, self.
                reinterpreted_batch_ndims).expand(shape)
        return ScoreParts(log_prob, score_function, entropy_term)
    def enumerate_support(self):
        if self.reinterpreted_batch_ndims:
            raise NotImplementedError(
                'Pyro does not enumerate over cartesian products')
        samples = self.base_dist.enumerate_support()
        if not self.sample_shape:
            return samples
        # Insert singleton dims after the enumeration dim, then expand them
        # to sample_shape so each enumerated value broadcasts over the iid
        # batch.
        enum_shape, base_shape = samples.shape[:1], samples.shape[1:]
        samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape
            ) + base_shape)
        samples = samples.expand(enum_shape + self.sample_shape + base_shape)
        return samples
    @property
    def mean(self):
        return self.base_dist.mean.expand(self.batch_shape + self.event_shape)
    @property
    def variance(self):
        return self.base_dist.variance.expand(self.batch_shape + self.
            event_shape)
class MaskedDistribution(TorchDistribution):
"""
Masks a distribution by a zero-one tensor that is broadcastable to the
distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
:param torch.Tensor mask: A zero-one valued float tensor.
"""
arg_constraints = {}
def __init__(self, base_dist, mask):
if broadcast_shape(mask.shape, base_dist.batch_shape
) != base_dist.batch_shape:
raise ValueError(
'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'
.format(mask.shape, base_dist.batch_shape))
self.base_dist = base_dist
self._mask = mask
super(MaskedDistribution, self).__init__(base_dist.batch_shape,
base_dist.event_shape)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.base_dist.rsample(sample_shape)
def log_prob(self, value):
return self.base_dist.log_prob(value) * self._mask
def score_parts(self, value):
return self.base_dist.score_parts(value) * self._mask
def enumerate_support(self):
return self.base_dist.enumerate_support()
@property
def mean(self):
return self.base_dist.mean
@property
def variance(self):
return self.base_dist.variance
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TorchDistributionMixin(Distribution):
<|reserved_special_token_0|>
def __call__(self, sample_shape=torch.Size()):
"""
Samples a random value.
This is reparameterized whenever possible, calling
:meth:`~torch.distributions.distribution.Distribution.rsample` for
reparameterized distributions and
:meth:`~torch.distributions.distribution.Distribution.sample` for
non-reparameterized distributions.
:param sample_shape: the size of the iid batch to be drawn from the
distribution.
:type sample_shape: torch.Size
:return: A random value or batch of random values (if parameters are
batched). The shape of the result should be `self.shape()`.
:rtype: torch.Tensor
"""
return self.rsample(sample_shape) if self.has_rsample else self.sample(
sample_shape)
<|reserved_special_token_0|>
def shape(self, sample_shape=torch.Size()):
"""
The tensor shape of samples from this distribution.
Samples are of shape::
d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
:param sample_shape: the size of the iid batch to be drawn from the
distribution.
:type sample_shape: torch.Size
:return: Tensor shape of samples.
:rtype: torch.Size
"""
return sample_shape + self.batch_shape + self.event_shape
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def reshape(self, sample_shape=None, extra_event_dims=None):
raise Exception(
"""
.reshape(sample_shape=s, extra_event_dims=n) was renamed and split into
.expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n)."""
)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TorchDistribution(torch.distributions.Distribution,
TorchDistributionMixin):
"""
Base class for PyTorch-compatible distributions with Pyro support.
This should be the base class for almost all new Pyro distributions.
.. note::
Parameters and data should be of type :class:`~torch.Tensor`
and all methods return type :class:`~torch.Tensor` unless
otherwise noted.
**Tensor Shapes**:
TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::
x = d.sample(sample_shape)
assert x.shape == d.shape(sample_shape)
Pyro follows the same distribution shape semantics as PyTorch. It distinguishes
between three different roles for tensor shapes of samples:
- *sample shape* corresponds to the shape of the iid samples drawn from the distribution.
This is taken as an argument by the distribution's `sample` method.
- *batch shape* corresponds to non-identical (independent) parameterizations of
the distribution, inferred from the distribution's parameter shapes. This is
fixed for a distribution instance.
- *event shape* corresponds to the event dimensions of the distribution, which
is fixed for a distribution class. These are collapsed when we try to score
a sample from the distribution via `d.log_prob(x)`.
These shapes are related by the equation::
assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
Distributions provide a vectorized
:meth`~torch.distributions.distribution.Distribution.log_prob` method that
evaluates the log probability density of each event in a batch
independently, returning a tensor of shape
``sample_shape + d.batch_shape``::
x = d.sample(sample_shape)
assert x.shape == d.shape(sample_shape)
log_p = d.log_prob(x)
assert log_p.shape == sample_shape + d.batch_shape
**Implementing New Distributions**:
Derived classes must implement the methods
:meth:`~torch.distributions.distribution.Distribution.sample`
(or :meth:`~torch.distributions.distribution.Distribution.rsample` if
``.has_rsample == True``) and
:meth:`~torch.distributions.distribution.Distribution.log_prob`, and must
implement the properties
:attr:`~torch.distributions.distribution.Distribution.batch_shape`,
and :attr:`~torch.distributions.distribution.Distribution.event_shape`.
Discrete classes may also implement the
:meth:`~torch.distributions.distribution.Distribution.enumerate_support`
method to improve gradient estimates and set
``.has_enumerate_support = True``.
"""
pass
class ReshapedDistribution(TorchDistribution):
"""
Reshapes a distribution by adding ``sample_shape`` to its total shape
and adding ``reinterpreted_batch_ndims`` to its
:attr:`~torch.distributions.distribution.Distribution.event_shape`.
:param torch.Size sample_shape: The size of the iid batch to be drawn from
the distribution.
:param int reinterpreted_batch_ndims: The number of extra event dimensions that will
be considered dependent.
"""
arg_constraints = {}
def __init__(self, base_dist, sample_shape=torch.Size(),
reinterpreted_batch_ndims=0):
sample_shape = torch.Size(sample_shape)
if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape
):
raise ValueError(
'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'
.format(reinterpreted_batch_ndims, len(sample_shape +
base_dist.batch_shape)))
self.base_dist = base_dist
self.sample_shape = sample_shape
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
shape = sample_shape + base_dist.batch_shape + base_dist.event_shape
batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.
event_shape)
batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]
super(ReshapedDistribution, self).__init__(batch_shape, event_shape)
def expand_by(self, sample_shape):
base_dist = self.base_dist
sample_shape = torch.Size(sample_shape) + self.sample_shape
reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
return ReshapedDistribution(base_dist, sample_shape,
reinterpreted_batch_ndims)
def independent(self, reinterpreted_batch_ndims=None):
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = len(self.batch_shape)
base_dist = self.base_dist
sample_shape = self.sample_shape
reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +
reinterpreted_batch_ndims)
return ReshapedDistribution(base_dist, sample_shape,
reinterpreted_batch_ndims)
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
return self.base_dist.support
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape + self.sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.base_dist.rsample(sample_shape + self.sample_shape)
def log_prob(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
self.event_dim])
return sum_rightmost(self.base_dist.log_prob(value), self.
reinterpreted_batch_ndims).expand(shape)
def score_parts(self, value):
shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
self.event_dim])
log_prob, score_function, entropy_term = self.base_dist.score_parts(
value)
log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims
).expand(shape)
if not isinstance(score_function, numbers.Number):
score_function = sum_rightmost(score_function, self.
reinterpreted_batch_ndims).expand(shape)
if not isinstance(entropy_term, numbers.Number):
entropy_term = sum_rightmost(entropy_term, self.
reinterpreted_batch_ndims).expand(shape)
return ScoreParts(log_prob, score_function, entropy_term)
def enumerate_support(self):
if self.reinterpreted_batch_ndims:
raise NotImplementedError(
'Pyro does not enumerate over cartesian products')
samples = self.base_dist.enumerate_support()
if not self.sample_shape:
return samples
enum_shape, base_shape = samples.shape[:1], samples.shape[1:]
samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape
) + base_shape)
samples = samples.expand(enum_shape + self.sample_shape + base_shape)
return samples
@property
def mean(self):
return self.base_dist.mean.expand(self.batch_shape + self.event_shape)
@property
def variance(self):
return self.base_dist.variance.expand(self.batch_shape + self.
event_shape)
class MaskedDistribution(TorchDistribution):
    """
    Masks a distribution by a zero-one tensor that is broadcastable to the
    distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.

    Where the mask is zero, ``log_prob`` and ``score_parts`` contribute
    nothing; sampling is unaffected.

    :param torch.Tensor mask: A zero-one valued float tensor.
    """
    arg_constraints = {}

    def __init__(self, base_dist, mask):
        # The mask may broadcast up to, but never beyond, the base batch shape.
        broadcast = broadcast_shape(mask.shape, base_dist.batch_shape)
        if broadcast != base_dist.batch_shape:
            raise ValueError(
                'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'
                .format(mask.shape, base_dist.batch_shape))
        self.base_dist = base_dist
        self._mask = mask
        super(MaskedDistribution, self).__init__(base_dist.batch_shape,
            base_dist.event_shape)

    @property
    def has_rsample(self):
        # Reparameterizability is inherited from the wrapped distribution.
        return self.base_dist.has_rsample

    @property
    def has_enumerate_support(self):
        return self.base_dist.has_enumerate_support

    @constraints.dependent_property
    def support(self):
        return self.base_dist.support

    def sample(self, sample_shape=torch.Size()):
        # The mask plays no role in sampling.
        return self.base_dist.sample(sample_shape)

    def rsample(self, sample_shape=torch.Size()):
        return self.base_dist.rsample(sample_shape)

    def log_prob(self, value):
        # Zero out the log-density at masked-off batch entries.
        base_score = self.base_dist.log_prob(value)
        return base_score * self._mask

    def score_parts(self, value):
        # ScoreParts supports elementwise scaling, masking all terms at once.
        parts = self.base_dist.score_parts(value)
        return parts * self._mask

    def enumerate_support(self):
        return self.base_dist.enumerate_support()

    @property
    def mean(self):
        return self.base_dist.mean

    @property
    def variance(self):
        return self.base_dist.variance
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TorchDistributionMixin(Distribution):
<|reserved_special_token_0|>
def __call__(self, sample_shape=torch.Size()):
"""
Samples a random value.
This is reparameterized whenever possible, calling
:meth:`~torch.distributions.distribution.Distribution.rsample` for
reparameterized distributions and
:meth:`~torch.distributions.distribution.Distribution.sample` for
non-reparameterized distributions.
:param sample_shape: the size of the iid batch to be drawn from the
distribution.
:type sample_shape: torch.Size
:return: A random value or batch of random values (if parameters are
batched). The shape of the result should be `self.shape()`.
:rtype: torch.Tensor
"""
return self.rsample(sample_shape) if self.has_rsample else self.sample(
sample_shape)
<|reserved_special_token_0|>
def shape(self, sample_shape=torch.Size()):
"""
The tensor shape of samples from this distribution.
Samples are of shape::
d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
:param sample_shape: the size of the iid batch to be drawn from the
distribution.
:type sample_shape: torch.Size
:return: Tensor shape of samples.
:rtype: torch.Size
"""
return sample_shape + self.batch_shape + self.event_shape
def expand(self, batch_shape):
"""
Expands a distribution to a desired
:attr:`~torch.distributions.distribution.Distribution.batch_shape`.
Note that this is more general than :meth:`expand_by` because
``d.expand_by(sample_shape)`` can be reduced to
``d.expand(sample_shape + d.batch_shape)``.
:param torch.Size batch_shape: The target ``batch_shape``. This must
compatible with ``self.batch_shape`` similar to the requirements
of :func:`torch.Tensor.expand`: the target ``batch_shape`` must
be at least as long as ``self.batch_shape``, and for each
non-singleton dim of ``self.batch_shape``, ``batch_shape`` must
either agree or be set to ``-1``.
:return: An expanded version of this distribution.
:rtype: :class:`ReshapedDistribution`
"""
batch_shape = list(batch_shape)
if len(batch_shape) < len(self.batch_shape):
raise ValueError(
'Expected len(batch_shape) >= len(self.batch_shape), actual {} vs {}'
.format(len(batch_shape), len(self.batch_shape)))
for dim in range(-1, -1 - len(self.batch_shape), -1):
if batch_shape[dim] == -1:
batch_shape[dim] = self.batch_shape[dim]
elif batch_shape[dim] != self.batch_shape[dim]:
if self.batch_shape[dim] != 1:
raise ValueError(
'Cannot broadcast dim {} of size {} to size {}'.
format(dim, self.batch_shape[dim], batch_shape[dim]))
else:
raise NotImplementedError(
'https://github.com/uber/pyro/issues/1119')
sample_shape = batch_shape[:len(batch_shape) - len(self.batch_shape)]
return self.expand_by(sample_shape)
<|reserved_special_token_0|>
def reshape(self, sample_shape=None, extra_event_dims=None):
raise Exception(
"""
.reshape(sample_shape=s, extra_event_dims=n) was renamed and split into
.expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n)."""
)
def independent(self, reinterpreted_batch_ndims=None):
"""
Reinterprets the ``n`` rightmost dimensions of this distributions
:attr:`~torch.distributions.distribution.Distribution.batch_shape`
as event dims, adding them to the left side of
:attr:`~torch.distributions.distribution.Distribution.event_shape`.
Example::
>>> [d1.batch_shape, d1.event_shape]
[torch.Size((2, 3)), torch.Size((4, 5))]
>>> d2 = d1.independent(1)
>>> [d2.batch_shape, d2.event_shape]
[torch.Size((2,)), torch.Size((3, 4, 5))]
>>> d3 = d1.independent(2)
>>> [d3.batch_shape, d3.event_shape]
[torch.Size(()), torch.Size((2, 3, 4, 5))]
:param int reinterpreted_batch_ndims: The number of batch dimensions
to reinterpret as event dimensions.
:return: A reshaped version of this distribution.
:rtype: :class:`ReshapedDistribution`
"""
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = len(self.batch_shape)
return ReshapedDistribution(self, reinterpreted_batch_ndims=
reinterpreted_batch_ndims)
def mask(self, mask):
"""
Masks a distribution by a zero-one tensor that is broadcastable to the
distributions :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
:param torch.Tensor mask: A zero-one valued float tensor.
:return: A masked copy of this distribution.
:rtype: :class:`MaskedDistribution`
"""
return MaskedDistribution(self, mask)
class TorchDistribution(torch.distributions.Distribution,
        TorchDistributionMixin):
    """
    Base class for PyTorch-compatible distributions with Pyro support.

    Almost all new Pyro distributions should subclass this. Parameters and
    data are :class:`~torch.Tensor` objects, and every method returns a
    :class:`~torch.Tensor` unless documented otherwise.

    **Tensor shapes.** Samples obey::

        x = d.sample(sample_shape)
        assert x.shape == d.shape(sample_shape)
        assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape

    where *sample shape* indexes iid draws, *batch shape* indexes independent
    parameterizations, and *event shape* covers the dimensions of a single
    event. ``log_prob`` collapses the event dimensions, returning a tensor
    of shape ``sample_shape + d.batch_shape``.

    **Implementing new distributions.** Subclasses must implement ``sample``
    (or ``rsample`` when ``.has_rsample == True``) and ``log_prob``, plus the
    ``batch_shape`` and ``event_shape`` properties. Discrete distributions
    may additionally implement ``enumerate_support`` and set
    ``.has_enumerate_support = True`` to improve gradient estimates.
    """
    pass
class ReshapedDistribution(TorchDistribution):
    """
    Reshapes a distribution by adding ``sample_shape`` to its total shape
    and adding ``reinterpreted_batch_ndims`` to its
    :attr:`~torch.distributions.distribution.Distribution.event_shape`.

    :param torch.Size sample_shape: The size of the iid batch to be drawn from
        the distribution.
    :param int reinterpreted_batch_ndims: The number of extra event dimensions that will
        be considered dependent.
    """
    # No constrained parameters of its own; constraints live on base_dist.
    arg_constraints = {}

    def __init__(self, base_dist, sample_shape=torch.Size(),
        reinterpreted_batch_ndims=0):
        sample_shape = torch.Size(sample_shape)
        # Cannot reinterpret more dims than the total batch dims available.
        if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape
            ):
            raise ValueError(
                'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'
                .format(reinterpreted_batch_ndims, len(sample_shape +
                base_dist.batch_shape)))
        self.base_dist = base_dist
        self.sample_shape = sample_shape
        self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
        # Split the total shape into (batch_shape, event_shape) after moving
        # reinterpreted_batch_ndims dims from batch to event.
        shape = sample_shape + base_dist.batch_shape + base_dist.event_shape
        batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.
            event_shape)
        batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]
        super(ReshapedDistribution, self).__init__(batch_shape, event_shape)

    def expand_by(self, sample_shape):
        # Prepend new iid dims; returns a fresh wrapper around the same base_dist.
        base_dist = self.base_dist
        sample_shape = torch.Size(sample_shape) + self.sample_shape
        reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
        return ReshapedDistribution(base_dist, sample_shape,
            reinterpreted_batch_ndims)

    def independent(self, reinterpreted_batch_ndims=None):
        # None means "reinterpret all remaining batch dims as event dims".
        if reinterpreted_batch_ndims is None:
            reinterpreted_batch_ndims = len(self.batch_shape)
        base_dist = self.base_dist
        sample_shape = self.sample_shape
        reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +
            reinterpreted_batch_ndims)
        return ReshapedDistribution(base_dist, sample_shape,
            reinterpreted_batch_ndims)

    @property
    def has_rsample(self):
        # Delegated to the wrapped distribution.
        return self.base_dist.has_rsample

    @property
    def has_enumerate_support(self):
        return self.base_dist.has_enumerate_support

    @constraints.dependent_property
    def support(self):
        return self.base_dist.support

    def sample(self, sample_shape=torch.Size()):
        # Extra iid dims are prepended to the requested sample_shape.
        return self.base_dist.sample(sample_shape + self.sample_shape)

    def rsample(self, sample_shape=torch.Size()):
        return self.base_dist.rsample(sample_shape + self.sample_shape)

    def log_prob(self, value):
        # Broadcast our batch_shape against value's batch dims, then sum out
        # the reinterpreted batch dims of the base log-density.
        shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
            self.event_dim])
        return sum_rightmost(self.base_dist.log_prob(value), self.
            reinterpreted_batch_ndims).expand(shape)

    def score_parts(self, value):
        shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -
            self.event_dim])
        log_prob, score_function, entropy_term = self.base_dist.score_parts(
            value)
        log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims
            ).expand(shape)
        # score_function / entropy_term may be plain numbers; only
        # tensor-valued terms need summing and reshaping.
        if not isinstance(score_function, numbers.Number):
            score_function = sum_rightmost(score_function, self.
                reinterpreted_batch_ndims).expand(shape)
        if not isinstance(entropy_term, numbers.Number):
            entropy_term = sum_rightmost(entropy_term, self.
                reinterpreted_batch_ndims).expand(shape)
        return ScoreParts(log_prob, score_function, entropy_term)

    def enumerate_support(self):
        if self.reinterpreted_batch_ndims:
            raise NotImplementedError(
                'Pyro does not enumerate over cartesian products')
        samples = self.base_dist.enumerate_support()
        if not self.sample_shape:
            return samples
        # Insert singleton dims after the enumeration dim, then expand them
        # to cover sample_shape.
        enum_shape, base_shape = samples.shape[:1], samples.shape[1:]
        samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape
            ) + base_shape)
        samples = samples.expand(enum_shape + self.sample_shape + base_shape)
        return samples

    @property
    def mean(self):
        return self.base_dist.mean.expand(self.batch_shape + self.event_shape)

    @property
    def variance(self):
        return self.base_dist.variance.expand(self.batch_shape + self.
            event_shape)
class MaskedDistribution(TorchDistribution):
    """
    Masks a distribution by a zero-one tensor that is broadcastable to the
    distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.

    Scoring methods are multiplied elementwise by the mask; sampling and
    moments pass through to the base distribution untouched.

    :param torch.Tensor mask: A zero-one valued float tensor.
    """
    arg_constraints = {}

    def __init__(self, base_dist, mask):
        # Reject masks that would widen the base distribution's batch shape.
        broadcast = broadcast_shape(mask.shape, base_dist.batch_shape)
        if broadcast != base_dist.batch_shape:
            raise ValueError(
                'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'
                .format(mask.shape, base_dist.batch_shape))
        self.base_dist = base_dist
        self._mask = mask
        super(MaskedDistribution, self).__init__(base_dist.batch_shape,
            base_dist.event_shape)

    @property
    def has_rsample(self):
        return self.base_dist.has_rsample

    @property
    def has_enumerate_support(self):
        return self.base_dist.has_enumerate_support

    @constraints.dependent_property
    def support(self):
        return self.base_dist.support

    def sample(self, sample_shape=torch.Size()):
        return self.base_dist.sample(sample_shape)

    def rsample(self, sample_shape=torch.Size()):
        return self.base_dist.rsample(sample_shape)

    def log_prob(self, value):
        # Masked-off entries contribute a score of zero.
        base_score = self.base_dist.log_prob(value)
        return base_score * self._mask

    def score_parts(self, value):
        # ScoreParts supports elementwise scaling by a tensor.
        parts = self.base_dist.score_parts(value)
        return parts * self._mask

    def enumerate_support(self):
        return self.base_dist.enumerate_support()

    @property
    def mean(self):
        return self.base_dist.mean

    @property
    def variance(self):
        return self.base_dist.variance
<|reserved_special_token_1|>
from __future__ import absolute_import, division, print_function
import numbers
import torch
from torch.distributions import constraints
from pyro.distributions.distribution import Distribution
from pyro.distributions.score_parts import ScoreParts
from pyro.distributions.util import broadcast_shape, sum_rightmost
class TorchDistributionMixin(Distribution):
    """
    Mixin to provide Pyro compatibility for PyTorch distributions.

    You should instead use `TorchDistribution` for new distribution classes.

    This is mainly useful for wrapping existing PyTorch distributions for
    use in Pyro. Derived classes must first inherit from
    :class:`torch.distributions.distribution.Distribution` and then inherit
    from :class:`TorchDistributionMixin`.
    """

    def __call__(self, sample_shape=torch.Size()):
        """
        Samples a random value.

        This is reparameterized whenever possible, calling
        :meth:`~torch.distributions.distribution.Distribution.rsample` for
        reparameterized distributions and
        :meth:`~torch.distributions.distribution.Distribution.sample` for
        non-reparameterized distributions.

        :param sample_shape: the size of the iid batch to be drawn from the
            distribution.
        :type sample_shape: torch.Size
        :return: A random value or batch of random values (if parameters are
            batched). The shape of the result should be `self.shape()`.
        :rtype: torch.Tensor
        """
        # Prefer the pathwise (reparameterized) sampler when available so
        # gradients can flow through the sample.
        return self.rsample(sample_shape) if self.has_rsample else self.sample(sample_shape)

    @property
    def event_dim(self):
        """
        :return: Number of dimensions of individual events.
        :rtype: int
        """
        return len(self.event_shape)

    def shape(self, sample_shape=torch.Size()):
        """
        The tensor shape of samples from this distribution.

        Samples are of shape::

            d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape

        :param sample_shape: the size of the iid batch to be drawn from the
            distribution.
        :type sample_shape: torch.Size
        :return: Tensor shape of samples.
        :rtype: torch.Size
        """
        return sample_shape + self.batch_shape + self.event_shape

    def expand(self, batch_shape):
        """
        Expands a distribution to a desired
        :attr:`~torch.distributions.distribution.Distribution.batch_shape`.

        Note that this is more general than :meth:`expand_by` because
        ``d.expand_by(sample_shape)`` can be reduced to
        ``d.expand(sample_shape + d.batch_shape)``.

        :param torch.Size batch_shape: The target ``batch_shape``. This must
            compatible with ``self.batch_shape`` similar to the requirements
            of :func:`torch.Tensor.expand`: the target ``batch_shape`` must
            be at least as long as ``self.batch_shape``, and for each
            non-singleton dim of ``self.batch_shape``, ``batch_shape`` must
            either agree or be set to ``-1``.
        :return: An expanded version of this distribution.
        :rtype: :class:`ReshapedDistribution`
        """
        batch_shape = list(batch_shape)
        if len(batch_shape) < len(self.batch_shape):
            raise ValueError("Expected len(batch_shape) >= len(self.batch_shape), "
                             "actual {} vs {}".format(len(batch_shape), len(self.batch_shape)))
        # check sizes of existing dims
        for dim in range(-1, -1 - len(self.batch_shape), -1):
            if batch_shape[dim] == -1:
                batch_shape[dim] = self.batch_shape[dim]
            elif batch_shape[dim] != self.batch_shape[dim]:
                if self.batch_shape[dim] != 1:
                    raise ValueError("Cannot broadcast dim {} of size {} to size {}".format(
                        dim, self.batch_shape[dim], batch_shape[dim]))
                else:
                    # Expanding an internal singleton batch dim is not yet supported.
                    raise NotImplementedError("https://github.com/uber/pyro/issues/1119")
        # Whatever remains on the front of batch_shape becomes iid sample dims.
        sample_shape = batch_shape[:len(batch_shape) - len(self.batch_shape)]
        return self.expand_by(sample_shape)

    def expand_by(self, sample_shape):
        """
        Expands a distribution by adding ``sample_shape`` to the left side of
        its :attr:`~torch.distributions.distribution.Distribution.batch_shape`.

        To expand internal dims of ``self.batch_shape`` from 1 to something
        larger, use :meth:`expand` instead.

        :param torch.Size sample_shape: The size of the iid batch to be drawn
            from the distribution.
        :return: An expanded version of this distribution.
        :rtype: :class:`ReshapedDistribution`
        """
        return ReshapedDistribution(self, sample_shape=sample_shape)

    def reshape(self, sample_shape=None, extra_event_dims=None):
        # Deprecation shim: fails loudly with migration instructions.
        # NOTE(review): raising a bare Exception is unusual; a more specific
        # type (e.g. NotImplementedError) would be more idiomatic — confirm
        # no caller relies on the exact type.
        raise Exception('''
            .reshape(sample_shape=s, extra_event_dims=n) was renamed and split into
            .expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).''')

    def independent(self, reinterpreted_batch_ndims=None):
        """
        Reinterprets the ``n`` rightmost dimensions of this distributions
        :attr:`~torch.distributions.distribution.Distribution.batch_shape`
        as event dims, adding them to the left side of
        :attr:`~torch.distributions.distribution.Distribution.event_shape`.

        Example::

            >>> [d1.batch_shape, d1.event_shape]
            [torch.Size((2, 3)), torch.Size((4, 5))]
            >>> d2 = d1.independent(1)
            >>> [d2.batch_shape, d2.event_shape]
            [torch.Size((2,)), torch.Size((3, 4, 5))]
            >>> d3 = d1.independent(2)
            >>> [d3.batch_shape, d3.event_shape]
            [torch.Size(()), torch.Size((2, 3, 4, 5))]

        :param int reinterpreted_batch_ndims: The number of batch dimensions
            to reinterpret as event dimensions.
        :return: A reshaped version of this distribution.
        :rtype: :class:`ReshapedDistribution`
        """
        if reinterpreted_batch_ndims is None:
            # Default: absorb all batch dims into the event shape.
            reinterpreted_batch_ndims = len(self.batch_shape)
        # TODO return pyro.distributions.torch.Independent(self, reinterpreted_batch_ndims)
        return ReshapedDistribution(self, reinterpreted_batch_ndims=reinterpreted_batch_ndims)

    def mask(self, mask):
        """
        Masks a distribution by a zero-one tensor that is broadcastable to the
        distributions :attr:`~torch.distributions.distribution.Distribution.batch_shape`.

        :param torch.Tensor mask: A zero-one valued float tensor.
        :return: A masked copy of this distribution.
        :rtype: :class:`MaskedDistribution`
        """
        return MaskedDistribution(self, mask)
class TorchDistribution(torch.distributions.Distribution, TorchDistributionMixin):
    """
    Base class for PyTorch-compatible distributions with Pyro support.

    This should be the base class for almost all new Pyro distributions.
    Parameters and data are :class:`~torch.Tensor` objects, and methods
    return :class:`~torch.Tensor` values unless documented otherwise.

    **Tensor shapes.** A ``.shape()`` method gives the tensor shape of
    samples::

        x = d.sample(sample_shape)
        assert x.shape == d.shape(sample_shape)
        assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape

    Here *sample shape* indexes iid draws (an argument to ``sample``),
    *batch shape* indexes independent parameterizations (fixed per
    instance), and *event shape* covers the dimensions of one event (fixed
    per class). ``log_prob`` collapses the event dimensions and returns a
    tensor of shape ``sample_shape + d.batch_shape``::

        log_p = d.log_prob(x)
        assert log_p.shape == sample_shape + d.batch_shape

    **Implementing new distributions.** Derived classes must implement
    ``sample`` (or ``rsample`` if ``.has_rsample == True``) and
    ``log_prob``, and must implement the ``batch_shape`` and
    ``event_shape`` properties. Discrete classes may also implement
    ``enumerate_support`` and set ``.has_enumerate_support = True`` to
    improve gradient estimates.
    """
    pass
class ReshapedDistribution(TorchDistribution):
    """
    Reshapes a distribution by adding ``sample_shape`` to its total shape
    and adding ``reinterpreted_batch_ndims`` to its
    :attr:`~torch.distributions.distribution.Distribution.event_shape`.

    :param torch.Size sample_shape: The size of the iid batch to be drawn from
        the distribution.
    :param int reinterpreted_batch_ndims: The number of extra event dimensions that will
        be considered dependent.
    """
    # No constrained parameters of its own; constraints live on base_dist.
    arg_constraints = {}

    def __init__(self, base_dist, sample_shape=torch.Size(), reinterpreted_batch_ndims=0):
        sample_shape = torch.Size(sample_shape)
        # Cannot reinterpret more dims than the total batch dims available.
        if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape):
            raise ValueError('Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), '
                             'actual {} vs {}'.format(reinterpreted_batch_ndims,
                                                      len(sample_shape + base_dist.batch_shape)))
        self.base_dist = base_dist
        self.sample_shape = sample_shape
        self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
        # Split the total shape into (batch_shape, event_shape) after moving
        # reinterpreted_batch_ndims dims from batch to event.
        shape = sample_shape + base_dist.batch_shape + base_dist.event_shape
        batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.event_shape)
        batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]
        super(ReshapedDistribution, self).__init__(batch_shape, event_shape)

    def expand_by(self, sample_shape):
        # Prepend new iid dims; returns a fresh wrapper around the same base_dist.
        base_dist = self.base_dist
        sample_shape = torch.Size(sample_shape) + self.sample_shape
        reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
        return ReshapedDistribution(base_dist, sample_shape, reinterpreted_batch_ndims)

    def independent(self, reinterpreted_batch_ndims=None):
        # None means "reinterpret all remaining batch dims as event dims".
        if reinterpreted_batch_ndims is None:
            reinterpreted_batch_ndims = len(self.batch_shape)
        base_dist = self.base_dist
        sample_shape = self.sample_shape
        reinterpreted_batch_ndims = self.reinterpreted_batch_ndims + reinterpreted_batch_ndims
        return ReshapedDistribution(base_dist, sample_shape, reinterpreted_batch_ndims)

    @property
    def has_rsample(self):
        # Delegated to the wrapped distribution.
        return self.base_dist.has_rsample

    @property
    def has_enumerate_support(self):
        return self.base_dist.has_enumerate_support

    @constraints.dependent_property
    def support(self):
        return self.base_dist.support

    def sample(self, sample_shape=torch.Size()):
        # Extra iid dims are prepended to the requested sample_shape.
        return self.base_dist.sample(sample_shape + self.sample_shape)

    def rsample(self, sample_shape=torch.Size()):
        return self.base_dist.rsample(sample_shape + self.sample_shape)

    def log_prob(self, value):
        # Broadcast our batch_shape against value's batch dims, then sum out
        # the reinterpreted batch dims of the base log-density.
        shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() - self.event_dim])
        return sum_rightmost(self.base_dist.log_prob(value), self.reinterpreted_batch_ndims).expand(shape)

    def score_parts(self, value):
        shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() - self.event_dim])
        log_prob, score_function, entropy_term = self.base_dist.score_parts(value)
        log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims).expand(shape)
        # score_function / entropy_term may be plain numbers; only
        # tensor-valued terms need summing and reshaping.
        if not isinstance(score_function, numbers.Number):
            score_function = sum_rightmost(score_function, self.reinterpreted_batch_ndims).expand(shape)
        if not isinstance(entropy_term, numbers.Number):
            entropy_term = sum_rightmost(entropy_term, self.reinterpreted_batch_ndims).expand(shape)
        return ScoreParts(log_prob, score_function, entropy_term)

    def enumerate_support(self):
        if self.reinterpreted_batch_ndims:
            raise NotImplementedError("Pyro does not enumerate over cartesian products")
        samples = self.base_dist.enumerate_support()
        if not self.sample_shape:
            return samples
        # Shift enumeration dim to correct location.
        enum_shape, base_shape = samples.shape[:1], samples.shape[1:]
        samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape) + base_shape)
        samples = samples.expand(enum_shape + self.sample_shape + base_shape)
        return samples

    @property
    def mean(self):
        return self.base_dist.mean.expand(self.batch_shape + self.event_shape)

    @property
    def variance(self):
        return self.base_dist.variance.expand(self.batch_shape + self.event_shape)
class MaskedDistribution(TorchDistribution):
    """
    Masks a distribution by a zero-one tensor that is broadcastable to the
    distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.

    ``log_prob`` and ``score_parts`` are multiplied elementwise by the mask,
    so masked-off entries contribute nothing; sampling, support, and moments
    pass straight through to the base distribution.

    :param torch.Tensor mask: A zero-one valued float tensor.
    """
    arg_constraints = {}

    def __init__(self, base_dist, mask):
        # The mask may broadcast up to, but never beyond, the base batch shape.
        broadcast = broadcast_shape(mask.shape, base_dist.batch_shape)
        if broadcast != base_dist.batch_shape:
            raise ValueError("Expected mask.shape to be broadcastable to base_dist.batch_shape, "
                             "actual {} vs {}".format(mask.shape, base_dist.batch_shape))
        self.base_dist = base_dist
        self._mask = mask
        super(MaskedDistribution, self).__init__(base_dist.batch_shape, base_dist.event_shape)

    @property
    def has_rsample(self):
        # Reparameterizability is inherited from the wrapped distribution.
        return self.base_dist.has_rsample

    @property
    def has_enumerate_support(self):
        return self.base_dist.has_enumerate_support

    @constraints.dependent_property
    def support(self):
        return self.base_dist.support

    def sample(self, sample_shape=torch.Size()):
        # The mask plays no role in sampling.
        return self.base_dist.sample(sample_shape)

    def rsample(self, sample_shape=torch.Size()):
        return self.base_dist.rsample(sample_shape)

    def log_prob(self, value):
        # Zero out the log-density at masked-off batch entries.
        base_score = self.base_dist.log_prob(value)
        return base_score * self._mask

    def score_parts(self, value):
        # ScoreParts supports elementwise scaling, masking all terms at once.
        parts = self.base_dist.score_parts(value)
        return parts * self._mask

    def enumerate_support(self):
        return self.base_dist.enumerate_support()

    @property
    def mean(self):
        return self.base_dist.mean

    @property
    def variance(self):
        return self.base_dist.variance
|
flexible
|
{
"blob_id": "0f0ea6f07f9a082042ed9aff7a95d372c32b5a13",
"index": 1897,
"step-1": "<mask token>\n\n\nclass ReshapedDistribution(TorchDistribution):\n <mask token>\n <mask token>\n\n def __init__(self, base_dist, sample_shape=torch.Size(),\n reinterpreted_batch_ndims=0):\n sample_shape = torch.Size(sample_shape)\n if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape\n ):\n raise ValueError(\n 'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'\n .format(reinterpreted_batch_ndims, len(sample_shape +\n base_dist.batch_shape)))\n self.base_dist = base_dist\n self.sample_shape = sample_shape\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n shape = sample_shape + base_dist.batch_shape + base_dist.event_shape\n batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.\n event_shape)\n batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]\n super(ReshapedDistribution, self).__init__(batch_shape, event_shape)\n\n def expand_by(self, sample_shape):\n base_dist = self.base_dist\n sample_shape = torch.Size(sample_shape) + self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n def independent(self, reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n base_dist = self.base_dist\n sample_shape = self.sample_shape\n reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +\n reinterpreted_batch_ndims)\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape + self.sample_shape)\n <mask 
token>\n\n def log_prob(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n return sum_rightmost(self.base_dist.log_prob(value), self.\n reinterpreted_batch_ndims).expand(shape)\n\n def score_parts(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n log_prob, score_function, entropy_term = self.base_dist.score_parts(\n value)\n log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims\n ).expand(shape)\n if not isinstance(score_function, numbers.Number):\n score_function = sum_rightmost(score_function, self.\n reinterpreted_batch_ndims).expand(shape)\n if not isinstance(entropy_term, numbers.Number):\n entropy_term = sum_rightmost(entropy_term, self.\n reinterpreted_batch_ndims).expand(shape)\n return ScoreParts(log_prob, score_function, entropy_term)\n\n def enumerate_support(self):\n if self.reinterpreted_batch_ndims:\n raise NotImplementedError(\n 'Pyro does not enumerate over cartesian products')\n samples = self.base_dist.enumerate_support()\n if not self.sample_shape:\n return samples\n enum_shape, base_shape = samples.shape[:1], samples.shape[1:]\n samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape\n ) + base_shape)\n samples = samples.expand(enum_shape + self.sample_shape + base_shape)\n return samples\n\n @property\n def mean(self):\n return self.base_dist.mean.expand(self.batch_shape + self.event_shape)\n\n @property\n def variance(self):\n return self.base_dist.variance.expand(self.batch_shape + self.\n event_shape)\n\n\nclass MaskedDistribution(TorchDistribution):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, mask):\n if broadcast_shape(mask.shape, base_dist.batch_shape\n ) != 
base_dist.batch_shape:\n raise ValueError(\n 'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'\n .format(mask.shape, base_dist.batch_shape))\n self.base_dist = base_dist\n self._mask = mask\n super(MaskedDistribution, self).__init__(base_dist.batch_shape,\n base_dist.event_shape)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape)\n\n def log_prob(self, value):\n return self.base_dist.log_prob(value) * self._mask\n\n def score_parts(self, value):\n return self.base_dist.score_parts(value) * self._mask\n\n def enumerate_support(self):\n return self.base_dist.enumerate_support()\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return self.base_dist.variance\n",
"step-2": "<mask token>\n\n\nclass TorchDistributionMixin(Distribution):\n <mask token>\n <mask token>\n <mask token>\n\n def shape(self, sample_shape=torch.Size()):\n \"\"\"\n The tensor shape of samples from this distribution.\n\n Samples are of shape::\n\n d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: Tensor shape of samples.\n :rtype: torch.Size\n \"\"\"\n return sample_shape + self.batch_shape + self.event_shape\n <mask token>\n <mask token>\n\n def reshape(self, sample_shape=None, extra_event_dims=None):\n raise Exception(\n \"\"\"\n .reshape(sample_shape=s, extra_event_dims=n) was renamed and split into\n .expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).\"\"\"\n )\n <mask token>\n <mask token>\n\n\nclass TorchDistribution(torch.distributions.Distribution,\n TorchDistributionMixin):\n \"\"\"\n Base class for PyTorch-compatible distributions with Pyro support.\n\n This should be the base class for almost all new Pyro distributions.\n\n .. note::\n\n Parameters and data should be of type :class:`~torch.Tensor`\n and all methods return type :class:`~torch.Tensor` unless\n otherwise noted.\n\n **Tensor Shapes**:\n\n TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n\n Pyro follows the same distribution shape semantics as PyTorch. It distinguishes\n between three different roles for tensor shapes of samples:\n\n - *sample shape* corresponds to the shape of the iid samples drawn from the distribution.\n This is taken as an argument by the distribution's `sample` method.\n - *batch shape* corresponds to non-identical (independent) parameterizations of\n the distribution, inferred from the distribution's parameter shapes. 
This is\n fixed for a distribution instance.\n - *event shape* corresponds to the event dimensions of the distribution, which\n is fixed for a distribution class. These are collapsed when we try to score\n a sample from the distribution via `d.log_prob(x)`.\n\n These shapes are related by the equation::\n\n assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n Distributions provide a vectorized\n :meth`~torch.distributions.distribution.Distribution.log_prob` method that\n evaluates the log probability density of each event in a batch\n independently, returning a tensor of shape\n ``sample_shape + d.batch_shape``::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n log_p = d.log_prob(x)\n assert log_p.shape == sample_shape + d.batch_shape\n\n **Implementing New Distributions**:\n\n Derived classes must implement the methods\n :meth:`~torch.distributions.distribution.Distribution.sample`\n (or :meth:`~torch.distributions.distribution.Distribution.rsample` if\n ``.has_rsample == True``) and\n :meth:`~torch.distributions.distribution.Distribution.log_prob`, and must\n implement the properties\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`,\n and :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n Discrete classes may also implement the\n :meth:`~torch.distributions.distribution.Distribution.enumerate_support`\n method to improve gradient estimates and set\n ``.has_enumerate_support = True``.\n \"\"\"\n pass\n\n\nclass ReshapedDistribution(TorchDistribution):\n \"\"\"\n Reshapes a distribution by adding ``sample_shape`` to its total shape\n and adding ``reinterpreted_batch_ndims`` to its\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n :param torch.Size sample_shape: The size of the iid batch to be drawn from\n the distribution.\n :param int reinterpreted_batch_ndims: The number of extra event dimensions that will\n be considered dependent.\n \"\"\"\n 
arg_constraints = {}\n\n def __init__(self, base_dist, sample_shape=torch.Size(),\n reinterpreted_batch_ndims=0):\n sample_shape = torch.Size(sample_shape)\n if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape\n ):\n raise ValueError(\n 'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'\n .format(reinterpreted_batch_ndims, len(sample_shape +\n base_dist.batch_shape)))\n self.base_dist = base_dist\n self.sample_shape = sample_shape\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n shape = sample_shape + base_dist.batch_shape + base_dist.event_shape\n batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.\n event_shape)\n batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]\n super(ReshapedDistribution, self).__init__(batch_shape, event_shape)\n\n def expand_by(self, sample_shape):\n base_dist = self.base_dist\n sample_shape = torch.Size(sample_shape) + self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n def independent(self, reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n base_dist = self.base_dist\n sample_shape = self.sample_shape\n reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +\n reinterpreted_batch_ndims)\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape + self.sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape + 
self.sample_shape)\n\n def log_prob(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n return sum_rightmost(self.base_dist.log_prob(value), self.\n reinterpreted_batch_ndims).expand(shape)\n\n def score_parts(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n log_prob, score_function, entropy_term = self.base_dist.score_parts(\n value)\n log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims\n ).expand(shape)\n if not isinstance(score_function, numbers.Number):\n score_function = sum_rightmost(score_function, self.\n reinterpreted_batch_ndims).expand(shape)\n if not isinstance(entropy_term, numbers.Number):\n entropy_term = sum_rightmost(entropy_term, self.\n reinterpreted_batch_ndims).expand(shape)\n return ScoreParts(log_prob, score_function, entropy_term)\n\n def enumerate_support(self):\n if self.reinterpreted_batch_ndims:\n raise NotImplementedError(\n 'Pyro does not enumerate over cartesian products')\n samples = self.base_dist.enumerate_support()\n if not self.sample_shape:\n return samples\n enum_shape, base_shape = samples.shape[:1], samples.shape[1:]\n samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape\n ) + base_shape)\n samples = samples.expand(enum_shape + self.sample_shape + base_shape)\n return samples\n\n @property\n def mean(self):\n return self.base_dist.mean.expand(self.batch_shape + self.event_shape)\n\n @property\n def variance(self):\n return self.base_dist.variance.expand(self.batch_shape + self.\n event_shape)\n\n\nclass MaskedDistribution(TorchDistribution):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, mask):\n if broadcast_shape(mask.shape, 
base_dist.batch_shape\n ) != base_dist.batch_shape:\n raise ValueError(\n 'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'\n .format(mask.shape, base_dist.batch_shape))\n self.base_dist = base_dist\n self._mask = mask\n super(MaskedDistribution, self).__init__(base_dist.batch_shape,\n base_dist.event_shape)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape)\n\n def log_prob(self, value):\n return self.base_dist.log_prob(value) * self._mask\n\n def score_parts(self, value):\n return self.base_dist.score_parts(value) * self._mask\n\n def enumerate_support(self):\n return self.base_dist.enumerate_support()\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return self.base_dist.variance\n",
"step-3": "<mask token>\n\n\nclass TorchDistributionMixin(Distribution):\n <mask token>\n\n def __call__(self, sample_shape=torch.Size()):\n \"\"\"\n Samples a random value.\n\n This is reparameterized whenever possible, calling\n :meth:`~torch.distributions.distribution.Distribution.rsample` for\n reparameterized distributions and\n :meth:`~torch.distributions.distribution.Distribution.sample` for\n non-reparameterized distributions.\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: A random value or batch of random values (if parameters are\n batched). The shape of the result should be `self.shape()`.\n :rtype: torch.Tensor\n \"\"\"\n return self.rsample(sample_shape) if self.has_rsample else self.sample(\n sample_shape)\n <mask token>\n\n def shape(self, sample_shape=torch.Size()):\n \"\"\"\n The tensor shape of samples from this distribution.\n\n Samples are of shape::\n\n d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: Tensor shape of samples.\n :rtype: torch.Size\n \"\"\"\n return sample_shape + self.batch_shape + self.event_shape\n <mask token>\n <mask token>\n\n def reshape(self, sample_shape=None, extra_event_dims=None):\n raise Exception(\n \"\"\"\n .reshape(sample_shape=s, extra_event_dims=n) was renamed and split into\n .expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).\"\"\"\n )\n <mask token>\n <mask token>\n\n\nclass TorchDistribution(torch.distributions.Distribution,\n TorchDistributionMixin):\n \"\"\"\n Base class for PyTorch-compatible distributions with Pyro support.\n\n This should be the base class for almost all new Pyro distributions.\n\n .. 
note::\n\n Parameters and data should be of type :class:`~torch.Tensor`\n and all methods return type :class:`~torch.Tensor` unless\n otherwise noted.\n\n **Tensor Shapes**:\n\n TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n\n Pyro follows the same distribution shape semantics as PyTorch. It distinguishes\n between three different roles for tensor shapes of samples:\n\n - *sample shape* corresponds to the shape of the iid samples drawn from the distribution.\n This is taken as an argument by the distribution's `sample` method.\n - *batch shape* corresponds to non-identical (independent) parameterizations of\n the distribution, inferred from the distribution's parameter shapes. This is\n fixed for a distribution instance.\n - *event shape* corresponds to the event dimensions of the distribution, which\n is fixed for a distribution class. These are collapsed when we try to score\n a sample from the distribution via `d.log_prob(x)`.\n\n These shapes are related by the equation::\n\n assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n Distributions provide a vectorized\n :meth`~torch.distributions.distribution.Distribution.log_prob` method that\n evaluates the log probability density of each event in a batch\n independently, returning a tensor of shape\n ``sample_shape + d.batch_shape``::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n log_p = d.log_prob(x)\n assert log_p.shape == sample_shape + d.batch_shape\n\n **Implementing New Distributions**:\n\n Derived classes must implement the methods\n :meth:`~torch.distributions.distribution.Distribution.sample`\n (or :meth:`~torch.distributions.distribution.Distribution.rsample` if\n ``.has_rsample == True``) and\n :meth:`~torch.distributions.distribution.Distribution.log_prob`, and must\n implement the properties\n 
:attr:`~torch.distributions.distribution.Distribution.batch_shape`,\n and :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n Discrete classes may also implement the\n :meth:`~torch.distributions.distribution.Distribution.enumerate_support`\n method to improve gradient estimates and set\n ``.has_enumerate_support = True``.\n \"\"\"\n pass\n\n\nclass ReshapedDistribution(TorchDistribution):\n \"\"\"\n Reshapes a distribution by adding ``sample_shape`` to its total shape\n and adding ``reinterpreted_batch_ndims`` to its\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n :param torch.Size sample_shape: The size of the iid batch to be drawn from\n the distribution.\n :param int reinterpreted_batch_ndims: The number of extra event dimensions that will\n be considered dependent.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, sample_shape=torch.Size(),\n reinterpreted_batch_ndims=0):\n sample_shape = torch.Size(sample_shape)\n if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape\n ):\n raise ValueError(\n 'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'\n .format(reinterpreted_batch_ndims, len(sample_shape +\n base_dist.batch_shape)))\n self.base_dist = base_dist\n self.sample_shape = sample_shape\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n shape = sample_shape + base_dist.batch_shape + base_dist.event_shape\n batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.\n event_shape)\n batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]\n super(ReshapedDistribution, self).__init__(batch_shape, event_shape)\n\n def expand_by(self, sample_shape):\n base_dist = self.base_dist\n sample_shape = torch.Size(sample_shape) + self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n def independent(self, 
reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n base_dist = self.base_dist\n sample_shape = self.sample_shape\n reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +\n reinterpreted_batch_ndims)\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape + self.sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape + self.sample_shape)\n\n def log_prob(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n return sum_rightmost(self.base_dist.log_prob(value), self.\n reinterpreted_batch_ndims).expand(shape)\n\n def score_parts(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n log_prob, score_function, entropy_term = self.base_dist.score_parts(\n value)\n log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims\n ).expand(shape)\n if not isinstance(score_function, numbers.Number):\n score_function = sum_rightmost(score_function, self.\n reinterpreted_batch_ndims).expand(shape)\n if not isinstance(entropy_term, numbers.Number):\n entropy_term = sum_rightmost(entropy_term, self.\n reinterpreted_batch_ndims).expand(shape)\n return ScoreParts(log_prob, score_function, entropy_term)\n\n def enumerate_support(self):\n if self.reinterpreted_batch_ndims:\n raise NotImplementedError(\n 'Pyro does not enumerate over cartesian products')\n samples = self.base_dist.enumerate_support()\n if not self.sample_shape:\n return samples\n enum_shape, base_shape = 
samples.shape[:1], samples.shape[1:]\n samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape\n ) + base_shape)\n samples = samples.expand(enum_shape + self.sample_shape + base_shape)\n return samples\n\n @property\n def mean(self):\n return self.base_dist.mean.expand(self.batch_shape + self.event_shape)\n\n @property\n def variance(self):\n return self.base_dist.variance.expand(self.batch_shape + self.\n event_shape)\n\n\nclass MaskedDistribution(TorchDistribution):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, mask):\n if broadcast_shape(mask.shape, base_dist.batch_shape\n ) != base_dist.batch_shape:\n raise ValueError(\n 'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'\n .format(mask.shape, base_dist.batch_shape))\n self.base_dist = base_dist\n self._mask = mask\n super(MaskedDistribution, self).__init__(base_dist.batch_shape,\n base_dist.event_shape)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape)\n\n def log_prob(self, value):\n return self.base_dist.log_prob(value) * self._mask\n\n def score_parts(self, value):\n return self.base_dist.score_parts(value) * self._mask\n\n def enumerate_support(self):\n return self.base_dist.enumerate_support()\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return self.base_dist.variance\n",
"step-4": "<mask token>\n\n\nclass TorchDistributionMixin(Distribution):\n <mask token>\n\n def __call__(self, sample_shape=torch.Size()):\n \"\"\"\n Samples a random value.\n\n This is reparameterized whenever possible, calling\n :meth:`~torch.distributions.distribution.Distribution.rsample` for\n reparameterized distributions and\n :meth:`~torch.distributions.distribution.Distribution.sample` for\n non-reparameterized distributions.\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: A random value or batch of random values (if parameters are\n batched). The shape of the result should be `self.shape()`.\n :rtype: torch.Tensor\n \"\"\"\n return self.rsample(sample_shape) if self.has_rsample else self.sample(\n sample_shape)\n <mask token>\n\n def shape(self, sample_shape=torch.Size()):\n \"\"\"\n The tensor shape of samples from this distribution.\n\n Samples are of shape::\n\n d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: Tensor shape of samples.\n :rtype: torch.Size\n \"\"\"\n return sample_shape + self.batch_shape + self.event_shape\n\n def expand(self, batch_shape):\n \"\"\"\n Expands a distribution to a desired\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n Note that this is more general than :meth:`expand_by` because\n ``d.expand_by(sample_shape)`` can be reduced to\n ``d.expand(sample_shape + d.batch_shape)``.\n\n :param torch.Size batch_shape: The target ``batch_shape``. 
This must\n compatible with ``self.batch_shape`` similar to the requirements\n of :func:`torch.Tensor.expand`: the target ``batch_shape`` must\n be at least as long as ``self.batch_shape``, and for each\n non-singleton dim of ``self.batch_shape``, ``batch_shape`` must\n either agree or be set to ``-1``.\n :return: An expanded version of this distribution.\n :rtype: :class:`ReshapedDistribution`\n \"\"\"\n batch_shape = list(batch_shape)\n if len(batch_shape) < len(self.batch_shape):\n raise ValueError(\n 'Expected len(batch_shape) >= len(self.batch_shape), actual {} vs {}'\n .format(len(batch_shape), len(self.batch_shape)))\n for dim in range(-1, -1 - len(self.batch_shape), -1):\n if batch_shape[dim] == -1:\n batch_shape[dim] = self.batch_shape[dim]\n elif batch_shape[dim] != self.batch_shape[dim]:\n if self.batch_shape[dim] != 1:\n raise ValueError(\n 'Cannot broadcast dim {} of size {} to size {}'.\n format(dim, self.batch_shape[dim], batch_shape[dim]))\n else:\n raise NotImplementedError(\n 'https://github.com/uber/pyro/issues/1119')\n sample_shape = batch_shape[:len(batch_shape) - len(self.batch_shape)]\n return self.expand_by(sample_shape)\n <mask token>\n\n def reshape(self, sample_shape=None, extra_event_dims=None):\n raise Exception(\n \"\"\"\n .reshape(sample_shape=s, extra_event_dims=n) was renamed and split into\n .expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).\"\"\"\n )\n\n def independent(self, reinterpreted_batch_ndims=None):\n \"\"\"\n Reinterprets the ``n`` rightmost dimensions of this distributions\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`\n as event dims, adding them to the left side of\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n Example::\n\n >>> [d1.batch_shape, d1.event_shape]\n [torch.Size((2, 3)), torch.Size((4, 5))]\n >>> d2 = d1.independent(1)\n >>> [d2.batch_shape, d2.event_shape]\n [torch.Size((2,)), torch.Size((3, 4, 5))]\n >>> d3 = d1.independent(2)\n >>> 
[d3.batch_shape, d3.event_shape]\n [torch.Size(()), torch.Size((2, 3, 4, 5))]\n\n :param int reinterpreted_batch_ndims: The number of batch dimensions\n to reinterpret as event dimensions.\n :return: A reshaped version of this distribution.\n :rtype: :class:`ReshapedDistribution`\n \"\"\"\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n return ReshapedDistribution(self, reinterpreted_batch_ndims=\n reinterpreted_batch_ndims)\n\n def mask(self, mask):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distributions :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n :return: A masked copy of this distribution.\n :rtype: :class:`MaskedDistribution`\n \"\"\"\n return MaskedDistribution(self, mask)\n\n\nclass TorchDistribution(torch.distributions.Distribution,\n TorchDistributionMixin):\n \"\"\"\n Base class for PyTorch-compatible distributions with Pyro support.\n\n This should be the base class for almost all new Pyro distributions.\n\n .. note::\n\n Parameters and data should be of type :class:`~torch.Tensor`\n and all methods return type :class:`~torch.Tensor` unless\n otherwise noted.\n\n **Tensor Shapes**:\n\n TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n\n Pyro follows the same distribution shape semantics as PyTorch. It distinguishes\n between three different roles for tensor shapes of samples:\n\n - *sample shape* corresponds to the shape of the iid samples drawn from the distribution.\n This is taken as an argument by the distribution's `sample` method.\n - *batch shape* corresponds to non-identical (independent) parameterizations of\n the distribution, inferred from the distribution's parameter shapes. 
This is\n fixed for a distribution instance.\n - *event shape* corresponds to the event dimensions of the distribution, which\n is fixed for a distribution class. These are collapsed when we try to score\n a sample from the distribution via `d.log_prob(x)`.\n\n These shapes are related by the equation::\n\n assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n Distributions provide a vectorized\n :meth`~torch.distributions.distribution.Distribution.log_prob` method that\n evaluates the log probability density of each event in a batch\n independently, returning a tensor of shape\n ``sample_shape + d.batch_shape``::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n log_p = d.log_prob(x)\n assert log_p.shape == sample_shape + d.batch_shape\n\n **Implementing New Distributions**:\n\n Derived classes must implement the methods\n :meth:`~torch.distributions.distribution.Distribution.sample`\n (or :meth:`~torch.distributions.distribution.Distribution.rsample` if\n ``.has_rsample == True``) and\n :meth:`~torch.distributions.distribution.Distribution.log_prob`, and must\n implement the properties\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`,\n and :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n Discrete classes may also implement the\n :meth:`~torch.distributions.distribution.Distribution.enumerate_support`\n method to improve gradient estimates and set\n ``.has_enumerate_support = True``.\n \"\"\"\n pass\n\n\nclass ReshapedDistribution(TorchDistribution):\n \"\"\"\n Reshapes a distribution by adding ``sample_shape`` to its total shape\n and adding ``reinterpreted_batch_ndims`` to its\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n :param torch.Size sample_shape: The size of the iid batch to be drawn from\n the distribution.\n :param int reinterpreted_batch_ndims: The number of extra event dimensions that will\n be considered dependent.\n \"\"\"\n 
arg_constraints = {}\n\n def __init__(self, base_dist, sample_shape=torch.Size(),\n reinterpreted_batch_ndims=0):\n sample_shape = torch.Size(sample_shape)\n if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape\n ):\n raise ValueError(\n 'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'\n .format(reinterpreted_batch_ndims, len(sample_shape +\n base_dist.batch_shape)))\n self.base_dist = base_dist\n self.sample_shape = sample_shape\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n shape = sample_shape + base_dist.batch_shape + base_dist.event_shape\n batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.\n event_shape)\n batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]\n super(ReshapedDistribution, self).__init__(batch_shape, event_shape)\n\n def expand_by(self, sample_shape):\n base_dist = self.base_dist\n sample_shape = torch.Size(sample_shape) + self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n def independent(self, reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n base_dist = self.base_dist\n sample_shape = self.sample_shape\n reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +\n reinterpreted_batch_ndims)\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape + self.sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape + 
self.sample_shape)\n\n def log_prob(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n return sum_rightmost(self.base_dist.log_prob(value), self.\n reinterpreted_batch_ndims).expand(shape)\n\n def score_parts(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n log_prob, score_function, entropy_term = self.base_dist.score_parts(\n value)\n log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims\n ).expand(shape)\n if not isinstance(score_function, numbers.Number):\n score_function = sum_rightmost(score_function, self.\n reinterpreted_batch_ndims).expand(shape)\n if not isinstance(entropy_term, numbers.Number):\n entropy_term = sum_rightmost(entropy_term, self.\n reinterpreted_batch_ndims).expand(shape)\n return ScoreParts(log_prob, score_function, entropy_term)\n\n def enumerate_support(self):\n if self.reinterpreted_batch_ndims:\n raise NotImplementedError(\n 'Pyro does not enumerate over cartesian products')\n samples = self.base_dist.enumerate_support()\n if not self.sample_shape:\n return samples\n enum_shape, base_shape = samples.shape[:1], samples.shape[1:]\n samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape\n ) + base_shape)\n samples = samples.expand(enum_shape + self.sample_shape + base_shape)\n return samples\n\n @property\n def mean(self):\n return self.base_dist.mean.expand(self.batch_shape + self.event_shape)\n\n @property\n def variance(self):\n return self.base_dist.variance.expand(self.batch_shape + self.\n event_shape)\n\n\nclass MaskedDistribution(TorchDistribution):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, mask):\n if broadcast_shape(mask.shape, 
base_dist.batch_shape\n ) != base_dist.batch_shape:\n raise ValueError(\n 'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'\n .format(mask.shape, base_dist.batch_shape))\n self.base_dist = base_dist\n self._mask = mask\n super(MaskedDistribution, self).__init__(base_dist.batch_shape,\n base_dist.event_shape)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape)\n\n def log_prob(self, value):\n return self.base_dist.log_prob(value) * self._mask\n\n def score_parts(self, value):\n return self.base_dist.score_parts(value) * self._mask\n\n def enumerate_support(self):\n return self.base_dist.enumerate_support()\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return self.base_dist.variance\n",
"step-5": "from __future__ import absolute_import, division, print_function\n\nimport numbers\n\nimport torch\nfrom torch.distributions import constraints\n\nfrom pyro.distributions.distribution import Distribution\nfrom pyro.distributions.score_parts import ScoreParts\nfrom pyro.distributions.util import broadcast_shape, sum_rightmost\n\n\nclass TorchDistributionMixin(Distribution):\n \"\"\"\n Mixin to provide Pyro compatibility for PyTorch distributions.\n\n You should instead use `TorchDistribution` for new distribution classes.\n\n This is mainly useful for wrapping existing PyTorch distributions for\n use in Pyro. Derived classes must first inherit from\n :class:`torch.distributions.distribution.Distribution` and then inherit\n from :class:`TorchDistributionMixin`.\n \"\"\"\n def __call__(self, sample_shape=torch.Size()):\n \"\"\"\n Samples a random value.\n\n This is reparameterized whenever possible, calling\n :meth:`~torch.distributions.distribution.Distribution.rsample` for\n reparameterized distributions and\n :meth:`~torch.distributions.distribution.Distribution.sample` for\n non-reparameterized distributions.\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: A random value or batch of random values (if parameters are\n batched). 
The shape of the result should be `self.shape()`.\n :rtype: torch.Tensor\n \"\"\"\n return self.rsample(sample_shape) if self.has_rsample else self.sample(sample_shape)\n\n @property\n def event_dim(self):\n \"\"\"\n :return: Number of dimensions of individual events.\n :rtype: int\n \"\"\"\n return len(self.event_shape)\n\n def shape(self, sample_shape=torch.Size()):\n \"\"\"\n The tensor shape of samples from this distribution.\n\n Samples are of shape::\n\n d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: Tensor shape of samples.\n :rtype: torch.Size\n \"\"\"\n return sample_shape + self.batch_shape + self.event_shape\n\n def expand(self, batch_shape):\n \"\"\"\n Expands a distribution to a desired\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n Note that this is more general than :meth:`expand_by` because\n ``d.expand_by(sample_shape)`` can be reduced to\n ``d.expand(sample_shape + d.batch_shape)``.\n\n :param torch.Size batch_shape: The target ``batch_shape``. 
This must\n compatible with ``self.batch_shape`` similar to the requirements\n of :func:`torch.Tensor.expand`: the target ``batch_shape`` must\n be at least as long as ``self.batch_shape``, and for each\n non-singleton dim of ``self.batch_shape``, ``batch_shape`` must\n either agree or be set to ``-1``.\n :return: An expanded version of this distribution.\n :rtype: :class:`ReshapedDistribution`\n \"\"\"\n batch_shape = list(batch_shape)\n if len(batch_shape) < len(self.batch_shape):\n raise ValueError(\"Expected len(batch_shape) >= len(self.batch_shape), \"\n \"actual {} vs {}\".format(len(batch_shape), len(self.batch_shape)))\n # check sizes of existing dims\n for dim in range(-1, -1 - len(self.batch_shape), -1):\n if batch_shape[dim] == -1:\n batch_shape[dim] = self.batch_shape[dim]\n elif batch_shape[dim] != self.batch_shape[dim]:\n if self.batch_shape[dim] != 1:\n raise ValueError(\"Cannot broadcast dim {} of size {} to size {}\".format(\n dim, self.batch_shape[dim], batch_shape[dim]))\n else:\n raise NotImplementedError(\"https://github.com/uber/pyro/issues/1119\")\n sample_shape = batch_shape[:len(batch_shape) - len(self.batch_shape)]\n return self.expand_by(sample_shape)\n\n def expand_by(self, sample_shape):\n \"\"\"\n Expands a distribution by adding ``sample_shape`` to the left side of\n its :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n To expand internal dims of ``self.batch_shape`` from 1 to something\n larger, use :meth:`expand` instead.\n\n :param torch.Size sample_shape: The size of the iid batch to be drawn\n from the distribution.\n :return: An expanded version of this distribution.\n :rtype: :class:`ReshapedDistribution`\n \"\"\"\n return ReshapedDistribution(self, sample_shape=sample_shape)\n\n def reshape(self, sample_shape=None, extra_event_dims=None):\n raise Exception('''\n .reshape(sample_shape=s, extra_event_dims=n) was renamed and split into\n 
.expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).''')\n\n def independent(self, reinterpreted_batch_ndims=None):\n \"\"\"\n Reinterprets the ``n`` rightmost dimensions of this distributions\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`\n as event dims, adding them to the left side of\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n Example::\n\n >>> [d1.batch_shape, d1.event_shape]\n [torch.Size((2, 3)), torch.Size((4, 5))]\n >>> d2 = d1.independent(1)\n >>> [d2.batch_shape, d2.event_shape]\n [torch.Size((2,)), torch.Size((3, 4, 5))]\n >>> d3 = d1.independent(2)\n >>> [d3.batch_shape, d3.event_shape]\n [torch.Size(()), torch.Size((2, 3, 4, 5))]\n\n :param int reinterpreted_batch_ndims: The number of batch dimensions\n to reinterpret as event dimensions.\n :return: A reshaped version of this distribution.\n :rtype: :class:`ReshapedDistribution`\n \"\"\"\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n # TODO return pyro.distributions.torch.Independent(self, reinterpreted_batch_ndims)\n return ReshapedDistribution(self, reinterpreted_batch_ndims=reinterpreted_batch_ndims)\n\n def mask(self, mask):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distributions :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n :return: A masked copy of this distribution.\n :rtype: :class:`MaskedDistribution`\n \"\"\"\n return MaskedDistribution(self, mask)\n\n\nclass TorchDistribution(torch.distributions.Distribution, TorchDistributionMixin):\n \"\"\"\n Base class for PyTorch-compatible distributions with Pyro support.\n\n This should be the base class for almost all new Pyro distributions.\n\n .. 
note::\n\n Parameters and data should be of type :class:`~torch.Tensor`\n and all methods return type :class:`~torch.Tensor` unless\n otherwise noted.\n\n **Tensor Shapes**:\n\n TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n\n Pyro follows the same distribution shape semantics as PyTorch. It distinguishes\n between three different roles for tensor shapes of samples:\n\n - *sample shape* corresponds to the shape of the iid samples drawn from the distribution.\n This is taken as an argument by the distribution's `sample` method.\n - *batch shape* corresponds to non-identical (independent) parameterizations of\n the distribution, inferred from the distribution's parameter shapes. This is\n fixed for a distribution instance.\n - *event shape* corresponds to the event dimensions of the distribution, which\n is fixed for a distribution class. These are collapsed when we try to score\n a sample from the distribution via `d.log_prob(x)`.\n\n These shapes are related by the equation::\n\n assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n Distributions provide a vectorized\n :meth`~torch.distributions.distribution.Distribution.log_prob` method that\n evaluates the log probability density of each event in a batch\n independently, returning a tensor of shape\n ``sample_shape + d.batch_shape``::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n log_p = d.log_prob(x)\n assert log_p.shape == sample_shape + d.batch_shape\n\n **Implementing New Distributions**:\n\n Derived classes must implement the methods\n :meth:`~torch.distributions.distribution.Distribution.sample`\n (or :meth:`~torch.distributions.distribution.Distribution.rsample` if\n ``.has_rsample == True``) and\n :meth:`~torch.distributions.distribution.Distribution.log_prob`, and must\n implement the properties\n 
:attr:`~torch.distributions.distribution.Distribution.batch_shape`,\n and :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n Discrete classes may also implement the\n :meth:`~torch.distributions.distribution.Distribution.enumerate_support`\n method to improve gradient estimates and set\n ``.has_enumerate_support = True``.\n \"\"\"\n pass\n\n\nclass ReshapedDistribution(TorchDistribution):\n \"\"\"\n Reshapes a distribution by adding ``sample_shape`` to its total shape\n and adding ``reinterpreted_batch_ndims`` to its\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n :param torch.Size sample_shape: The size of the iid batch to be drawn from\n the distribution.\n :param int reinterpreted_batch_ndims: The number of extra event dimensions that will\n be considered dependent.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, sample_shape=torch.Size(), reinterpreted_batch_ndims=0):\n sample_shape = torch.Size(sample_shape)\n if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape):\n raise ValueError('Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), '\n 'actual {} vs {}'.format(reinterpreted_batch_ndims,\n len(sample_shape + base_dist.batch_shape)))\n self.base_dist = base_dist\n self.sample_shape = sample_shape\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n shape = sample_shape + base_dist.batch_shape + base_dist.event_shape\n batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.event_shape)\n batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]\n super(ReshapedDistribution, self).__init__(batch_shape, event_shape)\n\n def expand_by(self, sample_shape):\n base_dist = self.base_dist\n sample_shape = torch.Size(sample_shape) + self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape, reinterpreted_batch_ndims)\n\n def independent(self, 
reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n base_dist = self.base_dist\n sample_shape = self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims + reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape, reinterpreted_batch_ndims)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape + self.sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape + self.sample_shape)\n\n def log_prob(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() - self.event_dim])\n return sum_rightmost(self.base_dist.log_prob(value), self.reinterpreted_batch_ndims).expand(shape)\n\n def score_parts(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() - self.event_dim])\n log_prob, score_function, entropy_term = self.base_dist.score_parts(value)\n log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims).expand(shape)\n if not isinstance(score_function, numbers.Number):\n score_function = sum_rightmost(score_function, self.reinterpreted_batch_ndims).expand(shape)\n if not isinstance(entropy_term, numbers.Number):\n entropy_term = sum_rightmost(entropy_term, self.reinterpreted_batch_ndims).expand(shape)\n return ScoreParts(log_prob, score_function, entropy_term)\n\n def enumerate_support(self):\n if self.reinterpreted_batch_ndims:\n raise NotImplementedError(\"Pyro does not enumerate over cartesian products\")\n\n samples = self.base_dist.enumerate_support()\n if not self.sample_shape:\n return samples\n\n # Shift enumeration dim to correct location.\n 
enum_shape, base_shape = samples.shape[:1], samples.shape[1:]\n samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape) + base_shape)\n samples = samples.expand(enum_shape + self.sample_shape + base_shape)\n return samples\n\n @property\n def mean(self):\n return self.base_dist.mean.expand(self.batch_shape + self.event_shape)\n\n @property\n def variance(self):\n return self.base_dist.variance.expand(self.batch_shape + self.event_shape)\n\n\nclass MaskedDistribution(TorchDistribution):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, mask):\n if broadcast_shape(mask.shape, base_dist.batch_shape) != base_dist.batch_shape:\n raise ValueError(\"Expected mask.shape to be broadcastable to base_dist.batch_shape, \"\n \"actual {} vs {}\".format(mask.shape, base_dist.batch_shape))\n self.base_dist = base_dist\n self._mask = mask\n super(MaskedDistribution, self).__init__(base_dist.batch_shape, base_dist.event_shape)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape)\n\n def log_prob(self, value):\n return self.base_dist.log_prob(value) * self._mask\n\n def score_parts(self, value):\n return self.base_dist.score_parts(value) * self._mask\n\n def enumerate_support(self):\n return self.base_dist.enumerate_support()\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return 
self.base_dist.variance\n",
"step-ids": [
27,
35,
36,
39,
44
]
}
|
[
27,
35,
36,
39,
44
] |
from typing import Dict, Tuple
import torch
from tqdm import tqdm
import schnetpack.properties as structure
from schnetpack.data import AtomsLoader
__all__ = ["calculate_stats"]
def calculate_stats(
    dataloader: AtomsLoader,
    divide_by_atoms: Dict[str, bool],
    atomref: Dict[str, torch.Tensor] = None,
) -> Dict[str, Tuple[torch.Tensor, torch.Tensor]]:
    """
    Use the incremental Welford algorithm described in [h1]_ to accumulate
    the mean and standard deviation over a set of samples.

    References:
    -----------
    .. [h1] https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance

    Args:
        dataloader: loader yielding batches of property tensors
        divide_by_atoms: dict from property name to bool:
            If True, divide property by number of atoms before calculating statistics.
        atomref: reference values for single atoms to be removed before
            calculating stats

    Returns:
        dict from property name to a ``(mean, stddev)`` pair of scalar tensors
    """
    property_names = list(divide_by_atoms.keys())
    # 1.0 where a property is normalized per atom, 0.0 where it is used as-is
    norm_mask = torch.tensor(
        [float(divide_by_atoms[p]) for p in property_names], dtype=torch.float64
    )

    # Welford accumulators: sample count, running mean, sum of squared deviations
    count = 0
    mean = torch.zeros_like(norm_mask)
    M2 = torch.zeros_like(norm_mask)

    for props in tqdm(dataloader):
        sample_values = []
        for p in property_names:
            val = props[p][None, :]
            if atomref and p in atomref.keys():
                # Sum the single-atom reference energies per structure via
                # index_add over the molecule index.
                ar = atomref[p]
                ar = ar[props[structure.Z]]
                idx_m = props[structure.idx_m]
                tmp = torch.zeros((idx_m[-1] + 1,), dtype=ar.dtype, device=ar.device)
                v0 = tmp.index_add(0, idx_m, ar)
                # Subtract OUT of place: ``val`` is a view of ``props[p]``, so
                # an in-place ``-=`` would silently mutate the batch owned by
                # the dataloader (and would fail for integer-dtype properties).
                val = val - v0

            sample_values.append(val)
        sample_values = torch.cat(sample_values, dim=0)

        batch_size = sample_values.shape[1]
        new_count = count + batch_size

        # Per-atom normalization only for the masked properties; the others
        # are divided by 1.
        norm = norm_mask[:, None] * props[structure.n_atoms][None, :] + (
            1 - norm_mask[:, None]
        )
        # Out-of-place division: keeps any views of the input untouched and
        # promotes integer dtypes instead of raising.
        sample_values = sample_values / norm

        sample_mean = torch.mean(sample_values, dim=1)
        sample_m2 = torch.sum((sample_values - sample_mean[:, None]) ** 2, dim=1)

        # Chan et al. parallel update combining the batch statistics with the
        # running accumulators.
        delta = sample_mean - mean
        mean += delta * batch_size / new_count
        corr = batch_size * count / new_count
        M2 += sample_m2 + delta**2 * corr
        count = new_count

    stddev = torch.sqrt(M2 / count)
    stats = {pn: (mu, std) for pn, mu, std in zip(property_names, mean, stddev)}
    return stats
|
normal
|
{
"blob_id": "b2944a95dbe25057155aaf6198a97d85b3bb620b",
"index": 6436,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef calculate_stats(dataloader: AtomsLoader, divide_by_atoms: Dict[str,\n bool], atomref: Dict[str, torch.Tensor]=None) ->Dict[str, Tuple[torch.\n Tensor, torch.Tensor]]:\n \"\"\"\n Use the incremental Welford algorithm described in [h1]_ to accumulate\n the mean and standard deviation over a set of samples.\n\n References:\n -----------\n .. [h1] https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance\n\n Args:\n dataset: atoms data set\n divide_by_atoms: dict from property name to bool:\n If True, divide property by number of atoms before calculating statistics.\n atomref: reference values for single atoms to be removed before calculating stats\n\n\n Returns:\n\n \"\"\"\n property_names = list(divide_by_atoms.keys())\n norm_mask = torch.tensor([float(divide_by_atoms[p]) for p in\n property_names], dtype=torch.float64)\n count = 0\n mean = torch.zeros_like(norm_mask)\n M2 = torch.zeros_like(norm_mask)\n for props in tqdm(dataloader):\n sample_values = []\n for p in property_names:\n val = props[p][None, :]\n if atomref and p in atomref.keys():\n ar = atomref[p]\n ar = ar[props[structure.Z]]\n idx_m = props[structure.idx_m]\n tmp = torch.zeros((idx_m[-1] + 1,), dtype=ar.dtype, device=\n ar.device)\n v0 = tmp.index_add(0, idx_m, ar)\n val -= v0\n sample_values.append(val)\n sample_values = torch.cat(sample_values, dim=0)\n batch_size = sample_values.shape[1]\n new_count = count + batch_size\n norm = norm_mask[:, None] * props[structure.n_atoms][None, :] + (1 -\n norm_mask[:, None])\n sample_values /= norm\n sample_mean = torch.mean(sample_values, dim=1)\n sample_m2 = torch.sum((sample_values - sample_mean[:, None]) ** 2,\n dim=1)\n delta = sample_mean - mean\n mean += delta * batch_size / new_count\n corr = batch_size * count / new_count\n M2 += sample_m2 + delta ** 2 * corr\n count = new_count\n stddev = torch.sqrt(M2 / count)\n stats = {pn: (mu, std) for pn, mu, std in zip(property_names, mean, stddev)\n }\n return stats\n",
"step-3": "<mask token>\n__all__ = ['calculate_stats']\n\n\ndef calculate_stats(dataloader: AtomsLoader, divide_by_atoms: Dict[str,\n bool], atomref: Dict[str, torch.Tensor]=None) ->Dict[str, Tuple[torch.\n Tensor, torch.Tensor]]:\n \"\"\"\n Use the incremental Welford algorithm described in [h1]_ to accumulate\n the mean and standard deviation over a set of samples.\n\n References:\n -----------\n .. [h1] https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance\n\n Args:\n dataset: atoms data set\n divide_by_atoms: dict from property name to bool:\n If True, divide property by number of atoms before calculating statistics.\n atomref: reference values for single atoms to be removed before calculating stats\n\n\n Returns:\n\n \"\"\"\n property_names = list(divide_by_atoms.keys())\n norm_mask = torch.tensor([float(divide_by_atoms[p]) for p in\n property_names], dtype=torch.float64)\n count = 0\n mean = torch.zeros_like(norm_mask)\n M2 = torch.zeros_like(norm_mask)\n for props in tqdm(dataloader):\n sample_values = []\n for p in property_names:\n val = props[p][None, :]\n if atomref and p in atomref.keys():\n ar = atomref[p]\n ar = ar[props[structure.Z]]\n idx_m = props[structure.idx_m]\n tmp = torch.zeros((idx_m[-1] + 1,), dtype=ar.dtype, device=\n ar.device)\n v0 = tmp.index_add(0, idx_m, ar)\n val -= v0\n sample_values.append(val)\n sample_values = torch.cat(sample_values, dim=0)\n batch_size = sample_values.shape[1]\n new_count = count + batch_size\n norm = norm_mask[:, None] * props[structure.n_atoms][None, :] + (1 -\n norm_mask[:, None])\n sample_values /= norm\n sample_mean = torch.mean(sample_values, dim=1)\n sample_m2 = torch.sum((sample_values - sample_mean[:, None]) ** 2,\n dim=1)\n delta = sample_mean - mean\n mean += delta * batch_size / new_count\n corr = batch_size * count / new_count\n M2 += sample_m2 + delta ** 2 * corr\n count = new_count\n stddev = torch.sqrt(M2 / count)\n stats = {pn: (mu, std) for pn, mu, std in zip(property_names, mean, 
stddev)\n }\n return stats\n",
"step-4": "from typing import Dict, Tuple\nimport torch\nfrom tqdm import tqdm\nimport schnetpack.properties as structure\nfrom schnetpack.data import AtomsLoader\n__all__ = ['calculate_stats']\n\n\ndef calculate_stats(dataloader: AtomsLoader, divide_by_atoms: Dict[str,\n bool], atomref: Dict[str, torch.Tensor]=None) ->Dict[str, Tuple[torch.\n Tensor, torch.Tensor]]:\n \"\"\"\n Use the incremental Welford algorithm described in [h1]_ to accumulate\n the mean and standard deviation over a set of samples.\n\n References:\n -----------\n .. [h1] https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance\n\n Args:\n dataset: atoms data set\n divide_by_atoms: dict from property name to bool:\n If True, divide property by number of atoms before calculating statistics.\n atomref: reference values for single atoms to be removed before calculating stats\n\n\n Returns:\n\n \"\"\"\n property_names = list(divide_by_atoms.keys())\n norm_mask = torch.tensor([float(divide_by_atoms[p]) for p in\n property_names], dtype=torch.float64)\n count = 0\n mean = torch.zeros_like(norm_mask)\n M2 = torch.zeros_like(norm_mask)\n for props in tqdm(dataloader):\n sample_values = []\n for p in property_names:\n val = props[p][None, :]\n if atomref and p in atomref.keys():\n ar = atomref[p]\n ar = ar[props[structure.Z]]\n idx_m = props[structure.idx_m]\n tmp = torch.zeros((idx_m[-1] + 1,), dtype=ar.dtype, device=\n ar.device)\n v0 = tmp.index_add(0, idx_m, ar)\n val -= v0\n sample_values.append(val)\n sample_values = torch.cat(sample_values, dim=0)\n batch_size = sample_values.shape[1]\n new_count = count + batch_size\n norm = norm_mask[:, None] * props[structure.n_atoms][None, :] + (1 -\n norm_mask[:, None])\n sample_values /= norm\n sample_mean = torch.mean(sample_values, dim=1)\n sample_m2 = torch.sum((sample_values - sample_mean[:, None]) ** 2,\n dim=1)\n delta = sample_mean - mean\n mean += delta * batch_size / new_count\n corr = batch_size * count / new_count\n M2 += sample_m2 + 
delta ** 2 * corr\n count = new_count\n stddev = torch.sqrt(M2 / count)\n stats = {pn: (mu, std) for pn, mu, std in zip(property_names, mean, stddev)\n }\n return stats\n",
"step-5": "from typing import Dict, Tuple\n\nimport torch\nfrom tqdm import tqdm\n\nimport schnetpack.properties as structure\nfrom schnetpack.data import AtomsLoader\n\n__all__ = [\"calculate_stats\"]\n\n\ndef calculate_stats(\n dataloader: AtomsLoader,\n divide_by_atoms: Dict[str, bool],\n atomref: Dict[str, torch.Tensor] = None,\n) -> Dict[str, Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"\n Use the incremental Welford algorithm described in [h1]_ to accumulate\n the mean and standard deviation over a set of samples.\n\n References:\n -----------\n .. [h1] https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance\n\n Args:\n dataset: atoms data set\n divide_by_atoms: dict from property name to bool:\n If True, divide property by number of atoms before calculating statistics.\n atomref: reference values for single atoms to be removed before calculating stats\n\n\n Returns:\n\n \"\"\"\n property_names = list(divide_by_atoms.keys())\n norm_mask = torch.tensor(\n [float(divide_by_atoms[p]) for p in property_names], dtype=torch.float64\n )\n\n count = 0\n mean = torch.zeros_like(norm_mask)\n M2 = torch.zeros_like(norm_mask)\n\n for props in tqdm(dataloader):\n sample_values = []\n for p in property_names:\n val = props[p][None, :]\n if atomref and p in atomref.keys():\n ar = atomref[p]\n ar = ar[props[structure.Z]]\n idx_m = props[structure.idx_m]\n tmp = torch.zeros((idx_m[-1] + 1,), dtype=ar.dtype, device=ar.device)\n v0 = tmp.index_add(0, idx_m, ar)\n val -= v0\n\n sample_values.append(val)\n sample_values = torch.cat(sample_values, dim=0)\n\n batch_size = sample_values.shape[1]\n new_count = count + batch_size\n\n norm = norm_mask[:, None] * props[structure.n_atoms][None, :] + (\n 1 - norm_mask[:, None]\n )\n sample_values /= norm\n\n sample_mean = torch.mean(sample_values, dim=1)\n sample_m2 = torch.sum((sample_values - sample_mean[:, None]) ** 2, dim=1)\n\n delta = sample_mean - mean\n mean += delta * batch_size / new_count\n corr = batch_size * count / 
new_count\n M2 += sample_m2 + delta**2 * corr\n count = new_count\n\n stddev = torch.sqrt(M2 / count)\n stats = {pn: (mu, std) for pn, mu, std in zip(property_names, mean, stddev)}\n return stats\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def game_core_v3(number):
"""Сначала устанавливаем любое random число, а потом уменьшаем или увеличиваем его в зависимости от того, больше оно или меньше нужного.
Функция принимает загаданное число и возвращает число попыток"""
count = 1
allAnsvers = [x for x in range(1, 101)]
a = int(len(allAnsvers) / 2) - 1
predict = allAnsvers[a]
tempList = allAnsvers
while number != predict:
count += 1
if predict > number:
tempList = tempList[0:a]
a = int(len(tempList) / 2) - 1
elif predict < number:
tempList = tempList[a:]
a = int(len(tempList) / 2)
predict = tempList[a]
return count
def score_game(game_core):
"""Запускаем игру 1000 раз, чтобы узнать, как быстро игра угадывает число"""
count_ls = []
np.random.seed(1)
random_array = np.random.randint(1, 101, size=1000)
for number in random_array:
count_ls.append(game_core(number))
score = int(np.mean(count_ls))
print(f'Ваш алгоритм угадывает число в среднем за {score} попыток')
return score
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Загадано число от 1 до 100')
def game_core_v3(number):
"""Сначала устанавливаем любое random число, а потом уменьшаем или увеличиваем его в зависимости от того, больше оно или меньше нужного.
Функция принимает загаданное число и возвращает число попыток"""
count = 1
allAnsvers = [x for x in range(1, 101)]
a = int(len(allAnsvers) / 2) - 1
predict = allAnsvers[a]
tempList = allAnsvers
while number != predict:
count += 1
if predict > number:
tempList = tempList[0:a]
a = int(len(tempList) / 2) - 1
elif predict < number:
tempList = tempList[a:]
a = int(len(tempList) / 2)
predict = tempList[a]
return count
def score_game(game_core):
"""Запускаем игру 1000 раз, чтобы узнать, как быстро игра угадывает число"""
count_ls = []
np.random.seed(1)
random_array = np.random.randint(1, 101, size=1000)
for number in random_array:
count_ls.append(game_core(number))
score = int(np.mean(count_ls))
print(f'Ваш алгоритм угадывает число в среднем за {score} попыток')
return score
score_game(game_core_v3)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
count = 0
number = np.random.randint(1, 101)
print('Загадано число от 1 до 100')
def game_core_v3(number):
"""Сначала устанавливаем любое random число, а потом уменьшаем или увеличиваем его в зависимости от того, больше оно или меньше нужного.
Функция принимает загаданное число и возвращает число попыток"""
count = 1
allAnsvers = [x for x in range(1, 101)]
a = int(len(allAnsvers) / 2) - 1
predict = allAnsvers[a]
tempList = allAnsvers
while number != predict:
count += 1
if predict > number:
tempList = tempList[0:a]
a = int(len(tempList) / 2) - 1
elif predict < number:
tempList = tempList[a:]
a = int(len(tempList) / 2)
predict = tempList[a]
return count
def score_game(game_core):
"""Запускаем игру 1000 раз, чтобы узнать, как быстро игра угадывает число"""
count_ls = []
np.random.seed(1)
random_array = np.random.randint(1, 101, size=1000)
for number in random_array:
count_ls.append(game_core(number))
score = int(np.mean(count_ls))
print(f'Ваш алгоритм угадывает число в среднем за {score} попыток')
return score
score_game(game_core_v3)
<|reserved_special_token_1|>
import numpy as np
count = 0
number = np.random.randint(1, 101)
print('Загадано число от 1 до 100')
def game_core_v3(number):
"""Сначала устанавливаем любое random число, а потом уменьшаем или увеличиваем его в зависимости от того, больше оно или меньше нужного.
Функция принимает загаданное число и возвращает число попыток"""
count = 1
allAnsvers = [x for x in range(1, 101)]
a = int(len(allAnsvers) / 2) - 1
predict = allAnsvers[a]
tempList = allAnsvers
while number != predict:
count += 1
if predict > number:
tempList = tempList[0:a]
a = int(len(tempList) / 2) - 1
elif predict < number:
tempList = tempList[a:]
a = int(len(tempList) / 2)
predict = tempList[a]
return count
def score_game(game_core):
    """Run the guessing game 1000 times and report the mean attempt count."""
    np.random.seed(1)  # fixed seed keeps the experiment reproducible
    secrets = np.random.randint(1, 101, size=1000)
    attempts = [game_core(secret) for secret in secrets]
    score = int(np.mean(attempts))
    print(f'Ваш алгоритм угадывает число в среднем за {score} попыток')
    return score
score_game(game_core_v3)
<|reserved_special_token_1|>
import numpy as np
count = 0  # attempt counter (module level; appears unused by the functions below)
number = np.random.randint(1, 101)  # the secret number
print("Загадано число от 1 до 100")  # "A number from 1 to 100 has been chosen"
def game_core_v3(number):
    """Guess *number* (1..100) by repeatedly halving a candidate list.

    Keeps a sorted list of remaining candidates and probes its midpoint;
    the half that cannot contain *number* is discarded each round.
    Returns the number of attempts taken.
    """
    attempts = 1
    candidates = list(range(1, 101))
    pivot = int(len(candidates) / 2) - 1
    guess = candidates[pivot]
    while guess != number:
        attempts += 1
        if guess > number:
            # keep only the values strictly below the current guess
            candidates = candidates[:pivot]
            pivot = int(len(candidates) / 2) - 1
        else:
            # keep the current guess and everything above it
            candidates = candidates[pivot:]
            pivot = int(len(candidates) / 2)
        guess = candidates[pivot]
    return attempts
def score_game(game_core):
    """Run the guessing game 1000 times and report the mean attempt count."""
    np.random.seed(1)  # fixed seed keeps the experiment reproducible
    secrets = np.random.randint(1, 101, size=1000)
    attempts = [game_core(secret) for secret in secrets]
    score = int(np.mean(attempts))
    print(f"Ваш алгоритм угадывает число в среднем за {score} попыток")
    return score
# запускаем
score_game(game_core_v3)
|
flexible
|
{
"blob_id": "66474b8cdca9a4aa48b8dc710d161a3a16495aed",
"index": 6438,
"step-1": "<mask token>\n\n\ndef game_core_v3(number):\n \"\"\"Сначала устанавливаем любое random число, а потом уменьшаем или увеличиваем его в зависимости от того, больше оно или меньше нужного.\n Функция принимает загаданное число и возвращает число попыток\"\"\"\n count = 1\n allAnsvers = [x for x in range(1, 101)]\n a = int(len(allAnsvers) / 2) - 1\n predict = allAnsvers[a]\n tempList = allAnsvers\n while number != predict:\n count += 1\n if predict > number:\n tempList = tempList[0:a]\n a = int(len(tempList) / 2) - 1\n elif predict < number:\n tempList = tempList[a:]\n a = int(len(tempList) / 2)\n predict = tempList[a]\n return count\n\n\ndef score_game(game_core):\n \"\"\"Запускаем игру 1000 раз, чтобы узнать, как быстро игра угадывает число\"\"\"\n count_ls = []\n np.random.seed(1)\n random_array = np.random.randint(1, 101, size=1000)\n for number in random_array:\n count_ls.append(game_core(number))\n score = int(np.mean(count_ls))\n print(f'Ваш алгоритм угадывает число в среднем за {score} попыток')\n return score\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('Загадано число от 1 до 100')\n\n\ndef game_core_v3(number):\n \"\"\"Сначала устанавливаем любое random число, а потом уменьшаем или увеличиваем его в зависимости от того, больше оно или меньше нужного.\n Функция принимает загаданное число и возвращает число попыток\"\"\"\n count = 1\n allAnsvers = [x for x in range(1, 101)]\n a = int(len(allAnsvers) / 2) - 1\n predict = allAnsvers[a]\n tempList = allAnsvers\n while number != predict:\n count += 1\n if predict > number:\n tempList = tempList[0:a]\n a = int(len(tempList) / 2) - 1\n elif predict < number:\n tempList = tempList[a:]\n a = int(len(tempList) / 2)\n predict = tempList[a]\n return count\n\n\ndef score_game(game_core):\n \"\"\"Запускаем игру 1000 раз, чтобы узнать, как быстро игра угадывает число\"\"\"\n count_ls = []\n np.random.seed(1)\n random_array = np.random.randint(1, 101, size=1000)\n for number in random_array:\n count_ls.append(game_core(number))\n score = int(np.mean(count_ls))\n print(f'Ваш алгоритм угадывает число в среднем за {score} попыток')\n return score\n\n\nscore_game(game_core_v3)\n",
"step-3": "<mask token>\ncount = 0\nnumber = np.random.randint(1, 101)\nprint('Загадано число от 1 до 100')\n\n\ndef game_core_v3(number):\n \"\"\"Сначала устанавливаем любое random число, а потом уменьшаем или увеличиваем его в зависимости от того, больше оно или меньше нужного.\n Функция принимает загаданное число и возвращает число попыток\"\"\"\n count = 1\n allAnsvers = [x for x in range(1, 101)]\n a = int(len(allAnsvers) / 2) - 1\n predict = allAnsvers[a]\n tempList = allAnsvers\n while number != predict:\n count += 1\n if predict > number:\n tempList = tempList[0:a]\n a = int(len(tempList) / 2) - 1\n elif predict < number:\n tempList = tempList[a:]\n a = int(len(tempList) / 2)\n predict = tempList[a]\n return count\n\n\ndef score_game(game_core):\n \"\"\"Запускаем игру 1000 раз, чтобы узнать, как быстро игра угадывает число\"\"\"\n count_ls = []\n np.random.seed(1)\n random_array = np.random.randint(1, 101, size=1000)\n for number in random_array:\n count_ls.append(game_core(number))\n score = int(np.mean(count_ls))\n print(f'Ваш алгоритм угадывает число в среднем за {score} попыток')\n return score\n\n\nscore_game(game_core_v3)\n",
"step-4": "import numpy as np\ncount = 0\nnumber = np.random.randint(1, 101)\nprint('Загадано число от 1 до 100')\n\n\ndef game_core_v3(number):\n \"\"\"Сначала устанавливаем любое random число, а потом уменьшаем или увеличиваем его в зависимости от того, больше оно или меньше нужного.\n Функция принимает загаданное число и возвращает число попыток\"\"\"\n count = 1\n allAnsvers = [x for x in range(1, 101)]\n a = int(len(allAnsvers) / 2) - 1\n predict = allAnsvers[a]\n tempList = allAnsvers\n while number != predict:\n count += 1\n if predict > number:\n tempList = tempList[0:a]\n a = int(len(tempList) / 2) - 1\n elif predict < number:\n tempList = tempList[a:]\n a = int(len(tempList) / 2)\n predict = tempList[a]\n return count\n\n\ndef score_game(game_core):\n \"\"\"Запускаем игру 1000 раз, чтобы узнать, как быстро игра угадывает число\"\"\"\n count_ls = []\n np.random.seed(1)\n random_array = np.random.randint(1, 101, size=1000)\n for number in random_array:\n count_ls.append(game_core(number))\n score = int(np.mean(count_ls))\n print(f'Ваш алгоритм угадывает число в среднем за {score} попыток')\n return score\n\n\nscore_game(game_core_v3)\n",
"step-5": "import numpy as np\n\ncount = 0 # счетчик попыток\nnumber = np.random.randint(1, 101) # загадали число\nprint(\"Загадано число от 1 до 100\")\n\ndef game_core_v3(number):\n '''Сначала устанавливаем любое random число, а потом уменьшаем или увеличиваем его в зависимости от того, больше оно или меньше нужного.\n Функция принимает загаданное число и возвращает число попыток'''\n count = 1\n allAnsvers = [x for x in range(1, 101)]\n a = int(len(allAnsvers) / 2) - 1\n predict = allAnsvers[a]\n tempList = allAnsvers\n while number != predict:\n count += 1\n if predict > number:\n tempList = tempList[0: a]\n a = int(len(tempList) / 2) - 1\n elif predict < number:\n tempList = tempList[a:]\n a = int(len(tempList) / 2)\n predict = tempList[a]\n return(count) # выход из цикла, если угадали\n\ndef score_game(game_core):\n '''Запускаем игру 1000 раз, чтобы узнать, как быстро игра угадывает число'''\n count_ls = []\n np.random.seed(1) # фиксируем RANDOM SEED, чтобы ваш эксперимент был воспроизводим!\n random_array = np.random.randint(1, 101, size=(1000))\n for number in random_array:\n count_ls.append(game_core(number))\n score = int(np.mean(count_ls))\n print(f\"Ваш алгоритм угадывает число в среднем за {score} попыток\")\n return (score)\n\n\n# запускаем\nscore_game(game_core_v3)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import csv
import io
import pickle
import os
import pip
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseDownload
import cv2
import numpy as np
# OAuth scopes requested for the Drive API: Drive metadata, per-file
# access, and full Drive access.
SCOPES = ['https://www.googleapis.com/auth/drive.metadata',
          'https://www.googleapis.com/auth/drive.file',
          'https://www.googleapis.com/auth/drive']
def install(package):
    """Install *package* with pip, supporting both old and new pip APIs."""
    installer = pip.main if hasattr(pip, 'main') else pip._internal.main
    installer(['install', package])
def create_folder(service):
    """Create a 'Test Techm' folder in Drive and print its id."""
    metadata = {
        'name': 'Test Techm',
        'mimeType': 'application/vnd.google-apps.folder',
    }
    created = service.files().create(body=metadata, fields='id').execute()
    print('Folder ID: %s' % created.get('id'))
def get_gdrive_service():
    """Return an authenticated Google Drive v3 service.

    Cached credentials are read from ``token.pickle``; when absent or
    invalid they are refreshed (if a refresh token exists) or re-acquired
    via the OAuth browser flow, then pickled back for the next run.
    """
    token_path = 'token.pickle'
    credentials = None
    if os.path.exists(token_path):
        with open(token_path, 'rb') as fh:
            credentials = pickle.load(fh)
    if not credentials or not credentials.valid:
        if credentials and credentials.expired and credentials.refresh_token:
            credentials.refresh(Request())
        else:
            # first run (or revoked token): launch the interactive consent flow
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            credentials = flow.run_local_server(port=0)
        # persist the (possibly refreshed) credentials for next time
        with open(token_path, 'wb') as fh:
            pickle.dump(credentials, fh)
    return build('drive', 'v3', credentials=credentials)
def downloadFile(id, name):
    """Download the Drive file *id* and save it in the cwd as *name*."""
    service = get_gdrive_service()
    request = service.files().get_media(fileId=id)
    buffer = io.BytesIO()
    downloader = MediaIoBaseDownload(buffer, request)
    done = False
    while not done:
        _, done = downloader.next_chunk()
    buffer.seek(0)
    with io.open("." + "/" + name, 'wb') as out:
        out.write(buffer.read())
def is_duplicate(img1, img2):
    """Return True when the two image files on disk are pixel-identical.

    Images that cannot be read, or whose shapes differ, are treated as
    non-duplicates (best-effort, matching the original behaviour where
    any cv2 failure yielded False).
    """
    image1 = cv2.imread(img1)
    image2 = cv2.imread(img2)
    # cv2.imread returns None for a missing/unreadable file
    if image1 is None or image2 is None:
        return False
    try:
        difference = cv2.subtract(image1, image2)
        # an all-zero difference means every pixel matches
        return not np.any(difference)
    except Exception:
        # e.g. shape/dtype mismatch raised by cv2.subtract; was a bare
        # `except:` with a meaningless `i = 0` in the original
        return False
def check_duplicate_image_new(items):
    """Download every JPEG in *items*, compare all pairs, and return
    ``(dup_map, total)`` where ``dup_map`` maps an image name to the set of
    webViewLinks of its matching copies and ``total`` is the sum of all
    duplicate copies found.
    """
    print("Images is loading to memory..")
    #"""given items returned by Google Drive API, prints them in a tabular way"""
    # name -> webViewLink for every JPEG (shadows the builtin ``map``)
    map= {}
    # names of all JPEG files in listing order (shadows the builtin ``list``)
    list=[]
    message= set()  # NOTE(review): never used after creation
    duplicate_image=[]  # NOTE(review): never used after creation
    # image name -> list of names of its duplicate copies
    final_result={}
    if not items:
        print('No files found.')
    else:
        for item in items:
            if item["mimeType"] == "image/jpeg":
                list.append(item["name"])
                #Creating Map
                value=[]  # NOTE(review): built but never stored anywhere
                value.append(item["name"])
                value.append(item["webViewLink"])
                # NOTE(review): both branches store the same value and the
                # ``val`` set is discarded — duplicate names overwrite links
                if item["name"] in map:
                    val=set()
                    val.add(item["webViewLink"])
                    map[item["name"]]=item["webViewLink"]
                else:
                    map[item["name"]]=item["webViewLink"]
                # download the file so is_duplicate() can read it from disk
                downloadFile(item["id"],item["name"])
    # pairwise O(n^2) scan; ``match`` holds names already claimed by a group
    match=[]
    flag=False
    for i in range(len(list)-1):
        temp=[]  # duplicates of list[i] found in this pass
        dp_count=0
        flag=False
        if list[i] not in match :
            flag=True
            for j in range(i+1,len(list)):
                istrue=is_duplicate(list[i],list[j])
                if istrue==True:
                    dp_count=dp_count+1
                    temp.append(list[j])
                    if list[j] not in match:
                        match.append(list[j])
                        if list[i] not in match:
                            match.append(list[i])
                    if len(match)==0:
                        match.append(list[i])
                        match.append(list[j])
        if flag==True and dp_count !=0:
            #print(list[i]," - ",dp_count)
            final_result[list[i]]=temp
    # resolve duplicate names back to their Drive webViewLinks
    m={}
    tdct=0  # running total of duplicate copies across all groups
    for x, y in final_result.items():
        res=y
        tdct=tdct+len(res)
        s=set()
        for i in res:
            #s=set()
            for item in items:
                if item["mimeType"] == "image/jpeg":
                    if item["name"]==i:
                        s.add(item["webViewLink"])
        m[x]=s
    return m,tdct
def duplicate_image_list(imagelist):
    """Compare every pair of image files named in *imagelist* (read from the
    cwd) and return a list of names that have at least one duplicate.

    NOTE(review): ``imagelist[i]`` is appended once per matching ``j``, so a
    name can appear multiple times in the result, and the later copies
    (the ``j`` side) are never appended — callers only use ``len()`` of the
    result, so this defines the "duplicate count" semantics. Confirm before
    changing.
    """
    #print(len(imagelist))
    dup_list = []
    if len(imagelist) >= 1:
        for i in range(len(imagelist) - 1):
            count=0  # matches found for imagelist[i] (not returned)
            l=[]  # NOTE(review): collected but never used
            for j in range(i + 1, len(imagelist)):
                image1 = cv2.imread(imagelist[i])
                image2 = cv2.imread(imagelist[j])
                try:
                    difference = cv2.subtract(image1, image2)
                    result = not np.any(difference) # if difference is all zeros it will return False
                    if result is True:
                        #print(imagelist[i],"Matching with ",imagelist[j])
                        l.append(imagelist[j])
                        count=count+1
                        dup_list.append(imagelist[i])
                except:
                    # NOTE(review): bare except silently swallows all cv2
                    # errors (unreadable file, shape mismatch); the ``i = 0``
                    # assignment has no effect on the range-based loop
                    i = 0
    return dup_list
# image name -> Drive webViewLink; populated by check_duplicate_image()
# and written out by createDeviceCSV()
csv_map = {}
def check_duplicate_image(items):
    """Download every JPEG in *items*, record its name -> webViewLink in the
    module-level ``csv_map``, and return the duplicate-name list produced
    by duplicate_image_list()."""
    id_to_info = {}
    jpeg_names = []
    for item in items:
        if item["mimeType"] != "image/jpeg":
            continue
        name = item["name"]
        link = item["webViewLink"]
        jpeg_names.append(name)
        id_to_info[item["id"]] = [name, link]
        csv_map[name] = link
        # fetch the file so the pixel comparison can read it from disk
        downloadFile(item["id"], name)
    return duplicate_image_list(jpeg_names)
def renameFile(service, items, newName):
    """Rename every image file in *items* to ``newName<N>.jpg`` / ``.png``.

    Each item's metadata is fetched, its name replaced, and written back
    via ``files().update``. NOTE(review): the counter advances for *every*
    item, not only images, so image numbering may have gaps — this
    preserves the original behaviour; confirm before "fixing" it.
    """
    count = 1
    for item in items:
        file_id = item["id"]
        mime_type = item["mimeType"]
        # fetch current metadata; 'id' must not appear in an update body
        meta = service.files().get(fileId=file_id).execute()
        del meta['id']
        if "jpeg" in mime_type:
            meta['name'] = newName + str(count) + ".jpg"
        if "png" in mime_type:
            meta['name'] = newName + str(count) + ".png"
        service.files().update(fileId=file_id, body=meta).execute()
        count = count + 1
def count_image(id):
    """Recursively collect the names of all JPEG files under folder *id*.

    Note: the parameter shadows the builtin ``id``; kept for
    backward-compatible keyword calls.
    """
    names = []
    service = get_gdrive_service()
    listing = service.files().list(pageSize=1000, q="'{}' in parents".format(id)).execute()
    for entry in listing.get('files', []):
        mime = entry["mimeType"]
        if mime == "image/jpeg":
            names.append(entry["name"])
        if mime == "application/vnd.google-apps.folder":
            # recurse into sub-folders
            names.extend(count_image(entry["id"]))
    return names
def list_files(items, service):
    """Count folders/images in the Drive listing, collect per-folder image
    names for direct children of 'Test Techm', then generate both HTML
    reports (duplicateData.html and gDriveOverview.html).
    """
    folder_count = 0
    image_count = 0
    imglist = []
    count = 0
    testtechm_id = ''
    nm_name = []
    img_count = []
    list_all_folder_name=[]
    rows = []
    # folder name -> list of image names inside it (recursive)
    overview_map = {}
    img_nm=0  # NOTE(review): never used
    # first pass: locate the parent id of the 'Test Techm' folder
    for item in items:
        name = item["name"]
        mime_type = item["mimeType"]
        if name == 'Test Techm':
            testtechm_id = item['parents'][0]
    # second pass: tally folders/images and expand Test Techm's children
    for item in items:
        id = item["id"]
        name = item["name"]
        mime_type = item["mimeType"]
        if mime_type == "application/vnd.google-apps.folder":
            folder_count = folder_count + 1
        if mime_type == "image/jpeg":
            # renameFile(item["id"],"rajj_img"+str(image_count))
            image_count = image_count + 1
        if mime_type == "application/vnd.google-apps.folder" and item["parents"][0] == testtechm_id:
            list_all_folder_name.append(item["name"])
            name1 = count_image(id)
            nm_name.append(name1)
            img_count.append(len(name1))
            overview_map[item["name"]] = name1
            rows.append((id, name, mime_type, folder_count))
            imglist.append(count)
    # NOTE(review): relies on ``id``/``name``/``mime_type`` leaking out of
    # the loop above — raises NameError when ``items`` is empty
    rows.append((id, name, mime_type, folder_count))
    #duplicate_count = len(check_duplicate_image(items))
    lt,duplicate_ct=check_duplicate_image_new(items)
    duplicateImagehtml(folder_count, image_count, duplicate_ct,items)
    # overview chart report page
    draw_chart_create_report(list_all_folder_name, image_count, duplicate_ct, overview_map,folder_count)
def createDeviceCSV():
    """Write the module-level ``csv_map`` (image name -> Drive link) to
    ``DuplicateImage.csv`` with a header row.
    """
    fileName = 'DuplicateImage.csv'
    # newline='' is required so csv.writer does not emit blank rows on
    # Windows (per the csv module documentation); the original also called
    # csvFile.close() redundantly after the `with` block had closed it.
    with open(fileName, 'w', newline='', encoding='utf-8') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(["Image Name", 'Image Url'])
        for k, v in csv_map.items():
            writer.writerow([k, v])
def duplicateImagehtml(folder_count, image_count, duplicate_ct,items):
    """Write ``duplicateData.html``: a summary table (folders / images /
    duplicates) plus a per-image table of duplicate counts and Drive links.

    NOTE(review): this recomputes the duplicate map even though the caller
    already computed it, re-downloading and re-comparing every image —
    ``duplicate_ct`` passed in is ignored in favour of the recomputed count.
    """
    uri = []  # rows for the detail table: [name, count, str(link-set)]
    map1,count=check_duplicate_image_new(items)
    for k, v in map1.items():
        name_url = []
        name_url.append(k)
        name_url.append(str(len(v)))
        name_url.append(str(v))
        uri.append(name_url)
    fb = open('duplicateData.html', 'w')
    # Google Charts page: data3 holds the summary rows, data the per-image rows
    message = """ <html> <head>
    <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
    <script type="text/javascript">
      google.charts.load('current', {'packages':['table']});
      google.charts.setOnLoadCallback(drawTable);
      function drawTable() {
        var data3 = new google.visualization.DataTable();
        data3.addColumn('string', 'Name');
         data3.addColumn('string', 'Count');
        data3.addRows([
          ['Total Folders', '""" + str(folder_count) + """'],
          ['Total Images', '""" + str(image_count) + """'],
          ['Duplicate Images', '""" + str(duplicate_ct) + """']]);
        var table2 = new google.visualization.Table(document.getElementById('table_div_base'));
        table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});
        var data = new google.visualization.DataTable();
        data.addColumn('string', 'Image Name');
        data.addColumn('string', 'Image Count');
        data.addColumn('string', 'Image Url');
        data.addRows(""" + str(uri) + """);
        var table = new google.visualization.Table(document.getElementById('table_div'));
        table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});
      }
    </script>
  </head>
  <body><h2 style="text-align: center">Google Drive Summary Table</h2>
    <div id="table_div_base" style="width: 100%; height: 200px; display:inline-block;border-style: solid"></div>
     <h2 style="text-align: center" >List of Duplicate Image</h2>
    <div id="table_div" style="width: 100%; height: 500px; display:inline-block;border-style: solid"></div>
  </body></html>"""
    fb.write(message)
    fb.close()
    print("Duplicate image data preparing.. ")
    # webbrowser.open_new_tab('helloworld.html')
def draw_chart_create_report(folder_count, image_count, duplicate_ct, map, folder_count_real):
    """Write ``gDriveOverview.html`` with a pie chart of drive totals and a
    bar chart of per-folder image/duplicate counts.

    Args:
        folder_count: list of folder names directly under 'Test Techm'
            (NOTE(review): misleadingly named — it is a list, not a count).
        image_count: total number of images in the drive.
        duplicate_ct: total number of duplicate images.
        map: folder name -> list of image names in that folder (shadows the
            builtin ``map``).
        folder_count_real: total number of folders.
    """
    values = list(map.values())
    total_image_count = []
    duplicate_image_count_in_folder = []
    for v in values:
        # per-folder duplicate detection (compares the downloaded files on disk)
        duplicate_image_count_in_folder.append(len(duplicate_image_list(v)))
        total_image_count.append(len(v))
    m1 = """<html>
          <head>
          <h1 style ="color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;">Google Drive Data Overview</h1>
            <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
            <script type="text/javascript">
              google.charts.load('current', {'packages':['bar','corechart','table']});
              google.charts.setOnLoadCallback(drawChart);
              function drawChart() {
                var paiData = google.visualization.arrayToDataTable([
                  ['Drive', 'Drive Data'],
                  ['Total Images', """ + str(image_count) + """],
                  ['Total duplicate Images', """ + str(duplicate_ct) + """],
                  ['Total Folder', """ + str(folder_count_real) + """]
                ]);
                var paiOptions = {
                  title: 'Google Drive Overview'
                };
                var chart = new google.visualization.PieChart(document.getElementById('piechart'));
                chart.draw(paiData, paiOptions);
                var barData = google.visualization.arrayToDataTable("""
    # table rows for the bar chart: [folder name, image count, duplicate count]
    barchart_data = [['Folders', 'Total no of Images', 'Total no of duplicate Images']]
    for i in range(len(values)):
        barchart_data.append([folder_count[i], total_image_count[i],
                              duplicate_image_count_in_folder[i]])
    m3 = str(barchart_data) + """);
                var barOptions = {
                  chart: { title: 'Google Drive Folderwise Overview',
                           subtitle: 'This report is created on '+new Date(),
                }};
                var chart = new google.charts.Bar(document.getElementById('bar_chart'));
                chart.draw(barData, google.charts.Bar.convertOptions(barOptions));
              }
            </script>
          </head>
          <body>
          <div style="width:100%; margin:0px auto;">
            <div id="piechart" style="width: 900px; height: 500px; display:inline-block;"></div>
            <div id="bar_chart" style="width: 900px; height: 500px; display:inline-block;"></div>
          </div>
          <div>
          <h2>
          <p style="float:right;color:red;">** <a href="duplicateData.html" target="_blank">Click here to know more about duplicate image data</a></p>
          </h2></div></body></html>
          """
    # BUG FIX: the original called fb.write(m1) twice (before and after
    # building barchart_data), duplicating the whole <head> block and
    # producing invalid HTML; m1 is now written exactly once. A `with`
    # block also guarantees the file is closed on error.
    with open('gDriveOverview.html', 'w') as fb:
        fb.write(m1)
        fb.write(m3)
    print("Bar and Pie chart creating.... ")
def main():
    """Authenticate, list up to 1000 Drive files, rename all images to
    ``g_image_<N>``, and generate the duplicate/overview HTML reports."""
    service = get_gdrive_service()
    print("Wait a moment script is running ..!!!")
    results = service.files().list(pageSize=1000,
                                   fields="nextPageToken,files(id, name,mimeType,parents,webViewLink)").execute()
    items = results.get('files', [])
    if not items:
        # empty drive
        print('No files found.')
    else:
        # create_folder(service)
        print("-----_")
        name="g_image_"  # prefix used when renaming image files
        renameFile(service,items,name)
        print("==============================")
        #check_duplicate_image(items)
        # createDeviceCSV()
        list_files(items, service)
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
    print("Script is done ..!!!")
|
normal
|
{
"blob_id": "f32b9dc36b2452fea8c8f284fbf800f22608c3ae",
"index": 8541,
"step-1": "<mask token>\n\n\ndef install(package):\n if hasattr(pip, 'main'):\n pip.main(['install', package])\n else:\n pip._internal.main(['install', package])\n\n\n<mask token>\n\n\ndef get_gdrive_service():\n creds = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json'\n , SCOPES)\n creds = flow.run_local_server(port=0)\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n return build('drive', 'v3', credentials=creds)\n\n\ndef downloadFile(id, name):\n service = get_gdrive_service()\n request = service.files().get_media(fileId=id)\n fh = io.BytesIO()\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n with io.open('.' + '/' + name, 'wb') as f:\n fh.seek(0)\n f.write(fh.read())\n\n\n<mask token>\n\n\ndef check_duplicate_image(items):\n map = {}\n image_name_list = []\n duplicate_image = []\n for item in items:\n file_type = item['mimeType']\n if file_type == 'image/jpeg':\n image_name_list.append(item['name'])\n value = []\n value.append(item['name'])\n value.append(item['webViewLink'])\n map[item['id']] = value\n csv_map[item['name']] = item['webViewLink']\n downloadFile(item['id'], item['name'])\n duplicate_image = duplicate_image_list(image_name_list)\n return duplicate_image\n\n\ndef renameFile(service, items, newName):\n count = 1\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n file = service.files().get(fileId=id).execute()\n del file['id']\n if 'jpeg' in mime_type:\n file['name'] = newName + str(count) + '.jpg'\n if 'png' in mime_type:\n file['name'] = newName + str(count) + '.png'\n updated_file = service.files().update(fileId=id, body=file).execute()\n count = 
count + 1\n\n\n<mask token>\n\n\ndef list_files(items, service):\n folder_count = 0\n image_count = 0\n imglist = []\n count = 0\n testtechm_id = ''\n nm_name = []\n img_count = []\n list_all_folder_name = []\n rows = []\n overview_map = {}\n img_nm = 0\n for item in items:\n name = item['name']\n mime_type = item['mimeType']\n if name == 'Test Techm':\n testtechm_id = item['parents'][0]\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n if mime_type == 'application/vnd.google-apps.folder':\n folder_count = folder_count + 1\n if mime_type == 'image/jpeg':\n image_count = image_count + 1\n if mime_type == 'application/vnd.google-apps.folder' and item['parents'\n ][0] == testtechm_id:\n list_all_folder_name.append(item['name'])\n name1 = count_image(id)\n nm_name.append(name1)\n img_count.append(len(name1))\n overview_map[item['name']] = name1\n rows.append((id, name, mime_type, folder_count))\n imglist.append(count)\n rows.append((id, name, mime_type, folder_count))\n lt, duplicate_ct = check_duplicate_image_new(items)\n duplicateImagehtml(folder_count, image_count, duplicate_ct, items)\n draw_chart_create_report(list_all_folder_name, image_count,\n duplicate_ct, overview_map, folder_count)\n\n\ndef createDeviceCSV():\n fileName = 'DuplicateImage.csv'\n with open(fileName, 'w') as csvFile:\n writer = csv.writer(csvFile)\n row = ['Image Name', 'Image Url']\n writer.writerow(row)\n count = 0\n for k, v in csv_map.items():\n row = [k, v]\n writer.writerow(row)\n count = count + 1\n csvFile.close()\n\n\ndef duplicateImagehtml(folder_count, image_count, duplicate_ct, items):\n uri = []\n map1, count = check_duplicate_image_new(items)\n for k, v in map1.items():\n name_url = []\n name_url.append(k)\n name_url.append(str(len(v)))\n name_url.append(str(v))\n uri.append(name_url)\n fb = open('duplicateData.html', 'w')\n message = \"\"\" <html> <head>\n <script type=\"text/javascript\" 
src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['table']});\n google.charts.setOnLoadCallback(drawTable);\n function drawTable() {\n var data3 = new google.visualization.DataTable();\n data3.addColumn('string', 'Name');\n data3.addColumn('string', 'Count');\n data3.addRows([\n ['Total Folders', '\"\"\" + str(folder_count) + \"\"\"'],\n ['Total Images', '\"\"\" + str(image_count) + \"\"\"'],\n ['Duplicate Images', '\"\"\" + str(duplicate_ct) + \"\"\"']]);\n\n var table2 = new google.visualization.Table(document.getElementById('table_div_base'));\n\n table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});\n var data = new google.visualization.DataTable();\n data.addColumn('string', 'Image Name');\n data.addColumn('string', 'Image Count');\n data.addColumn('string', 'Image Url');\n data.addRows(\"\"\" + str(uri) + \"\"\");\n var table = new google.visualization.Table(document.getElementById('table_div'));\n table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});\n }\n </script>\n </head>\n <body><h2 style=\"text-align: center\">Google Drive Summary Table</h2>\n <div id=\"table_div_base\" style=\"width: 100%; height: 200px; display:inline-block;border-style: solid\"></div>\n <h2 style=\"text-align: center\" >List of Duplicate Image</h2>\n <div id=\"table_div\" style=\"width: 100%; height: 500px; display:inline-block;border-style: solid\"></div>\n </body></html>\"\"\"\n fb.write(message)\n fb.close()\n print('Duplicate image data preparing.. 
')\n\n\ndef draw_chart_create_report(folder_count, image_count, duplicate_ct, map,\n folder_count_real):\n fb = open('gDriveOverview.html', 'w')\n values = list(map.values())\n newlist = []\n folder_name = list(map.keys())\n total_image_count = []\n duplicate_image_count_in_folder = []\n for v in values:\n newlist.append(duplicate_image_list(v))\n total_image_count.append(len(v))\n for n in newlist:\n duplicate_image_count_in_folder.append(len(n))\n m1 = \"\"\"<html>\n <head>\n <h1 style =\"color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;\">Google Drive Data Overview</h1>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['bar','corechart','table']});\n google.charts.setOnLoadCallback(drawChart);\n function drawChart() {\n var paiData = google.visualization.arrayToDataTable([\n ['Drive', 'Drive Data'],\n ['Total Images', \"\"\" + str(image_count) + \"\"\"],\n ['Total duplicate Images', \"\"\" + str(duplicate_ct) + \"\"\"],\n ['Total Folder', \"\"\" + str(folder_count_real) + \"\"\"]\n ]);\n var paiOptions = {\n title: 'Google Drive Overview'\n };\n var chart = new google.visualization.PieChart(document.getElementById('piechart'));\n chart.draw(paiData, paiOptions);\n var barData = google.visualization.arrayToDataTable(\"\"\"\n fb.write(m1)\n barchart_data = []\n barchart_data.append(['Folders', 'Total no of Images',\n 'Total no of duplicate Images'])\n for i in range(len(values)):\n item_list = []\n item_list.append(folder_count[i])\n item_list.append(total_image_count[i])\n item_list.append(duplicate_image_count_in_folder[i])\n barchart_data.append(item_list)\n fb.write(m1)\n m3 = str(barchart_data) + \"\"\");\n \n var barOptions = {\n chart: { title: 'Google Drive Folderwise Overview',\n subtitle: 'This report is created on '+new Date(),\n }};\n \n var chart = new 
google.charts.Bar(document.getElementById('bar_chart'));\n \n chart.draw(barData, google.charts.Bar.convertOptions(barOptions));\n }\n </script>\n </head>\n <body>\n <div style=\"width:100%; margin:0px auto;\">\n <div id=\"piechart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n <div id=\"bar_chart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n </div>\n <div>\n <h2>\n <p style=\"float:right;color:red;\">** <a href=\"duplicateData.html\" target=\"_blank\">Click here to know more about duplicate image data</a></p>\n </h2></div></body></html>\n \"\"\"\n fb.write(m3)\n fb.close()\n print('Bar and Pie chart creating.... ')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef install(package):\n if hasattr(pip, 'main'):\n pip.main(['install', package])\n else:\n pip._internal.main(['install', package])\n\n\n<mask token>\n\n\ndef get_gdrive_service():\n creds = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json'\n , SCOPES)\n creds = flow.run_local_server(port=0)\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n return build('drive', 'v3', credentials=creds)\n\n\ndef downloadFile(id, name):\n service = get_gdrive_service()\n request = service.files().get_media(fileId=id)\n fh = io.BytesIO()\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n with io.open('.' + '/' + name, 'wb') as f:\n fh.seek(0)\n f.write(fh.read())\n\n\n<mask token>\n\n\ndef check_duplicate_image_new(items):\n print('Images is loading to memory..')\n map = {}\n list = []\n message = set()\n duplicate_image = []\n final_result = {}\n if not items:\n print('No files found.')\n else:\n for item in items:\n if item['mimeType'] == 'image/jpeg':\n list.append(item['name'])\n value = []\n value.append(item['name'])\n value.append(item['webViewLink'])\n if item['name'] in map:\n val = set()\n val.add(item['webViewLink'])\n map[item['name']] = item['webViewLink']\n else:\n map[item['name']] = item['webViewLink']\n downloadFile(item['id'], item['name'])\n match = []\n flag = False\n for i in range(len(list) - 1):\n temp = []\n dp_count = 0\n flag = False\n if list[i] not in match:\n flag = True\n for j in range(i + 1, len(list)):\n istrue = is_duplicate(list[i], list[j])\n if istrue == True:\n dp_count = dp_count + 1\n temp.append(list[j])\n if list[j] not in match:\n match.append(list[j])\n if 
list[i] not in match:\n match.append(list[i])\n if len(match) == 0:\n match.append(list[i])\n match.append(list[j])\n if flag == True and dp_count != 0:\n final_result[list[i]] = temp\n m = {}\n tdct = 0\n for x, y in final_result.items():\n res = y\n tdct = tdct + len(res)\n s = set()\n for i in res:\n for item in items:\n if item['mimeType'] == 'image/jpeg':\n if item['name'] == i:\n s.add(item['webViewLink'])\n m[x] = s\n return m, tdct\n\n\ndef duplicate_image_list(imagelist):\n dup_list = []\n if len(imagelist) >= 1:\n for i in range(len(imagelist) - 1):\n count = 0\n l = []\n for j in range(i + 1, len(imagelist)):\n image1 = cv2.imread(imagelist[i])\n image2 = cv2.imread(imagelist[j])\n try:\n difference = cv2.subtract(image1, image2)\n result = not np.any(difference)\n if result is True:\n l.append(imagelist[j])\n count = count + 1\n dup_list.append(imagelist[i])\n except:\n i = 0\n return dup_list\n\n\n<mask token>\n\n\ndef check_duplicate_image(items):\n map = {}\n image_name_list = []\n duplicate_image = []\n for item in items:\n file_type = item['mimeType']\n if file_type == 'image/jpeg':\n image_name_list.append(item['name'])\n value = []\n value.append(item['name'])\n value.append(item['webViewLink'])\n map[item['id']] = value\n csv_map[item['name']] = item['webViewLink']\n downloadFile(item['id'], item['name'])\n duplicate_image = duplicate_image_list(image_name_list)\n return duplicate_image\n\n\ndef renameFile(service, items, newName):\n count = 1\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n file = service.files().get(fileId=id).execute()\n del file['id']\n if 'jpeg' in mime_type:\n file['name'] = newName + str(count) + '.jpg'\n if 'png' in mime_type:\n file['name'] = newName + str(count) + '.png'\n updated_file = service.files().update(fileId=id, body=file).execute()\n count = count + 1\n\n\n<mask token>\n\n\ndef list_files(items, service):\n folder_count = 0\n image_count = 0\n imglist = []\n count 
= 0\n testtechm_id = ''\n nm_name = []\n img_count = []\n list_all_folder_name = []\n rows = []\n overview_map = {}\n img_nm = 0\n for item in items:\n name = item['name']\n mime_type = item['mimeType']\n if name == 'Test Techm':\n testtechm_id = item['parents'][0]\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n if mime_type == 'application/vnd.google-apps.folder':\n folder_count = folder_count + 1\n if mime_type == 'image/jpeg':\n image_count = image_count + 1\n if mime_type == 'application/vnd.google-apps.folder' and item['parents'\n ][0] == testtechm_id:\n list_all_folder_name.append(item['name'])\n name1 = count_image(id)\n nm_name.append(name1)\n img_count.append(len(name1))\n overview_map[item['name']] = name1\n rows.append((id, name, mime_type, folder_count))\n imglist.append(count)\n rows.append((id, name, mime_type, folder_count))\n lt, duplicate_ct = check_duplicate_image_new(items)\n duplicateImagehtml(folder_count, image_count, duplicate_ct, items)\n draw_chart_create_report(list_all_folder_name, image_count,\n duplicate_ct, overview_map, folder_count)\n\n\ndef createDeviceCSV():\n fileName = 'DuplicateImage.csv'\n with open(fileName, 'w') as csvFile:\n writer = csv.writer(csvFile)\n row = ['Image Name', 'Image Url']\n writer.writerow(row)\n count = 0\n for k, v in csv_map.items():\n row = [k, v]\n writer.writerow(row)\n count = count + 1\n csvFile.close()\n\n\ndef duplicateImagehtml(folder_count, image_count, duplicate_ct, items):\n uri = []\n map1, count = check_duplicate_image_new(items)\n for k, v in map1.items():\n name_url = []\n name_url.append(k)\n name_url.append(str(len(v)))\n name_url.append(str(v))\n uri.append(name_url)\n fb = open('duplicateData.html', 'w')\n message = \"\"\" <html> <head>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['table']});\n 
google.charts.setOnLoadCallback(drawTable);\n function drawTable() {\n var data3 = new google.visualization.DataTable();\n data3.addColumn('string', 'Name');\n data3.addColumn('string', 'Count');\n data3.addRows([\n ['Total Folders', '\"\"\" + str(folder_count) + \"\"\"'],\n ['Total Images', '\"\"\" + str(image_count) + \"\"\"'],\n ['Duplicate Images', '\"\"\" + str(duplicate_ct) + \"\"\"']]);\n\n var table2 = new google.visualization.Table(document.getElementById('table_div_base'));\n\n table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});\n var data = new google.visualization.DataTable();\n data.addColumn('string', 'Image Name');\n data.addColumn('string', 'Image Count');\n data.addColumn('string', 'Image Url');\n data.addRows(\"\"\" + str(uri) + \"\"\");\n var table = new google.visualization.Table(document.getElementById('table_div'));\n table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});\n }\n </script>\n </head>\n <body><h2 style=\"text-align: center\">Google Drive Summary Table</h2>\n <div id=\"table_div_base\" style=\"width: 100%; height: 200px; display:inline-block;border-style: solid\"></div>\n <h2 style=\"text-align: center\" >List of Duplicate Image</h2>\n <div id=\"table_div\" style=\"width: 100%; height: 500px; display:inline-block;border-style: solid\"></div>\n </body></html>\"\"\"\n fb.write(message)\n fb.close()\n print('Duplicate image data preparing.. 
')\n\n\ndef draw_chart_create_report(folder_count, image_count, duplicate_ct, map,\n folder_count_real):\n fb = open('gDriveOverview.html', 'w')\n values = list(map.values())\n newlist = []\n folder_name = list(map.keys())\n total_image_count = []\n duplicate_image_count_in_folder = []\n for v in values:\n newlist.append(duplicate_image_list(v))\n total_image_count.append(len(v))\n for n in newlist:\n duplicate_image_count_in_folder.append(len(n))\n m1 = \"\"\"<html>\n <head>\n <h1 style =\"color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;\">Google Drive Data Overview</h1>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['bar','corechart','table']});\n google.charts.setOnLoadCallback(drawChart);\n function drawChart() {\n var paiData = google.visualization.arrayToDataTable([\n ['Drive', 'Drive Data'],\n ['Total Images', \"\"\" + str(image_count) + \"\"\"],\n ['Total duplicate Images', \"\"\" + str(duplicate_ct) + \"\"\"],\n ['Total Folder', \"\"\" + str(folder_count_real) + \"\"\"]\n ]);\n var paiOptions = {\n title: 'Google Drive Overview'\n };\n var chart = new google.visualization.PieChart(document.getElementById('piechart'));\n chart.draw(paiData, paiOptions);\n var barData = google.visualization.arrayToDataTable(\"\"\"\n fb.write(m1)\n barchart_data = []\n barchart_data.append(['Folders', 'Total no of Images',\n 'Total no of duplicate Images'])\n for i in range(len(values)):\n item_list = []\n item_list.append(folder_count[i])\n item_list.append(total_image_count[i])\n item_list.append(duplicate_image_count_in_folder[i])\n barchart_data.append(item_list)\n fb.write(m1)\n m3 = str(barchart_data) + \"\"\");\n \n var barOptions = {\n chart: { title: 'Google Drive Folderwise Overview',\n subtitle: 'This report is created on '+new Date(),\n }};\n \n var chart = new 
google.charts.Bar(document.getElementById('bar_chart'));\n \n chart.draw(barData, google.charts.Bar.convertOptions(barOptions));\n }\n </script>\n </head>\n <body>\n <div style=\"width:100%; margin:0px auto;\">\n <div id=\"piechart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n <div id=\"bar_chart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n </div>\n <div>\n <h2>\n <p style=\"float:right;color:red;\">** <a href=\"duplicateData.html\" target=\"_blank\">Click here to know more about duplicate image data</a></p>\n </h2></div></body></html>\n \"\"\"\n fb.write(m3)\n fb.close()\n print('Bar and Pie chart creating.... ')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef install(package):\n if hasattr(pip, 'main'):\n pip.main(['install', package])\n else:\n pip._internal.main(['install', package])\n\n\n<mask token>\n\n\ndef get_gdrive_service():\n creds = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json'\n , SCOPES)\n creds = flow.run_local_server(port=0)\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n return build('drive', 'v3', credentials=creds)\n\n\ndef downloadFile(id, name):\n service = get_gdrive_service()\n request = service.files().get_media(fileId=id)\n fh = io.BytesIO()\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n with io.open('.' + '/' + name, 'wb') as f:\n fh.seek(0)\n f.write(fh.read())\n\n\n<mask token>\n\n\ndef check_duplicate_image_new(items):\n print('Images is loading to memory..')\n map = {}\n list = []\n message = set()\n duplicate_image = []\n final_result = {}\n if not items:\n print('No files found.')\n else:\n for item in items:\n if item['mimeType'] == 'image/jpeg':\n list.append(item['name'])\n value = []\n value.append(item['name'])\n value.append(item['webViewLink'])\n if item['name'] in map:\n val = set()\n val.add(item['webViewLink'])\n map[item['name']] = item['webViewLink']\n else:\n map[item['name']] = item['webViewLink']\n downloadFile(item['id'], item['name'])\n match = []\n flag = False\n for i in range(len(list) - 1):\n temp = []\n dp_count = 0\n flag = False\n if list[i] not in match:\n flag = True\n for j in range(i + 1, len(list)):\n istrue = is_duplicate(list[i], list[j])\n if istrue == True:\n dp_count = dp_count + 1\n temp.append(list[j])\n if list[j] not in match:\n match.append(list[j])\n if 
list[i] not in match:\n match.append(list[i])\n if len(match) == 0:\n match.append(list[i])\n match.append(list[j])\n if flag == True and dp_count != 0:\n final_result[list[i]] = temp\n m = {}\n tdct = 0\n for x, y in final_result.items():\n res = y\n tdct = tdct + len(res)\n s = set()\n for i in res:\n for item in items:\n if item['mimeType'] == 'image/jpeg':\n if item['name'] == i:\n s.add(item['webViewLink'])\n m[x] = s\n return m, tdct\n\n\ndef duplicate_image_list(imagelist):\n dup_list = []\n if len(imagelist) >= 1:\n for i in range(len(imagelist) - 1):\n count = 0\n l = []\n for j in range(i + 1, len(imagelist)):\n image1 = cv2.imread(imagelist[i])\n image2 = cv2.imread(imagelist[j])\n try:\n difference = cv2.subtract(image1, image2)\n result = not np.any(difference)\n if result is True:\n l.append(imagelist[j])\n count = count + 1\n dup_list.append(imagelist[i])\n except:\n i = 0\n return dup_list\n\n\n<mask token>\n\n\ndef check_duplicate_image(items):\n map = {}\n image_name_list = []\n duplicate_image = []\n for item in items:\n file_type = item['mimeType']\n if file_type == 'image/jpeg':\n image_name_list.append(item['name'])\n value = []\n value.append(item['name'])\n value.append(item['webViewLink'])\n map[item['id']] = value\n csv_map[item['name']] = item['webViewLink']\n downloadFile(item['id'], item['name'])\n duplicate_image = duplicate_image_list(image_name_list)\n return duplicate_image\n\n\ndef renameFile(service, items, newName):\n count = 1\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n file = service.files().get(fileId=id).execute()\n del file['id']\n if 'jpeg' in mime_type:\n file['name'] = newName + str(count) + '.jpg'\n if 'png' in mime_type:\n file['name'] = newName + str(count) + '.png'\n updated_file = service.files().update(fileId=id, body=file).execute()\n count = count + 1\n\n\ndef count_image(id):\n imageList = []\n service = get_gdrive_service()\n results = 
service.files().list(pageSize=1000, q=\"'{}' in parents\".\n format(id)).execute()\n items = results.get('files', [])\n for item in items:\n mime_Type = item['mimeType']\n if mime_Type == 'image/jpeg':\n imageList.append(item['name'])\n if mime_Type == 'application/vnd.google-apps.folder':\n imageList.extend(count_image(item['id']))\n return imageList\n\n\ndef list_files(items, service):\n folder_count = 0\n image_count = 0\n imglist = []\n count = 0\n testtechm_id = ''\n nm_name = []\n img_count = []\n list_all_folder_name = []\n rows = []\n overview_map = {}\n img_nm = 0\n for item in items:\n name = item['name']\n mime_type = item['mimeType']\n if name == 'Test Techm':\n testtechm_id = item['parents'][0]\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n if mime_type == 'application/vnd.google-apps.folder':\n folder_count = folder_count + 1\n if mime_type == 'image/jpeg':\n image_count = image_count + 1\n if mime_type == 'application/vnd.google-apps.folder' and item['parents'\n ][0] == testtechm_id:\n list_all_folder_name.append(item['name'])\n name1 = count_image(id)\n nm_name.append(name1)\n img_count.append(len(name1))\n overview_map[item['name']] = name1\n rows.append((id, name, mime_type, folder_count))\n imglist.append(count)\n rows.append((id, name, mime_type, folder_count))\n lt, duplicate_ct = check_duplicate_image_new(items)\n duplicateImagehtml(folder_count, image_count, duplicate_ct, items)\n draw_chart_create_report(list_all_folder_name, image_count,\n duplicate_ct, overview_map, folder_count)\n\n\ndef createDeviceCSV():\n fileName = 'DuplicateImage.csv'\n with open(fileName, 'w') as csvFile:\n writer = csv.writer(csvFile)\n row = ['Image Name', 'Image Url']\n writer.writerow(row)\n count = 0\n for k, v in csv_map.items():\n row = [k, v]\n writer.writerow(row)\n count = count + 1\n csvFile.close()\n\n\ndef duplicateImagehtml(folder_count, image_count, duplicate_ct, items):\n uri = []\n map1, count = 
check_duplicate_image_new(items)\n for k, v in map1.items():\n name_url = []\n name_url.append(k)\n name_url.append(str(len(v)))\n name_url.append(str(v))\n uri.append(name_url)\n fb = open('duplicateData.html', 'w')\n message = \"\"\" <html> <head>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['table']});\n google.charts.setOnLoadCallback(drawTable);\n function drawTable() {\n var data3 = new google.visualization.DataTable();\n data3.addColumn('string', 'Name');\n data3.addColumn('string', 'Count');\n data3.addRows([\n ['Total Folders', '\"\"\" + str(folder_count) + \"\"\"'],\n ['Total Images', '\"\"\" + str(image_count) + \"\"\"'],\n ['Duplicate Images', '\"\"\" + str(duplicate_ct) + \"\"\"']]);\n\n var table2 = new google.visualization.Table(document.getElementById('table_div_base'));\n\n table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});\n var data = new google.visualization.DataTable();\n data.addColumn('string', 'Image Name');\n data.addColumn('string', 'Image Count');\n data.addColumn('string', 'Image Url');\n data.addRows(\"\"\" + str(uri) + \"\"\");\n var table = new google.visualization.Table(document.getElementById('table_div'));\n table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});\n }\n </script>\n </head>\n <body><h2 style=\"text-align: center\">Google Drive Summary Table</h2>\n <div id=\"table_div_base\" style=\"width: 100%; height: 200px; display:inline-block;border-style: solid\"></div>\n <h2 style=\"text-align: center\" >List of Duplicate Image</h2>\n <div id=\"table_div\" style=\"width: 100%; height: 500px; display:inline-block;border-style: solid\"></div>\n </body></html>\"\"\"\n fb.write(message)\n fb.close()\n print('Duplicate image data preparing.. 
')\n\n\ndef draw_chart_create_report(folder_count, image_count, duplicate_ct, map,\n folder_count_real):\n fb = open('gDriveOverview.html', 'w')\n values = list(map.values())\n newlist = []\n folder_name = list(map.keys())\n total_image_count = []\n duplicate_image_count_in_folder = []\n for v in values:\n newlist.append(duplicate_image_list(v))\n total_image_count.append(len(v))\n for n in newlist:\n duplicate_image_count_in_folder.append(len(n))\n m1 = \"\"\"<html>\n <head>\n <h1 style =\"color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;\">Google Drive Data Overview</h1>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['bar','corechart','table']});\n google.charts.setOnLoadCallback(drawChart);\n function drawChart() {\n var paiData = google.visualization.arrayToDataTable([\n ['Drive', 'Drive Data'],\n ['Total Images', \"\"\" + str(image_count) + \"\"\"],\n ['Total duplicate Images', \"\"\" + str(duplicate_ct) + \"\"\"],\n ['Total Folder', \"\"\" + str(folder_count_real) + \"\"\"]\n ]);\n var paiOptions = {\n title: 'Google Drive Overview'\n };\n var chart = new google.visualization.PieChart(document.getElementById('piechart'));\n chart.draw(paiData, paiOptions);\n var barData = google.visualization.arrayToDataTable(\"\"\"\n fb.write(m1)\n barchart_data = []\n barchart_data.append(['Folders', 'Total no of Images',\n 'Total no of duplicate Images'])\n for i in range(len(values)):\n item_list = []\n item_list.append(folder_count[i])\n item_list.append(total_image_count[i])\n item_list.append(duplicate_image_count_in_folder[i])\n barchart_data.append(item_list)\n fb.write(m1)\n m3 = str(barchart_data) + \"\"\");\n \n var barOptions = {\n chart: { title: 'Google Drive Folderwise Overview',\n subtitle: 'This report is created on '+new Date(),\n }};\n \n var chart = new 
google.charts.Bar(document.getElementById('bar_chart'));\n \n chart.draw(barData, google.charts.Bar.convertOptions(barOptions));\n }\n </script>\n </head>\n <body>\n <div style=\"width:100%; margin:0px auto;\">\n <div id=\"piechart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n <div id=\"bar_chart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n </div>\n <div>\n <h2>\n <p style=\"float:right;color:red;\">** <a href=\"duplicateData.html\" target=\"_blank\">Click here to know more about duplicate image data</a></p>\n </h2></div></body></html>\n \"\"\"\n fb.write(m3)\n fb.close()\n print('Bar and Pie chart creating.... ')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef install(package):\n if hasattr(pip, 'main'):\n pip.main(['install', package])\n else:\n pip._internal.main(['install', package])\n\n\n<mask token>\n\n\ndef get_gdrive_service():\n creds = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json'\n , SCOPES)\n creds = flow.run_local_server(port=0)\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n return build('drive', 'v3', credentials=creds)\n\n\ndef downloadFile(id, name):\n service = get_gdrive_service()\n request = service.files().get_media(fileId=id)\n fh = io.BytesIO()\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n with io.open('.' + '/' + name, 'wb') as f:\n fh.seek(0)\n f.write(fh.read())\n\n\ndef is_duplicate(img1, img2):\n response = False\n image1 = cv2.imread(img1)\n image2 = cv2.imread(img2)\n try:\n difference = cv2.subtract(image1, image2)\n result = not np.any(difference)\n if result is True:\n response = True\n except:\n i = 0\n return response\n\n\ndef check_duplicate_image_new(items):\n print('Images is loading to memory..')\n map = {}\n list = []\n message = set()\n duplicate_image = []\n final_result = {}\n if not items:\n print('No files found.')\n else:\n for item in items:\n if item['mimeType'] == 'image/jpeg':\n list.append(item['name'])\n value = []\n value.append(item['name'])\n value.append(item['webViewLink'])\n if item['name'] in map:\n val = set()\n val.add(item['webViewLink'])\n map[item['name']] = item['webViewLink']\n else:\n map[item['name']] = item['webViewLink']\n downloadFile(item['id'], item['name'])\n match = []\n flag = False\n for i in range(len(list) - 1):\n temp = []\n dp_count = 0\n flag = 
False\n if list[i] not in match:\n flag = True\n for j in range(i + 1, len(list)):\n istrue = is_duplicate(list[i], list[j])\n if istrue == True:\n dp_count = dp_count + 1\n temp.append(list[j])\n if list[j] not in match:\n match.append(list[j])\n if list[i] not in match:\n match.append(list[i])\n if len(match) == 0:\n match.append(list[i])\n match.append(list[j])\n if flag == True and dp_count != 0:\n final_result[list[i]] = temp\n m = {}\n tdct = 0\n for x, y in final_result.items():\n res = y\n tdct = tdct + len(res)\n s = set()\n for i in res:\n for item in items:\n if item['mimeType'] == 'image/jpeg':\n if item['name'] == i:\n s.add(item['webViewLink'])\n m[x] = s\n return m, tdct\n\n\ndef duplicate_image_list(imagelist):\n dup_list = []\n if len(imagelist) >= 1:\n for i in range(len(imagelist) - 1):\n count = 0\n l = []\n for j in range(i + 1, len(imagelist)):\n image1 = cv2.imread(imagelist[i])\n image2 = cv2.imread(imagelist[j])\n try:\n difference = cv2.subtract(image1, image2)\n result = not np.any(difference)\n if result is True:\n l.append(imagelist[j])\n count = count + 1\n dup_list.append(imagelist[i])\n except:\n i = 0\n return dup_list\n\n\n<mask token>\n\n\ndef check_duplicate_image(items):\n map = {}\n image_name_list = []\n duplicate_image = []\n for item in items:\n file_type = item['mimeType']\n if file_type == 'image/jpeg':\n image_name_list.append(item['name'])\n value = []\n value.append(item['name'])\n value.append(item['webViewLink'])\n map[item['id']] = value\n csv_map[item['name']] = item['webViewLink']\n downloadFile(item['id'], item['name'])\n duplicate_image = duplicate_image_list(image_name_list)\n return duplicate_image\n\n\ndef renameFile(service, items, newName):\n count = 1\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n file = service.files().get(fileId=id).execute()\n del file['id']\n if 'jpeg' in mime_type:\n file['name'] = newName + str(count) + '.jpg'\n if 'png' in mime_type:\n 
file['name'] = newName + str(count) + '.png'\n updated_file = service.files().update(fileId=id, body=file).execute()\n count = count + 1\n\n\ndef count_image(id):\n imageList = []\n service = get_gdrive_service()\n results = service.files().list(pageSize=1000, q=\"'{}' in parents\".\n format(id)).execute()\n items = results.get('files', [])\n for item in items:\n mime_Type = item['mimeType']\n if mime_Type == 'image/jpeg':\n imageList.append(item['name'])\n if mime_Type == 'application/vnd.google-apps.folder':\n imageList.extend(count_image(item['id']))\n return imageList\n\n\ndef list_files(items, service):\n folder_count = 0\n image_count = 0\n imglist = []\n count = 0\n testtechm_id = ''\n nm_name = []\n img_count = []\n list_all_folder_name = []\n rows = []\n overview_map = {}\n img_nm = 0\n for item in items:\n name = item['name']\n mime_type = item['mimeType']\n if name == 'Test Techm':\n testtechm_id = item['parents'][0]\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n if mime_type == 'application/vnd.google-apps.folder':\n folder_count = folder_count + 1\n if mime_type == 'image/jpeg':\n image_count = image_count + 1\n if mime_type == 'application/vnd.google-apps.folder' and item['parents'\n ][0] == testtechm_id:\n list_all_folder_name.append(item['name'])\n name1 = count_image(id)\n nm_name.append(name1)\n img_count.append(len(name1))\n overview_map[item['name']] = name1\n rows.append((id, name, mime_type, folder_count))\n imglist.append(count)\n rows.append((id, name, mime_type, folder_count))\n lt, duplicate_ct = check_duplicate_image_new(items)\n duplicateImagehtml(folder_count, image_count, duplicate_ct, items)\n draw_chart_create_report(list_all_folder_name, image_count,\n duplicate_ct, overview_map, folder_count)\n\n\ndef createDeviceCSV():\n fileName = 'DuplicateImage.csv'\n with open(fileName, 'w') as csvFile:\n writer = csv.writer(csvFile)\n row = ['Image Name', 'Image Url']\n writer.writerow(row)\n 
count = 0\n for k, v in csv_map.items():\n row = [k, v]\n writer.writerow(row)\n count = count + 1\n csvFile.close()\n\n\ndef duplicateImagehtml(folder_count, image_count, duplicate_ct, items):\n uri = []\n map1, count = check_duplicate_image_new(items)\n for k, v in map1.items():\n name_url = []\n name_url.append(k)\n name_url.append(str(len(v)))\n name_url.append(str(v))\n uri.append(name_url)\n fb = open('duplicateData.html', 'w')\n message = \"\"\" <html> <head>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['table']});\n google.charts.setOnLoadCallback(drawTable);\n function drawTable() {\n var data3 = new google.visualization.DataTable();\n data3.addColumn('string', 'Name');\n data3.addColumn('string', 'Count');\n data3.addRows([\n ['Total Folders', '\"\"\" + str(folder_count) + \"\"\"'],\n ['Total Images', '\"\"\" + str(image_count) + \"\"\"'],\n ['Duplicate Images', '\"\"\" + str(duplicate_ct) + \"\"\"']]);\n\n var table2 = new google.visualization.Table(document.getElementById('table_div_base'));\n\n table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});\n var data = new google.visualization.DataTable();\n data.addColumn('string', 'Image Name');\n data.addColumn('string', 'Image Count');\n data.addColumn('string', 'Image Url');\n data.addRows(\"\"\" + str(uri) + \"\"\");\n var table = new google.visualization.Table(document.getElementById('table_div'));\n table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});\n }\n </script>\n </head>\n <body><h2 style=\"text-align: center\">Google Drive Summary Table</h2>\n <div id=\"table_div_base\" style=\"width: 100%; height: 200px; display:inline-block;border-style: solid\"></div>\n <h2 style=\"text-align: center\" >List of Duplicate Image</h2>\n <div id=\"table_div\" style=\"width: 100%; height: 500px; display:inline-block;border-style: solid\"></div>\n 
</body></html>\"\"\"\n fb.write(message)\n fb.close()\n print('Duplicate image data preparing.. ')\n\n\ndef draw_chart_create_report(folder_count, image_count, duplicate_ct, map,\n folder_count_real):\n fb = open('gDriveOverview.html', 'w')\n values = list(map.values())\n newlist = []\n folder_name = list(map.keys())\n total_image_count = []\n duplicate_image_count_in_folder = []\n for v in values:\n newlist.append(duplicate_image_list(v))\n total_image_count.append(len(v))\n for n in newlist:\n duplicate_image_count_in_folder.append(len(n))\n m1 = \"\"\"<html>\n <head>\n <h1 style =\"color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;\">Google Drive Data Overview</h1>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['bar','corechart','table']});\n google.charts.setOnLoadCallback(drawChart);\n function drawChart() {\n var paiData = google.visualization.arrayToDataTable([\n ['Drive', 'Drive Data'],\n ['Total Images', \"\"\" + str(image_count) + \"\"\"],\n ['Total duplicate Images', \"\"\" + str(duplicate_ct) + \"\"\"],\n ['Total Folder', \"\"\" + str(folder_count_real) + \"\"\"]\n ]);\n var paiOptions = {\n title: 'Google Drive Overview'\n };\n var chart = new google.visualization.PieChart(document.getElementById('piechart'));\n chart.draw(paiData, paiOptions);\n var barData = google.visualization.arrayToDataTable(\"\"\"\n fb.write(m1)\n barchart_data = []\n barchart_data.append(['Folders', 'Total no of Images',\n 'Total no of duplicate Images'])\n for i in range(len(values)):\n item_list = []\n item_list.append(folder_count[i])\n item_list.append(total_image_count[i])\n item_list.append(duplicate_image_count_in_folder[i])\n barchart_data.append(item_list)\n fb.write(m1)\n m3 = str(barchart_data) + \"\"\");\n \n var barOptions = {\n chart: { title: 'Google Drive Folderwise Overview',\n 
subtitle: 'This report is created on '+new Date(),\n }};\n \n var chart = new google.charts.Bar(document.getElementById('bar_chart'));\n \n chart.draw(barData, google.charts.Bar.convertOptions(barOptions));\n }\n </script>\n </head>\n <body>\n <div style=\"width:100%; margin:0px auto;\">\n <div id=\"piechart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n <div id=\"bar_chart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n </div>\n <div>\n <h2>\n <p style=\"float:right;color:red;\">** <a href=\"duplicateData.html\" target=\"_blank\">Click here to know more about duplicate image data</a></p>\n </h2></div></body></html>\n \"\"\"\n fb.write(m3)\n fb.close()\n print('Bar and Pie chart creating.... ')\n\n\n<mask token>\n",
"step-5": "import csv\r\nimport io\r\nimport pickle\r\nimport os\r\nimport pip\r\nfrom googleapiclient.discovery import build\r\nfrom google_auth_oauthlib.flow import InstalledAppFlow\r\nfrom google.auth.transport.requests import Request\r\nfrom googleapiclient.http import MediaIoBaseDownload\r\nimport cv2\r\nimport numpy as np\r\n\r\nSCOPES = ['https://www.googleapis.com/auth/drive.metadata',\r\n 'https://www.googleapis.com/auth/drive.file',\r\n 'https://www.googleapis.com/auth/drive']\r\n\r\n\r\ndef install(package):\r\n if hasattr(pip, 'main'):\r\n pip.main(['install', package])\r\n else:\r\n pip._internal.main(['install', package])\r\n\r\n\r\ndef create_folder(service):\r\n file_metadata = {\r\n 'name': 'Test Techm',\r\n 'mimeType': 'application/vnd.google-apps.folder'\r\n }\r\n file = service.files().create(body=file_metadata,\r\n fields='id').execute()\r\n print('Folder ID: %s' % file.get('id'))\r\n\r\n\r\ndef get_gdrive_service():\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n # return Google Drive API service\r\n return build('drive', 'v3', credentials=creds)\r\n\r\n\r\ndef downloadFile(id, name):\r\n service = get_gdrive_service()\r\n request = service.files().get_media(fileId=id)\r\n fh = io.BytesIO()\r\n downloader = MediaIoBaseDownload(fh, 
request)\r\n done = False\r\n while done is False:\r\n status, done = downloader.next_chunk()\r\n with io.open(\".\" + \"/\" + name, 'wb') as f:\r\n fh.seek(0)\r\n f.write(fh.read())\r\n\r\n\r\ndef is_duplicate(img1,img2):\r\n response=False\r\n image1 = cv2.imread(img1)\r\n image2 = cv2.imread(img2)\r\n try:\r\n difference = cv2.subtract(image1, image2)\r\n result = not np.any(difference) #if difference is all zeros it will return False\r\n if result is True:\r\n response=True\r\n #duplicate_image.append(list[i])\r\n #print(\"{} images is matching with {} Occurred {} times \".format(img1,img1,list.count(img1)))\r\n except:\r\n i=0\r\n\r\n return response\r\n\r\n\r\ndef check_duplicate_image_new(items):\r\n print(\"Images is loading to memory..\")\r\n #\"\"\"given items returned by Google Drive API, prints them in a tabular way\"\"\"\r\n map= {}\r\n list=[]\r\n message= set()\r\n duplicate_image=[]\r\n final_result={}\r\n if not items:\r\n print('No files found.')\r\n else:\r\n for item in items:\r\n if item[\"mimeType\"] == \"image/jpeg\":\r\n list.append(item[\"name\"])\r\n #Creating Map\r\n value=[]\r\n value.append(item[\"name\"])\r\n value.append(item[\"webViewLink\"])\r\n if item[\"name\"] in map:\r\n val=set()\r\n val.add(item[\"webViewLink\"])\r\n map[item[\"name\"]]=item[\"webViewLink\"]\r\n else:\r\n map[item[\"name\"]]=item[\"webViewLink\"]\r\n #Dowloading Image\r\n downloadFile(item[\"id\"],item[\"name\"])\r\n match=[]\r\n flag=False\r\n for i in range(len(list)-1):\r\n temp=[]\r\n dp_count=0\r\n flag=False\r\n if list[i] not in match :\r\n flag=True\r\n for j in range(i+1,len(list)):\r\n istrue=is_duplicate(list[i],list[j])\r\n if istrue==True:\r\n dp_count=dp_count+1\r\n temp.append(list[j])\r\n if list[j] not in match:\r\n match.append(list[j])\r\n if list[i] not in match:\r\n match.append(list[i])\r\n if len(match)==0:\r\n match.append(list[i])\r\n match.append(list[j])\r\n\r\n if flag==True and dp_count !=0:\r\n #print(list[i],\" - \",dp_count)\r\n 
final_result[list[i]]=temp\r\n\r\n\r\n m={}\r\n tdct=0\r\n for x, y in final_result.items():\r\n res=y\r\n tdct=tdct+len(res)\r\n s=set()\r\n for i in res:\r\n #s=set()\r\n for item in items:\r\n if item[\"mimeType\"] == \"image/jpeg\":\r\n if item[\"name\"]==i:\r\n s.add(item[\"webViewLink\"])\r\n m[x]=s\r\n return m,tdct\r\n\r\n\r\ndef duplicate_image_list(imagelist):\r\n #print(len(imagelist))\r\n dup_list = []\r\n if len(imagelist) >= 1:\r\n for i in range(len(imagelist) - 1):\r\n count=0\r\n l=[]\r\n for j in range(i + 1, len(imagelist)):\r\n image1 = cv2.imread(imagelist[i])\r\n image2 = cv2.imread(imagelist[j])\r\n try:\r\n difference = cv2.subtract(image1, image2)\r\n result = not np.any(difference) # if difference is all zeros it will return False\r\n if result is True:\r\n #print(imagelist[i],\"Matching with \",imagelist[j])\r\n l.append(imagelist[j])\r\n count=count+1\r\n dup_list.append(imagelist[i])\r\n \r\n except:\r\n i = 0\r\n return dup_list\r\n\r\n\r\ncsv_map = {}\r\n\r\n\r\ndef check_duplicate_image(items):\r\n # \"\"\"given items returned by Google Drive API, prints them in a tabular way\"\"\"\r\n map = {}\r\n image_name_list = []\r\n duplicate_image = []\r\n for item in items:\r\n file_type = item[\"mimeType\"]\r\n if file_type == \"image/jpeg\":\r\n image_name_list.append(item[\"name\"])\r\n #append url or \r\n # Creating Map\r\n value = []\r\n value.append(item[\"name\"])\r\n value.append(item[\"webViewLink\"])\r\n map[item[\"id\"]] = value\r\n csv_map[item[\"name\"]] = item[\"webViewLink\"]\r\n # Dowloading Image\r\n downloadFile(item[\"id\"], item[\"name\"])\r\n duplicate_image = duplicate_image_list(image_name_list)\r\n return duplicate_image\r\n\r\n\r\ndef renameFile(service,items, newName):\r\n count=1\r\n for item in items:\r\n id = item[\"id\"]\r\n name = item[\"name\"]\r\n mime_type = item[\"mimeType\"]\r\n file = service.files().get(fileId=id).execute()\r\n del file['id']\r\n if \"jpeg\" in mime_type:\r\n file['name'] = 
newName+str(count)+ \".jpg\";\r\n if \"png\" in mime_type:\r\n file['name'] = newName+str(count)+ \".png\";\r\n updated_file = service.files().update(fileId=id, body=file).execute()\r\n count=count+1\r\n\r\n\r\n\r\ndef count_image(id):\r\n imageList = []\r\n service = get_gdrive_service()\r\n results = service.files().list(pageSize=1000, q=\"'{}' in parents\".format(id)).execute()\r\n items = results.get('files', [])\r\n for item in items:\r\n mime_Type = item[\"mimeType\"]\r\n if mime_Type == \"image/jpeg\":\r\n imageList.append(item[\"name\"])\r\n if mime_Type == \"application/vnd.google-apps.folder\":\r\n imageList.extend(count_image(item[\"id\"]))\r\n\r\n return imageList\r\n\r\n\r\ndef list_files(items, service):\r\n folder_count = 0\r\n image_count = 0\r\n imglist = []\r\n count = 0\r\n testtechm_id = ''\r\n nm_name = []\r\n img_count = []\r\n list_all_folder_name=[]\r\n rows = []\r\n overview_map = {}\r\n img_nm=0\r\n for item in items:\r\n name = item[\"name\"]\r\n mime_type = item[\"mimeType\"]\r\n if name == 'Test Techm':\r\n testtechm_id = item['parents'][0]\r\n for item in items:\r\n id = item[\"id\"]\r\n name = item[\"name\"]\r\n mime_type = item[\"mimeType\"]\r\n if mime_type == \"application/vnd.google-apps.folder\":\r\n folder_count = folder_count + 1\r\n if mime_type == \"image/jpeg\":\r\n # renameFile(item[\"id\"],\"rajj_img\"+str(image_count))\r\n image_count = image_count + 1\r\n if mime_type == \"application/vnd.google-apps.folder\" and item[\"parents\"][0] == testtechm_id:\r\n list_all_folder_name.append(item[\"name\"])\r\n name1 = count_image(id)\r\n nm_name.append(name1)\r\n img_count.append(len(name1))\r\n overview_map[item[\"name\"]] = name1\r\n\r\n rows.append((id, name, mime_type, folder_count))\r\n imglist.append(count)\r\n rows.append((id, name, mime_type, folder_count))\r\n\r\n #duplicate_count = len(check_duplicate_image(items))\r\n\r\n lt,duplicate_ct=check_duplicate_image_new(items)\r\n duplicateImagehtml(folder_count, image_count, 
duplicate_ct,items)\r\n # overview chart report page\r\n draw_chart_create_report(list_all_folder_name, image_count, duplicate_ct, overview_map,folder_count)\r\n\r\n\r\ndef createDeviceCSV():\r\n fileName = 'DuplicateImage.csv'\r\n with open(fileName, 'w') as csvFile:\r\n writer = csv.writer(csvFile)\r\n row = [\"Image Name\", 'Image Url']\r\n writer.writerow(row)\r\n count = 0\r\n for k, v in csv_map.items():\r\n row = [k, v]\r\n writer.writerow(row)\r\n count = count + 1\r\n #print(\"Device's adding into csv: \" + str(count))\r\n csvFile.close()\r\n #print('Device CSV File creation is Done file name is ', fileName)\r\n\r\ndef duplicateImagehtml(folder_count, image_count, duplicate_ct,items):\r\n uri = []\r\n map1,count=check_duplicate_image_new(items)\r\n for k, v in map1.items():\r\n name_url = []\r\n name_url.append(k)\r\n name_url.append(str(len(v)))\r\n name_url.append(str(v))\r\n uri.append(name_url)\r\n fb = open('duplicateData.html', 'w')\r\n message = \"\"\" <html> <head>\r\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\r\n <script type=\"text/javascript\">\r\n google.charts.load('current', {'packages':['table']});\r\n google.charts.setOnLoadCallback(drawTable);\r\n function drawTable() {\r\n var data3 = new google.visualization.DataTable();\r\n data3.addColumn('string', 'Name');\r\n data3.addColumn('string', 'Count');\r\n data3.addRows([\r\n ['Total Folders', '\"\"\" + str(folder_count) + \"\"\"'],\r\n ['Total Images', '\"\"\" + str(image_count) + \"\"\"'],\r\n ['Duplicate Images', '\"\"\" + str(duplicate_ct) + \"\"\"']]);\r\n\r\n var table2 = new google.visualization.Table(document.getElementById('table_div_base'));\r\n\r\n table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});\r\n var data = new google.visualization.DataTable();\r\n data.addColumn('string', 'Image Name');\r\n data.addColumn('string', 'Image Count');\r\n data.addColumn('string', 'Image Url');\r\n data.addRows(\"\"\" + 
str(uri) + \"\"\");\r\n var table = new google.visualization.Table(document.getElementById('table_div'));\r\n table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});\r\n }\r\n </script>\r\n </head>\r\n <body><h2 style=\"text-align: center\">Google Drive Summary Table</h2>\r\n <div id=\"table_div_base\" style=\"width: 100%; height: 200px; display:inline-block;border-style: solid\"></div>\r\n <h2 style=\"text-align: center\" >List of Duplicate Image</h2>\r\n <div id=\"table_div\" style=\"width: 100%; height: 500px; display:inline-block;border-style: solid\"></div>\r\n </body></html>\"\"\"\r\n\r\n fb.write(message)\r\n fb.close()\r\n print(\"Duplicate image data preparing.. \")\r\n # webbrowser.open_new_tab('helloworld.html')\r\n\r\n\r\ndef draw_chart_create_report(folder_count, image_count, duplicate_ct, map,folder_count_real):\r\n #folder_count=len(folder_count)\r\n fb = open('gDriveOverview.html', 'w')\r\n values = list(map.values())\r\n newlist = []\r\n folder_name = list(map.keys())\r\n total_image_count = []\r\n duplicate_image_count_in_folder = []\r\n for v in values:\r\n newlist.append(duplicate_image_list(v))\r\n total_image_count.append(len(v))\r\n for n in newlist:\r\n duplicate_image_count_in_folder.append(len(n))\r\n # create plot\r\n #print(total_image_count, duplicate_image_count_in_folder, map.keys())\r\n m1 = \"\"\"<html>\r\n <head>\r\n <h1 style =\"color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;\">Google Drive Data Overview</h1>\r\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\r\n <script type=\"text/javascript\">\r\n google.charts.load('current', {'packages':['bar','corechart','table']});\r\n google.charts.setOnLoadCallback(drawChart);\r\n function drawChart() {\r\n var paiData = google.visualization.arrayToDataTable([\r\n ['Drive', 'Drive Data'],\r\n ['Total Images', \"\"\" + str(image_count) + \"\"\"],\r\n ['Total duplicate 
Images', \"\"\" + str(duplicate_ct) + \"\"\"],\r\n ['Total Folder', \"\"\" + str(folder_count_real) + \"\"\"]\r\n ]);\r\n var paiOptions = {\r\n title: 'Google Drive Overview'\r\n };\r\n var chart = new google.visualization.PieChart(document.getElementById('piechart'));\r\n chart.draw(paiData, paiOptions);\r\n var barData = google.visualization.arrayToDataTable(\"\"\"\r\n fb.write(m1)\r\n barchart_data = []\r\n barchart_data.append(['Folders', 'Total no of Images', 'Total no of duplicate Images'])\r\n for i in range(len(values)):\r\n item_list = []\r\n item_list.append(folder_count[i])\r\n item_list.append(total_image_count[i])\r\n item_list.append(duplicate_image_count_in_folder[i])\r\n barchart_data.append(item_list)\r\n\r\n fb.write(m1)\r\n m3 = str(barchart_data) + \"\"\");\r\n \r\n var barOptions = {\r\n chart: { title: 'Google Drive Folderwise Overview',\r\n subtitle: 'This report is created on '+new Date(),\r\n }};\r\n \r\n var chart = new google.charts.Bar(document.getElementById('bar_chart'));\r\n \r\n chart.draw(barData, google.charts.Bar.convertOptions(barOptions));\r\n }\r\n </script>\r\n </head>\r\n <body>\r\n <div style=\"width:100%; margin:0px auto;\">\r\n <div id=\"piechart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\r\n <div id=\"bar_chart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\r\n </div>\r\n <div>\r\n <h2>\r\n <p style=\"float:right;color:red;\">** <a href=\"duplicateData.html\" target=\"_blank\">Click here to know more about duplicate image data</a></p>\r\n </h2></div></body></html>\r\n \"\"\"\r\n fb.write(m3)\r\n fb.close()\r\n print(\"Bar and Pie chart creating.... 
\")\r\n\r\n\r\ndef main():\r\n service = get_gdrive_service()\r\n print(\"Wait a moment script is running ..!!!\")\r\n results = service.files().list(pageSize=1000,\r\n fields=\"nextPageToken,files(id, name,mimeType,parents,webViewLink)\").execute()\r\n items = results.get('files', [])\r\n if not items:\r\n # empty drive\r\n print('No files found.')\r\n else:\r\n # create_folder(service)\r\n print(\"-----_\")\r\n name=\"g_image_\"\r\n renameFile(service,items,name)\r\n print(\"==============================\")\r\n #check_duplicate_image(items)\r\n # createDeviceCSV()\r\n list_files(items, service)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n print(\"Script is done ..!!!\")\r\n",
"step-ids": [
9,
11,
12,
13,
19
]
}
|
[
9,
11,
12,
13,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_cant_ever_init_twice(ethtester, root_chain):
ethtester.chain.mine()
with pytest.raises(TransactionFailed):
root_chain.init(sender=ethtester.k0)
with pytest.raises(TransactionFailed):
root_chain.init(sender=ethtester.k1)
<|reserved_special_token_1|>
import pytest
from ethereum.tools.tester import TransactionFailed
def test_cant_ever_init_twice(ethtester, root_chain):
ethtester.chain.mine()
with pytest.raises(TransactionFailed):
root_chain.init(sender=ethtester.k0)
with pytest.raises(TransactionFailed):
root_chain.init(sender=ethtester.k1)
|
flexible
|
{
"blob_id": "8417b63e2b7b16d3d58175022662c5b3e59e4aaf",
"index": 4640,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_cant_ever_init_twice(ethtester, root_chain):\n ethtester.chain.mine()\n with pytest.raises(TransactionFailed):\n root_chain.init(sender=ethtester.k0)\n with pytest.raises(TransactionFailed):\n root_chain.init(sender=ethtester.k1)\n",
"step-3": "import pytest\nfrom ethereum.tools.tester import TransactionFailed\n\n\ndef test_cant_ever_init_twice(ethtester, root_chain):\n ethtester.chain.mine()\n with pytest.raises(TransactionFailed):\n root_chain.init(sender=ethtester.k0)\n with pytest.raises(TransactionFailed):\n root_chain.init(sender=ethtester.k1)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) ->None:
"""
m->Size of nums1 list
n->Size of nums2 list
"""
mergedArray = []
i = 0
j = 0
while i < m and j < n:
if nums1[i] <= nums2[j]:
mergedArray.append(nums1[i])
i += 1
else:
mergedArray.append(nums2[j])
j += 1
while i < m:
mergedArray.append(nums1[i])
i += 1
while j < n:
mergedArray.append(nums2[j])
j += 1
return mergedArray
<|reserved_special_token_1|>
# Classic solution for merging two sorted arrays/list to a new one.
# (Based on Merge Sort)
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
m->Size of nums1 list
n->Size of nums2 list
"""
mergedArray = []
i = 0
j = 0
while(i < m and j < n):
if(nums1[i] <= nums2[j]):
mergedArray.append(nums1[i])
i += 1
else:
mergedArray.append(nums2[j])
j += 1
while(i < m):
mergedArray.append(nums1[i])
i += 1
while(j < n):
mergedArray.append(nums2[j])
j += 1
return mergedArray
|
flexible
|
{
"blob_id": "a732e7141ffb403ca6c5d9c4204cb96c8e831aab",
"index": 6814,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) ->None:\n \"\"\"\n m->Size of nums1 list\n n->Size of nums2 list\n \"\"\"\n mergedArray = []\n i = 0\n j = 0\n while i < m and j < n:\n if nums1[i] <= nums2[j]:\n mergedArray.append(nums1[i])\n i += 1\n else:\n mergedArray.append(nums2[j])\n j += 1\n while i < m:\n mergedArray.append(nums1[i])\n i += 1\n while j < n:\n mergedArray.append(nums2[j])\n j += 1\n return mergedArray\n",
"step-4": "# Classic solution for merging two sorted arrays/list to a new one.\n# (Based on Merge Sort)\nclass Solution:\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n \"\"\"\n m->Size of nums1 list\n n->Size of nums2 list\n \"\"\"\n mergedArray = []\n i = 0 \n j = 0\n while(i < m and j < n):\n if(nums1[i] <= nums2[j]):\n mergedArray.append(nums1[i])\n i += 1\n else:\n mergedArray.append(nums2[j])\n j += 1\n while(i < m):\n mergedArray.append(nums1[i])\n i += 1\n while(j < n):\n mergedArray.append(nums2[j])\n j += 1\n return mergedArray",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 1 10:18:11 2017
@author: Duong
"""
import pandas as pd
import matplotlib.pyplot as plt
import psycopg2
from pandas.core.frame import DataFrame
# DBS verbinden
database = psycopg2.connect(database="TeamYellow_election", user="student", password="password", host="agdbs-edu01.imp.fu-berlin.de", port="5432")
# SQl-Abfrage
cursor = database.cursor()
cursor.execute(
'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC')
result = cursor.fetchall()
# Dataframe erstellen
data=DataFrame(result, columns=['tweet_date', 'count'])
#Umwandlung des Datentyp der Spalte tweet_date
data['tweet_date_with_time'] = data['tweet_date'].astype('datetime64[ns]')
data['week_number'] = data['tweet_date_with_time'].dt.week
data['weekday']= data['tweet_date_with_time'].dt.dayofweek
# Gruppierung der Kalendarwochen mit einzelnen Counts
data2=data.copy()
del data2['tweet_date']
del data2['tweet_date_with_time']
del data2['weekday']
print(data2.groupby('week_number')['count'].apply(list))
# Aufbau Dataframe auf Erkenntnisse aus data2-Prints
data3 = pd.DataFrame({'KW01': [0, 0, 1, 0, 3, 0, 0],
'KW02': [3, 1, 7, 1, 0, 1, 0],
'KW03': [0, 2, 6, 1, 11, 3, 2],
'KW04': [13, 5, 1, 3, 6, 2, 1],
'KW05': [0, 1, 2, 0, 4, 3, 4],
'KW06': [2, 6, 1, 2, 1, 5, 0],
'KW07': [1, 3, 5, 2, 5, 2, 1],
'KW08': [2, 7, 1, 3, 5, 1, 3],
'KW09': [3, 10, 9, 3, 3, 6, 2],
'KW10': [0, 1, 2, 0, 2, 4, 0],
'KW11': [2, 3, 8, 0, 3, 10, 5],
'KW12': [0, 11, 4, 1, 0, 0, 0],
'KW13': [1, 0, 3, 2, 1, 6, 5],
'KW14': [4, 5, 0, 0, 1, 1, 2],
'KW15': [2, 4, 1, 2, 0, 4, 2],
'KW16': [0, 11, 4, 2, 3, 4, 1],
'KW17': [2, 6, 0, 1, 1, 0, 0],
'KW18': [4, 8, 0, 1, 1, 0, 0],
'KW19': [2, 8, 3, 0, 0, 0, 0],
'KW20': [1, 1, 1, 0, 5, 0, 1],
'KW21': [0, 0, 2, 1, 1, 0, 0],
'KW22': [0, 0, 1, 4, 2, 3, 0],
'KW23': [0, 0, 1, 0, 1, 2, 0],
'KW24': [0, 0, 3, 0, 1, 4, 1],
'KW25': [0, 0, 1, 10, 0, 0, 0],
'KW26': [1, 1, 0, 0, 2, 3, 0],
'KW27': [1, 0, 0, 2, 0, 0, 0],
'KW28': [1, 2, 2, 1, 0, 1, 0],
'KW29': [0, 1, 2, 7, 2, 1, 0],
'KW30': [1, 3, 3, 4, 0, 1, 1],
'KW31': [3, 2, 2, 0, 1, 4, 1],
'KW32': [1, 6, 0, 0, 0, 1, 0],
'KW33': [0, 0, 4, 0, 1, 1, 0],
'KW34': [1, 0, 1, 2, 1, 2, 1],
'KW35': [2, 0, 1, 3, 1, 0, 0],
'KW36': [1, 1, 2, 2, 2, 0, 0],
'KW37': [0, 1, 1, 2, 4, 0, 0],
'KW38': [0, 3, 0, 2, 1, 1, 0],
'KW39': [3, 18, 0, 0, 0, 0, 0]})
data4= data3.transpose()
data4.columns =['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag', 'Samstag', 'Sonntag']
data4['Kalendarwoche']=data4.index
############################# Bau eines Stacked Bar Chart ############################################
#Grundgerüst des Balkendiagramms
f, ax1 = plt.subplots(1, figsize=(25,20))
# Balkengröße
bar_width = 0.75
# Balken fangen von links an
bar_l = [i+1 for i in range(len(data4['Montag']))]
# Position der X-Achsen Werte
tick_pos = [i+(bar_width/2) for i in bar_l]
# Beginn der Erstellung der Balken nach Wochentagen
ax1.bar(bar_l,
data4['Montag'],
width=bar_width,
label='Montag',
alpha=0.5,
color='#1858ef')
ax1.bar(bar_l,
data4['Dienstag'],
width=bar_width,
bottom=data4['Montag'],
label='Dienstag',
alpha=0.5,
color='#6618ef')
ax1.bar(bar_l,
data4['Mittwoch'],
width=bar_width,
bottom=[i+j for i,j in zip(data4['Montag'],data4['Dienstag'])],
label='Mittwoch',
alpha=0.5,
color='#ef1829')
# Thursday segment: stacked on top of the Mon+Tue+Wed totals.
# (data4 is presumably a weekday-by-calendar-week pivot table built earlier
# in the script -- confirm against the preceding definitions.)
ax1.bar(bar_l,
        data4['Donnerstag'],
        width=bar_width,
        bottom=[i+j+k for i,j,k in zip(data4['Montag'],data4['Dienstag'], data4['Mittwoch'])],
        label='Donnerstag',
        alpha=0.5,
        color='#ef7c18')
# Friday segment: stacked on top of Mon..Thu.
ax1.bar(bar_l,
        data4['Freitag'],
        width=bar_width,
        bottom=[i+j+k+l for i,j,k,l in zip(data4['Montag'],data4['Dienstag'],
        data4['Mittwoch'], data4['Donnerstag'])],
        label='Freitag',
        alpha=0.5,
        color='#efc718')
# Saturday segment: stacked on top of Mon..Fri.
ax1.bar(bar_l,
        data4['Samstag'],
        width=bar_width,
        bottom=[i+j+k+l+m for i,j,k,l,m in zip(data4['Montag'],data4['Dienstag'],
        data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'])],
        label='Samstag',
        alpha=0.5,
        color='#63ef18')
# Sunday segment: stacked on top of Mon..Sat, completing the weekly stack.
ax1.bar(bar_l,
        data4['Sonntag'],
        width=bar_width,
        bottom=[i+j+k+l+m+n for i,j,k,l,m,n in zip(data4['Montag'],data4['Dienstag'],
        data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'],
        data4['Samstag'])],
        label='Sonntag',
        alpha=0.5,
        color='#18efa3')
# Label the x-axis ticks with the calendar-week names.
plt.xticks(tick_pos, data4['Kalendarwoche'])
# Axis labels (German UI strings kept as-is) and legend.
ax1.set_ylabel("Häufigkeit")
ax1.set_xlabel("Kalendarwoche")
plt.legend(loc='upper left')
# Leave horizontal padding of one bar width on each side of the chart.
plt.xlim([min(tick_pos)-bar_width, max(tick_pos)+bar_width])
############### Bar chart grouped by calendar week #########################################
# ISO calendar week number of a date (isocalendar() -> (year, week, weekday)).
kw = lambda x: x.isocalendar()[1]
# Sum the per-day counts within each calendar week; sort=False preserves
# the order in which the weeks appear in the data.
grouped = data.groupby([data['tweet_date_with_time'].map(kw)], sort=False).agg({'count': 'sum'})
# Hard-coded week labels -- assumes the data covers exactly KW1..KW39 in
# order; TODO confirm this still matches the underlying date range.
grouped['calendar week']= ('KW1','KW2','KW3','KW4','KW5','KW6','KW7','KW8','KW9','KW10','KW11','KW12','KW13',
    'KW14','KW15','KW16','KW17','KW18','KW19','KW20','KW21','KW22','KW23','KW24','KW25','KW26', 'KW27','KW28','KW29',
    'KW30','KW31','KW32','KW33','KW34','KW35','KW36','KW37','KW38','KW39')
# Bar chart of all hashtag uses per calendar week.
grouped.set_index('calendar week').plot.bar(rot=45, title='Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15,10), fontsize=10)
############## Bar chart of all hashtag uses per day #####################################
data5=data[['tweet_date','count']].copy()
# Bar chart of all hashtag uses per day.
data5.set_index('tweet_date').plot.bar(rot=90, title='Häufigkeit aller Hashtag in Tagen', figsize=(50,25), color ='#ef6618', fontsize=14)
|
normal
|
{
"blob_id": "076b852010ddcea69a294f9f2a653bb2fa2f2676",
"index": 3531,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncursor.execute(\n 'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC'\n )\n<mask token>\ndel data2['tweet_date']\ndel data2['tweet_date_with_time']\ndel data2['weekday']\nprint(data2.groupby('week_number')['count'].apply(list))\n<mask token>\nax1.bar(bar_l, data4['Montag'], width=bar_width, label='Montag', alpha=0.5,\n color='#1858ef')\nax1.bar(bar_l, data4['Dienstag'], width=bar_width, bottom=data4['Montag'],\n label='Dienstag', alpha=0.5, color='#6618ef')\nax1.bar(bar_l, data4['Mittwoch'], width=bar_width, bottom=[(i + j) for i, j in\n zip(data4['Montag'], data4['Dienstag'])], label='Mittwoch', alpha=0.5,\n color='#ef1829')\nax1.bar(bar_l, data4['Donnerstag'], width=bar_width, bottom=[(i + j + k) for\n i, j, k in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'])],\n label='Donnerstag', alpha=0.5, color='#ef7c18')\nax1.bar(bar_l, data4['Freitag'], width=bar_width, bottom=[(i + j + k + l) for\n i, j, k, l in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'],\n data4['Donnerstag'])], label='Freitag', alpha=0.5, color='#efc718')\nax1.bar(bar_l, data4['Samstag'], width=bar_width, bottom=[(i + j + k + l +\n m) for i, j, k, l, m in zip(data4['Montag'], data4['Dienstag'], data4[\n 'Mittwoch'], data4['Donnerstag'], data4['Freitag'])], label='Samstag',\n alpha=0.5, color='#63ef18')\nax1.bar(bar_l, data4['Sonntag'], width=bar_width, bottom=[(i + j + k + l +\n m + n) for i, j, k, l, m, n in zip(data4['Montag'], data4['Dienstag'],\n data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'], data4[\n 'Samstag'])], label='Sonntag', alpha=0.5, color='#18efa3')\nplt.xticks(tick_pos, data4['Kalendarwoche'])\nax1.set_ylabel('Häufigkeit')\nax1.set_xlabel('Kalendarwoche')\nplt.legend(loc='upper left')\nplt.xlim([min(tick_pos) - bar_width, max(tick_pos) + bar_width])\n<mask token>\ngrouped.set_index('calendar 
week').plot.bar(rot=45, title=\n 'Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15, 10),\n fontsize=10)\n<mask token>\ndata5.set_index('tweet_date').plot.bar(rot=90, title=\n 'Häufigkeit aller Hashtag in Tagen', figsize=(50, 25), color='#ef6618',\n fontsize=14)\n",
"step-3": "<mask token>\ndatabase = psycopg2.connect(database='TeamYellow_election', user='student',\n password='password', host='agdbs-edu01.imp.fu-berlin.de', port='5432')\ncursor = database.cursor()\ncursor.execute(\n 'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC'\n )\nresult = cursor.fetchall()\ndata = DataFrame(result, columns=['tweet_date', 'count'])\ndata['tweet_date_with_time'] = data['tweet_date'].astype('datetime64[ns]')\ndata['week_number'] = data['tweet_date_with_time'].dt.week\ndata['weekday'] = data['tweet_date_with_time'].dt.dayofweek\ndata2 = data.copy()\ndel data2['tweet_date']\ndel data2['tweet_date_with_time']\ndel data2['weekday']\nprint(data2.groupby('week_number')['count'].apply(list))\ndata3 = pd.DataFrame({'KW01': [0, 0, 1, 0, 3, 0, 0], 'KW02': [3, 1, 7, 1, 0,\n 1, 0], 'KW03': [0, 2, 6, 1, 11, 3, 2], 'KW04': [13, 5, 1, 3, 6, 2, 1],\n 'KW05': [0, 1, 2, 0, 4, 3, 4], 'KW06': [2, 6, 1, 2, 1, 5, 0], 'KW07': [\n 1, 3, 5, 2, 5, 2, 1], 'KW08': [2, 7, 1, 3, 5, 1, 3], 'KW09': [3, 10, 9,\n 3, 3, 6, 2], 'KW10': [0, 1, 2, 0, 2, 4, 0], 'KW11': [2, 3, 8, 0, 3, 10,\n 5], 'KW12': [0, 11, 4, 1, 0, 0, 0], 'KW13': [1, 0, 3, 2, 1, 6, 5],\n 'KW14': [4, 5, 0, 0, 1, 1, 2], 'KW15': [2, 4, 1, 2, 0, 4, 2], 'KW16': [\n 0, 11, 4, 2, 3, 4, 1], 'KW17': [2, 6, 0, 1, 1, 0, 0], 'KW18': [4, 8, 0,\n 1, 1, 0, 0], 'KW19': [2, 8, 3, 0, 0, 0, 0], 'KW20': [1, 1, 1, 0, 5, 0, \n 1], 'KW21': [0, 0, 2, 1, 1, 0, 0], 'KW22': [0, 0, 1, 4, 2, 3, 0],\n 'KW23': [0, 0, 1, 0, 1, 2, 0], 'KW24': [0, 0, 3, 0, 1, 4, 1], 'KW25': [\n 0, 0, 1, 10, 0, 0, 0], 'KW26': [1, 1, 0, 0, 2, 3, 0], 'KW27': [1, 0, 0,\n 2, 0, 0, 0], 'KW28': [1, 2, 2, 1, 0, 1, 0], 'KW29': [0, 1, 2, 7, 2, 1, \n 0], 'KW30': [1, 3, 3, 4, 0, 1, 1], 'KW31': [3, 2, 2, 0, 1, 4, 1],\n 'KW32': [1, 6, 0, 0, 0, 1, 0], 'KW33': [0, 0, 4, 0, 1, 1, 0], 'KW34': [\n 1, 0, 1, 2, 1, 2, 1], 'KW35': [2, 0, 1, 3, 1, 
0, 0], 'KW36': [1, 1, 2, \n 2, 2, 0, 0], 'KW37': [0, 1, 1, 2, 4, 0, 0], 'KW38': [0, 3, 0, 2, 1, 1, \n 0], 'KW39': [3, 18, 0, 0, 0, 0, 0]})\ndata4 = data3.transpose()\ndata4.columns = ['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag',\n 'Samstag', 'Sonntag']\ndata4['Kalendarwoche'] = data4.index\nf, ax1 = plt.subplots(1, figsize=(25, 20))\nbar_width = 0.75\nbar_l = [(i + 1) for i in range(len(data4['Montag']))]\ntick_pos = [(i + bar_width / 2) for i in bar_l]\nax1.bar(bar_l, data4['Montag'], width=bar_width, label='Montag', alpha=0.5,\n color='#1858ef')\nax1.bar(bar_l, data4['Dienstag'], width=bar_width, bottom=data4['Montag'],\n label='Dienstag', alpha=0.5, color='#6618ef')\nax1.bar(bar_l, data4['Mittwoch'], width=bar_width, bottom=[(i + j) for i, j in\n zip(data4['Montag'], data4['Dienstag'])], label='Mittwoch', alpha=0.5,\n color='#ef1829')\nax1.bar(bar_l, data4['Donnerstag'], width=bar_width, bottom=[(i + j + k) for\n i, j, k in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'])],\n label='Donnerstag', alpha=0.5, color='#ef7c18')\nax1.bar(bar_l, data4['Freitag'], width=bar_width, bottom=[(i + j + k + l) for\n i, j, k, l in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'],\n data4['Donnerstag'])], label='Freitag', alpha=0.5, color='#efc718')\nax1.bar(bar_l, data4['Samstag'], width=bar_width, bottom=[(i + j + k + l +\n m) for i, j, k, l, m in zip(data4['Montag'], data4['Dienstag'], data4[\n 'Mittwoch'], data4['Donnerstag'], data4['Freitag'])], label='Samstag',\n alpha=0.5, color='#63ef18')\nax1.bar(bar_l, data4['Sonntag'], width=bar_width, bottom=[(i + j + k + l +\n m + n) for i, j, k, l, m, n in zip(data4['Montag'], data4['Dienstag'],\n data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'], data4[\n 'Samstag'])], label='Sonntag', alpha=0.5, color='#18efa3')\nplt.xticks(tick_pos, data4['Kalendarwoche'])\nax1.set_ylabel('Häufigkeit')\nax1.set_xlabel('Kalendarwoche')\nplt.legend(loc='upper left')\nplt.xlim([min(tick_pos) - bar_width, 
max(tick_pos) + bar_width])\nkw = lambda x: x.isocalendar()[1]\ngrouped = data.groupby([data['tweet_date_with_time'].map(kw)], sort=False).agg(\n {'count': 'sum'})\ngrouped['calendar week'] = ('KW1', 'KW2', 'KW3', 'KW4', 'KW5', 'KW6', 'KW7',\n 'KW8', 'KW9', 'KW10', 'KW11', 'KW12', 'KW13', 'KW14', 'KW15', 'KW16',\n 'KW17', 'KW18', 'KW19', 'KW20', 'KW21', 'KW22', 'KW23', 'KW24', 'KW25',\n 'KW26', 'KW27', 'KW28', 'KW29', 'KW30', 'KW31', 'KW32', 'KW33', 'KW34',\n 'KW35', 'KW36', 'KW37', 'KW38', 'KW39')\ngrouped.set_index('calendar week').plot.bar(rot=45, title=\n 'Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15, 10),\n fontsize=10)\ndata5 = data[['tweet_date', 'count']].copy()\ndata5.set_index('tweet_date').plot.bar(rot=90, title=\n 'Häufigkeit aller Hashtag in Tagen', figsize=(50, 25), color='#ef6618',\n fontsize=14)\n",
"step-4": "<mask token>\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport psycopg2\nfrom pandas.core.frame import DataFrame\ndatabase = psycopg2.connect(database='TeamYellow_election', user='student',\n password='password', host='agdbs-edu01.imp.fu-berlin.de', port='5432')\ncursor = database.cursor()\ncursor.execute(\n 'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC'\n )\nresult = cursor.fetchall()\ndata = DataFrame(result, columns=['tweet_date', 'count'])\ndata['tweet_date_with_time'] = data['tweet_date'].astype('datetime64[ns]')\ndata['week_number'] = data['tweet_date_with_time'].dt.week\ndata['weekday'] = data['tweet_date_with_time'].dt.dayofweek\ndata2 = data.copy()\ndel data2['tweet_date']\ndel data2['tweet_date_with_time']\ndel data2['weekday']\nprint(data2.groupby('week_number')['count'].apply(list))\ndata3 = pd.DataFrame({'KW01': [0, 0, 1, 0, 3, 0, 0], 'KW02': [3, 1, 7, 1, 0,\n 1, 0], 'KW03': [0, 2, 6, 1, 11, 3, 2], 'KW04': [13, 5, 1, 3, 6, 2, 1],\n 'KW05': [0, 1, 2, 0, 4, 3, 4], 'KW06': [2, 6, 1, 2, 1, 5, 0], 'KW07': [\n 1, 3, 5, 2, 5, 2, 1], 'KW08': [2, 7, 1, 3, 5, 1, 3], 'KW09': [3, 10, 9,\n 3, 3, 6, 2], 'KW10': [0, 1, 2, 0, 2, 4, 0], 'KW11': [2, 3, 8, 0, 3, 10,\n 5], 'KW12': [0, 11, 4, 1, 0, 0, 0], 'KW13': [1, 0, 3, 2, 1, 6, 5],\n 'KW14': [4, 5, 0, 0, 1, 1, 2], 'KW15': [2, 4, 1, 2, 0, 4, 2], 'KW16': [\n 0, 11, 4, 2, 3, 4, 1], 'KW17': [2, 6, 0, 1, 1, 0, 0], 'KW18': [4, 8, 0,\n 1, 1, 0, 0], 'KW19': [2, 8, 3, 0, 0, 0, 0], 'KW20': [1, 1, 1, 0, 5, 0, \n 1], 'KW21': [0, 0, 2, 1, 1, 0, 0], 'KW22': [0, 0, 1, 4, 2, 3, 0],\n 'KW23': [0, 0, 1, 0, 1, 2, 0], 'KW24': [0, 0, 3, 0, 1, 4, 1], 'KW25': [\n 0, 0, 1, 10, 0, 0, 0], 'KW26': [1, 1, 0, 0, 2, 3, 0], 'KW27': [1, 0, 0,\n 2, 0, 0, 0], 'KW28': [1, 2, 2, 1, 0, 1, 0], 'KW29': [0, 1, 2, 7, 2, 1, \n 0], 'KW30': [1, 3, 3, 4, 0, 1, 1], 'KW31': [3, 2, 2, 0, 1, 4, 1],\n 'KW32': 
[1, 6, 0, 0, 0, 1, 0], 'KW33': [0, 0, 4, 0, 1, 1, 0], 'KW34': [\n 1, 0, 1, 2, 1, 2, 1], 'KW35': [2, 0, 1, 3, 1, 0, 0], 'KW36': [1, 1, 2, \n 2, 2, 0, 0], 'KW37': [0, 1, 1, 2, 4, 0, 0], 'KW38': [0, 3, 0, 2, 1, 1, \n 0], 'KW39': [3, 18, 0, 0, 0, 0, 0]})\ndata4 = data3.transpose()\ndata4.columns = ['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag',\n 'Samstag', 'Sonntag']\ndata4['Kalendarwoche'] = data4.index\nf, ax1 = plt.subplots(1, figsize=(25, 20))\nbar_width = 0.75\nbar_l = [(i + 1) for i in range(len(data4['Montag']))]\ntick_pos = [(i + bar_width / 2) for i in bar_l]\nax1.bar(bar_l, data4['Montag'], width=bar_width, label='Montag', alpha=0.5,\n color='#1858ef')\nax1.bar(bar_l, data4['Dienstag'], width=bar_width, bottom=data4['Montag'],\n label='Dienstag', alpha=0.5, color='#6618ef')\nax1.bar(bar_l, data4['Mittwoch'], width=bar_width, bottom=[(i + j) for i, j in\n zip(data4['Montag'], data4['Dienstag'])], label='Mittwoch', alpha=0.5,\n color='#ef1829')\nax1.bar(bar_l, data4['Donnerstag'], width=bar_width, bottom=[(i + j + k) for\n i, j, k in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'])],\n label='Donnerstag', alpha=0.5, color='#ef7c18')\nax1.bar(bar_l, data4['Freitag'], width=bar_width, bottom=[(i + j + k + l) for\n i, j, k, l in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'],\n data4['Donnerstag'])], label='Freitag', alpha=0.5, color='#efc718')\nax1.bar(bar_l, data4['Samstag'], width=bar_width, bottom=[(i + j + k + l +\n m) for i, j, k, l, m in zip(data4['Montag'], data4['Dienstag'], data4[\n 'Mittwoch'], data4['Donnerstag'], data4['Freitag'])], label='Samstag',\n alpha=0.5, color='#63ef18')\nax1.bar(bar_l, data4['Sonntag'], width=bar_width, bottom=[(i + j + k + l +\n m + n) for i, j, k, l, m, n in zip(data4['Montag'], data4['Dienstag'],\n data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'], data4[\n 'Samstag'])], label='Sonntag', alpha=0.5, color='#18efa3')\nplt.xticks(tick_pos, 
data4['Kalendarwoche'])\nax1.set_ylabel('Häufigkeit')\nax1.set_xlabel('Kalendarwoche')\nplt.legend(loc='upper left')\nplt.xlim([min(tick_pos) - bar_width, max(tick_pos) + bar_width])\nkw = lambda x: x.isocalendar()[1]\ngrouped = data.groupby([data['tweet_date_with_time'].map(kw)], sort=False).agg(\n {'count': 'sum'})\ngrouped['calendar week'] = ('KW1', 'KW2', 'KW3', 'KW4', 'KW5', 'KW6', 'KW7',\n 'KW8', 'KW9', 'KW10', 'KW11', 'KW12', 'KW13', 'KW14', 'KW15', 'KW16',\n 'KW17', 'KW18', 'KW19', 'KW20', 'KW21', 'KW22', 'KW23', 'KW24', 'KW25',\n 'KW26', 'KW27', 'KW28', 'KW29', 'KW30', 'KW31', 'KW32', 'KW33', 'KW34',\n 'KW35', 'KW36', 'KW37', 'KW38', 'KW39')\ngrouped.set_index('calendar week').plot.bar(rot=45, title=\n 'Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15, 10),\n fontsize=10)\ndata5 = data[['tweet_date', 'count']].copy()\ndata5.set_index('tweet_date').plot.bar(rot=90, title=\n 'Häufigkeit aller Hashtag in Tagen', figsize=(50, 25), color='#ef6618',\n fontsize=14)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 1 10:18:11 2017\r\n\r\n@author: Duong\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport psycopg2\r\nfrom pandas.core.frame import DataFrame\r\n\r\n\r\n\r\n\r\n\r\n# DBS verbinden\r\ndatabase = psycopg2.connect(database=\"TeamYellow_election\", user=\"student\", password=\"password\", host=\"agdbs-edu01.imp.fu-berlin.de\", port=\"5432\")\r\n\r\n# SQl-Abfrage\r\ncursor = database.cursor()\r\ncursor.execute(\r\n 'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC')\r\nresult = cursor.fetchall()\r\n\r\n# Dataframe erstellen\r\ndata=DataFrame(result, columns=['tweet_date', 'count'])\r\n\r\n\r\n#Umwandlung des Datentyp der Spalte tweet_date\r\ndata['tweet_date_with_time'] = data['tweet_date'].astype('datetime64[ns]')\r\ndata['week_number'] = data['tweet_date_with_time'].dt.week\r\ndata['weekday']= data['tweet_date_with_time'].dt.dayofweek\r\n\r\n\r\n# Gruppierung der Kalendarwochen mit einzelnen Counts\r\ndata2=data.copy()\r\ndel data2['tweet_date']\r\ndel data2['tweet_date_with_time']\r\ndel data2['weekday']\r\n\r\nprint(data2.groupby('week_number')['count'].apply(list))\r\n\r\n# Aufbau Dataframe auf Erkenntnisse aus data2-Prints\r\ndata3 = pd.DataFrame({'KW01': [0, 0, 1, 0, 3, 0, 0],\r\n 'KW02': [3, 1, 7, 1, 0, 1, 0],\r\n 'KW03': [0, 2, 6, 1, 11, 3, 2],\r\n 'KW04': [13, 5, 1, 3, 6, 2, 1],\r\n 'KW05': [0, 1, 2, 0, 4, 3, 4],\r\n 'KW06': [2, 6, 1, 2, 1, 5, 0],\r\n 'KW07': [1, 3, 5, 2, 5, 2, 1],\r\n 'KW08': [2, 7, 1, 3, 5, 1, 3],\r\n 'KW09': [3, 10, 9, 3, 3, 6, 2],\r\n 'KW10': [0, 1, 2, 0, 2, 4, 0],\r\n 'KW11': [2, 3, 8, 0, 3, 10, 5],\r\n 'KW12': [0, 11, 4, 1, 0, 0, 0],\r\n 'KW13': [1, 0, 3, 2, 1, 6, 5],\r\n 'KW14': [4, 5, 0, 0, 1, 1, 2],\r\n 'KW15': [2, 4, 1, 2, 0, 4, 2],\r\n 'KW16': [0, 11, 4, 2, 3, 4, 1],\r\n 'KW17': [2, 6, 0, 1, 1, 0, 0],\r\n 
'KW18': [4, 8, 0, 1, 1, 0, 0],\r\n 'KW19': [2, 8, 3, 0, 0, 0, 0],\r\n 'KW20': [1, 1, 1, 0, 5, 0, 1],\r\n 'KW21': [0, 0, 2, 1, 1, 0, 0],\r\n 'KW22': [0, 0, 1, 4, 2, 3, 0],\r\n 'KW23': [0, 0, 1, 0, 1, 2, 0],\r\n 'KW24': [0, 0, 3, 0, 1, 4, 1],\r\n 'KW25': [0, 0, 1, 10, 0, 0, 0],\r\n 'KW26': [1, 1, 0, 0, 2, 3, 0],\r\n 'KW27': [1, 0, 0, 2, 0, 0, 0],\r\n 'KW28': [1, 2, 2, 1, 0, 1, 0],\r\n 'KW29': [0, 1, 2, 7, 2, 1, 0],\r\n 'KW30': [1, 3, 3, 4, 0, 1, 1],\r\n 'KW31': [3, 2, 2, 0, 1, 4, 1],\r\n 'KW32': [1, 6, 0, 0, 0, 1, 0],\r\n 'KW33': [0, 0, 4, 0, 1, 1, 0],\r\n 'KW34': [1, 0, 1, 2, 1, 2, 1],\r\n 'KW35': [2, 0, 1, 3, 1, 0, 0],\r\n 'KW36': [1, 1, 2, 2, 2, 0, 0],\r\n 'KW37': [0, 1, 1, 2, 4, 0, 0],\r\n 'KW38': [0, 3, 0, 2, 1, 1, 0],\r\n 'KW39': [3, 18, 0, 0, 0, 0, 0]})\r\n\r\n\r\ndata4= data3.transpose()\r\ndata4.columns =['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag', 'Samstag', 'Sonntag']\r\ndata4['Kalendarwoche']=data4.index\r\n\r\n############################# Bau eines Stacked Bar Chart ############################################\r\n\r\n#Grundgerüst des Balkendiagramms\r\nf, ax1 = plt.subplots(1, figsize=(25,20))\r\n\r\n# Balkengröße\r\nbar_width = 0.75\r\n\r\n# Balken fangen von links an\r\nbar_l = [i+1 for i in range(len(data4['Montag']))]\r\n\r\n# Position der X-Achsen Werte\r\ntick_pos = [i+(bar_width/2) for i in bar_l]\r\n\r\n# Beginn der Erstellung der Balken nach Wochentagen\r\nax1.bar(bar_l,\r\n data4['Montag'],\r\n width=bar_width,\r\n label='Montag',\r\n alpha=0.5,\r\n color='#1858ef')\r\n\r\n\r\nax1.bar(bar_l,\r\n data4['Dienstag'],\r\n width=bar_width,\r\n bottom=data4['Montag'],\r\n label='Dienstag',\r\n alpha=0.5,\r\n color='#6618ef')\r\n\r\nax1.bar(bar_l,\r\n data4['Mittwoch'],\r\n width=bar_width,\r\n bottom=[i+j for i,j in zip(data4['Montag'],data4['Dienstag'])],\r\n label='Mittwoch',\r\n alpha=0.5,\r\n color='#ef1829')\r\n\r\nax1.bar(bar_l,\r\n data4['Donnerstag'],\r\n width=bar_width,\r\n bottom=[i+j+k for i,j,k in 
zip(data4['Montag'],data4['Dienstag'], data4['Mittwoch'])],\r\n label='Donnerstag',\r\n alpha=0.5,\r\n color='#ef7c18')\r\n\r\nax1.bar(bar_l,\r\n data4['Freitag'],\r\n width=bar_width,\r\n bottom=[i+j+k+l for i,j,k,l in zip(data4['Montag'],data4['Dienstag'], \r\n data4['Mittwoch'], data4['Donnerstag'])],\r\n label='Freitag',\r\n alpha=0.5,\r\n color='#efc718')\r\n\r\nax1.bar(bar_l,\r\n data4['Samstag'],\r\n width=bar_width,\r\n bottom=[i+j+k+l+m for i,j,k,l,m in zip(data4['Montag'],data4['Dienstag'], \r\n data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'])],\r\n label='Samstag',\r\n alpha=0.5,\r\n color='#63ef18')\r\n\r\n\r\nax1.bar(bar_l,\r\n data4['Sonntag'],\r\n width=bar_width,\r\n bottom=[i+j+k+l+m+n for i,j,k,l,m,n in zip(data4['Montag'],data4['Dienstag'], \r\n data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'],\r\n data4['Samstag'])],\r\n label='Sonntag',\r\n alpha=0.5,\r\n color='#18efa3')\r\n\r\n# X-Achse mit Werte versehen\r\nplt.xticks(tick_pos, data4['Kalendarwoche'])\r\n\r\n#Legende\r\nax1.set_ylabel(\"Häufigkeit\")\r\nax1.set_xlabel(\"Kalendarwoche\")\r\nplt.legend(loc='upper left')\r\n\r\n# Zwischen den Diagrammen Platz lassen\r\nplt.xlim([min(tick_pos)-bar_width, max(tick_pos)+bar_width])\r\n\r\n############### Balkendiagramm nach Kalendarwoche#########################################\r\n\r\nkw = lambda x: x.isocalendar()[1]\r\ngrouped = data.groupby([data['tweet_date_with_time'].map(kw)], sort=False).agg({'count': 'sum'})\r\n\r\ngrouped['calendar week']= ('KW1','KW2','KW3','KW4','KW5','KW6','KW7','KW8','KW9','KW10','KW11','KW12','KW13',\r\n 'KW14','KW15','KW16','KW17','KW18','KW19','KW20','KW21','KW22','KW23','KW24','KW25','KW26', 'KW27','KW28','KW29',\r\n 'KW30','KW31','KW32','KW33','KW34','KW35','KW36','KW37','KW38','KW39')\r\n\r\n\r\n\r\n#Balkendiagramm für alle Hashtag in Kalendarwoche\r\ngrouped.set_index('calendar week').plot.bar(rot=45, title='Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15,10), 
fontsize=10)\r\n\r\n############## Balkendiagramm für alle Hashtag pro Tag #####################################\r\ndata5=data[['tweet_date','count']].copy()\r\n#Balkendiagramm für alle Hashtag in Tagen\r\ndata5.set_index('tweet_date').plot.bar(rot=90, title='Häufigkeit aller Hashtag in Tagen', figsize=(50,25), color ='#ef6618', fontsize=14)\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import inaccel.coral as inaccel
import numpy as np
import time
class StereoBM:
    """Stereo block-matching depth estimation on an InAccel FPGA accelerator.

    Wraps the ``com.xilinx.vitis.vision.stereoBM`` kernel: it rectifies a
    left/right grayscale image pair with the supplied camera calibration and
    computes a dense disparity map.  The defaults correspond to the sample
    stereo rig used by the Vitis Vision stereoBM example.
    """

    @staticmethod
    def _param_array(value, default, dtype=np.float32):
        """Return *value* (or *default* when value is None) as a new array.

        Must be called inside a ``with inaccel.allocator:`` block so the
        array is placed in accelerator-visible shared memory.
        """
        return np.array(default if value is None else value, dtype=dtype)

    def __init__(self, cameraMA_l=None, cameraMA_r=None, distC_l=None, distC_r=None, irA_l=None, irA_r=None, bm_state=None):
        """Allocate calibration/state buffers in accelerator-shared memory.

        All parameters are optional; built-in calibration defaults are used
        for any that are None:
          cameraMA_l / cameraMA_r -- 3x3 camera intrinsic matrices (row-major, 9 floats).
          distC_l / distC_r       -- 5-element distortion coefficient vectors.
          irA_l / irA_r           -- 3x3 inverse rectification transforms (9 floats).
          bm_state                -- 11 int32 block-matching parameters
                                     (passed opaquely to the kernel).
        """
        # Timing markers; initialized so lastruntime() is safe to call
        # before the first run().
        self.m_runStartTime = 0
        self.m_runEndTime = 0
        # Every buffer handed to the accelerator must come from inaccel's
        # shared-memory allocator.
        with inaccel.allocator:
            self.cameraMA_l_fl = self._param_array(
                cameraMA_l,
                [933.173, 0.0, 663.451, 0.0, 933.173, 377.015, 0.0, 0.0, 1.0])
            self.cameraMA_r_fl = self._param_array(
                cameraMA_r,
                [933.467, 0.0, 678.297, 0.0, 933.467, 359.623, 0.0, 0.0, 1.0])
            self.distC_l_fl = self._param_array(
                distC_l, [-0.169398, 0.0227329, 0.0, 0.0, 0.0])
            self.distC_r_fl = self._param_array(
                distC_r, [-0.170581, 0.0249444, 0.0, 0.0, 0.0])
            self.irA_l_fl = self._param_array(
                irA_l,
                [0.0011976323, -0.0000000019, -0.8153011732,
                 0.0000000007, 0.0011976994, -0.4422348617,
                 0.0000126839, 0.0000001064, 0.9913820905])
            self.irA_r_fl = self._param_array(
                irA_r,
                [0.0011976994, 0.0000000000, -0.8047567905,
                 -0.0000000000, 0.0011976994, -0.4420566166,
                 -0.0000000000, -0.0000001064, 1.0000392898])
            self.bm_state_arr = self._param_array(
                bm_state,
                [0, 15, 31, 15, 0, 48, 20, 15, 16, 3, 0], dtype=np.int32)

    def runAsync(self, left_img, right_img):
        """Submit a stereo pair to the accelerator without blocking.

        Both images must be 2-D arrays of identical shape (grayscale;
        presumably uint8 -- the kernel contract is not visible here).
        Raises RuntimeError on a missing image or a size mismatch.
        Call wait() to retrieve the disparity map.
        """
        self.m_runStartTime = int(round(time.time() * 1000000))  # microseconds
        if left_img is None:
            raise RuntimeError('Invalid left image')
        if right_img is None:
            raise RuntimeError('Invalid right image')
        if left_img.shape[0] != right_img.shape[0] or left_img.shape[1] != right_img.shape[1]:
            raise RuntimeError('Image sizes differ')
        rows = np.int32(left_img.shape[0])
        cols = np.int32(left_img.shape[1])
        # Input copies and the uint16 disparity output must live in
        # accelerator-shared memory.
        with inaccel.allocator:
            self.left_mat = np.array(left_img)
            self.right_mat = np.array(right_img)
            self.disp_mat = np.ndarray((rows, cols), dtype=np.uint16)
        # Build and submit the request; argument order matches the stereoBM
        # kernel signature.
        req = inaccel.request('com.xilinx.vitis.vision.stereoBM')
        req.arg(self.left_mat).arg(self.right_mat).arg(self.disp_mat)
        req.arg(self.cameraMA_l_fl).arg(self.cameraMA_r_fl)
        req.arg(self.distC_l_fl).arg(self.distC_r_fl)
        req.arg(self.irA_l_fl).arg(self.irA_r_fl)
        req.arg(self.bm_state_arr)
        req.arg(rows).arg(cols)
        self.response = inaccel.submit(req)

    def wait(self):
        """Block until the submitted request completes; return the disparity map.

        The raw output is divided by 16 (presumably uint16 fixed point with
        4 fractional bits) and rescaled by 256/48 -- apparently mapping a
        48-disparity search range onto 0..255 -- then returned as uint8.
        TODO confirm against the kernel documentation.
        """
        self.response.result()
        disp_mat_scaled = (self.disp_mat.view(np.ndarray) * (256.0 / 48.0) / 16.0).astype(np.uint8)
        self.m_runEndTime = int(round(time.time() * 1000000))
        return disp_mat_scaled

    def run(self, left_img, right_img):
        """Convenience wrapper: submit a stereo pair and wait for the result."""
        self.runAsync(left_img, right_img)
        return self.wait()

    def lastruntime(self):
        """Return the duration of the last completed run in microseconds (0 if none)."""
        return self.m_runEndTime - self.m_runStartTime
|
normal
|
{
"blob_id": "66f3590381fe96c49a8926a806b4a845f0d7e25d",
"index": 4681,
"step-1": "<mask token>\n\n\nclass StereoBM:\n <mask token>\n\n def runAsync(self, left_img, right_img):\n self.m_runStartTime = int(round(time.time() * 1000000))\n if left_img is None:\n raise RuntimeError('Invalid left image')\n if right_img is None:\n raise RuntimeError('Invalid right image')\n if left_img.shape[0] != right_img.shape[0] or left_img.shape[1\n ] != right_img.shape[1]:\n raise RuntimeError('Image sizes differ')\n rows = np.int32(left_img.shape[0])\n cols = np.int32(left_img.shape[1])\n with inaccel.allocator:\n self.left_mat = np.array(left_img)\n self.right_mat = np.array(right_img)\n self.disp_mat = np.ndarray((rows, cols), dtype=np.uint16)\n req = inaccel.request('com.xilinx.vitis.vision.stereoBM')\n req.arg(self.left_mat).arg(self.right_mat).arg(self.disp_mat)\n req.arg(self.cameraMA_l_fl).arg(self.cameraMA_r_fl)\n req.arg(self.distC_l_fl).arg(self.distC_r_fl)\n req.arg(self.irA_l_fl).arg(self.irA_r_fl)\n req.arg(self.bm_state_arr)\n req.arg(rows).arg(cols)\n self.response = inaccel.submit(req)\n <mask token>\n\n def run(self, left_img, right_img):\n self.runAsync(left_img, right_img)\n return self.wait()\n\n def lastruntime(self):\n duration = self.m_runEndTime - self.m_runStartTime\n return duration\n",
"step-2": "<mask token>\n\n\nclass StereoBM:\n\n def __init__(self, cameraMA_l=None, cameraMA_r=None, distC_l=None,\n distC_r=None, irA_l=None, irA_r=None, bm_state=None):\n with inaccel.allocator:\n if cameraMA_l is None:\n self.cameraMA_l_fl = np.array([933.173, 0.0, 663.451, 0.0, \n 933.173, 377.015, 0.0, 0.0, 1.0], dtype=np.float32)\n else:\n self.cameraMA_l_fl = np.array(cameraMA_l, dtype=np.float32)\n if cameraMA_r is None:\n self.cameraMA_r_fl = np.array([933.467, 0.0, 678.297, 0.0, \n 933.467, 359.623, 0.0, 0.0, 1.0], dtype=np.float32)\n else:\n self.cameraMA_r_fl = np.array(cameraMA_r, dtype=np.float32)\n if distC_l is None:\n self.distC_l_fl = np.array([-0.169398, 0.0227329, 0.0, 0.0,\n 0.0], dtype=np.float32)\n else:\n self.distC_l_fl = np.array(distC_l, dtype=np.float32)\n if distC_r is None:\n self.distC_r_fl = np.array([-0.170581, 0.0249444, 0.0, 0.0,\n 0.0], dtype=np.float32)\n else:\n self.distC_r_fl = np.array(distC_r, dtype=np.float32)\n if irA_l is None:\n self.irA_l_fl = np.array([0.0011976323, -1.9e-09, -\n 0.8153011732, 7e-10, 0.0011976994, -0.4422348617, \n 1.26839e-05, 1.064e-07, 0.9913820905], dtype=np.float32)\n else:\n self.irA_l_fl = np.array(irA_l, dtype=np.float32)\n if irA_r is None:\n self.irA_r_fl = np.array([0.0011976994, 0.0, -0.8047567905,\n -0.0, 0.0011976994, -0.4420566166, -0.0, -1.064e-07, \n 1.0000392898], dtype=np.float32)\n else:\n self.irA_r_fl = np.array(irA_r, dtype=np.float32)\n if bm_state is None:\n self.bm_state_arr = np.array([0, 15, 31, 15, 0, 48, 20, 15,\n 16, 3, 0], dtype=np.int32)\n else:\n self.bm_state_arr = np.array(bm_state, dtype=np.int32)\n\n def runAsync(self, left_img, right_img):\n self.m_runStartTime = int(round(time.time() * 1000000))\n if left_img is None:\n raise RuntimeError('Invalid left image')\n if right_img is None:\n raise RuntimeError('Invalid right image')\n if left_img.shape[0] != right_img.shape[0] or left_img.shape[1\n ] != right_img.shape[1]:\n raise RuntimeError('Image sizes differ')\n 
rows = np.int32(left_img.shape[0])\n cols = np.int32(left_img.shape[1])\n with inaccel.allocator:\n self.left_mat = np.array(left_img)\n self.right_mat = np.array(right_img)\n self.disp_mat = np.ndarray((rows, cols), dtype=np.uint16)\n req = inaccel.request('com.xilinx.vitis.vision.stereoBM')\n req.arg(self.left_mat).arg(self.right_mat).arg(self.disp_mat)\n req.arg(self.cameraMA_l_fl).arg(self.cameraMA_r_fl)\n req.arg(self.distC_l_fl).arg(self.distC_r_fl)\n req.arg(self.irA_l_fl).arg(self.irA_r_fl)\n req.arg(self.bm_state_arr)\n req.arg(rows).arg(cols)\n self.response = inaccel.submit(req)\n <mask token>\n\n def run(self, left_img, right_img):\n self.runAsync(left_img, right_img)\n return self.wait()\n\n def lastruntime(self):\n duration = self.m_runEndTime - self.m_runStartTime\n return duration\n",
"step-3": "<mask token>\n\n\nclass StereoBM:\n\n def __init__(self, cameraMA_l=None, cameraMA_r=None, distC_l=None,\n distC_r=None, irA_l=None, irA_r=None, bm_state=None):\n with inaccel.allocator:\n if cameraMA_l is None:\n self.cameraMA_l_fl = np.array([933.173, 0.0, 663.451, 0.0, \n 933.173, 377.015, 0.0, 0.0, 1.0], dtype=np.float32)\n else:\n self.cameraMA_l_fl = np.array(cameraMA_l, dtype=np.float32)\n if cameraMA_r is None:\n self.cameraMA_r_fl = np.array([933.467, 0.0, 678.297, 0.0, \n 933.467, 359.623, 0.0, 0.0, 1.0], dtype=np.float32)\n else:\n self.cameraMA_r_fl = np.array(cameraMA_r, dtype=np.float32)\n if distC_l is None:\n self.distC_l_fl = np.array([-0.169398, 0.0227329, 0.0, 0.0,\n 0.0], dtype=np.float32)\n else:\n self.distC_l_fl = np.array(distC_l, dtype=np.float32)\n if distC_r is None:\n self.distC_r_fl = np.array([-0.170581, 0.0249444, 0.0, 0.0,\n 0.0], dtype=np.float32)\n else:\n self.distC_r_fl = np.array(distC_r, dtype=np.float32)\n if irA_l is None:\n self.irA_l_fl = np.array([0.0011976323, -1.9e-09, -\n 0.8153011732, 7e-10, 0.0011976994, -0.4422348617, \n 1.26839e-05, 1.064e-07, 0.9913820905], dtype=np.float32)\n else:\n self.irA_l_fl = np.array(irA_l, dtype=np.float32)\n if irA_r is None:\n self.irA_r_fl = np.array([0.0011976994, 0.0, -0.8047567905,\n -0.0, 0.0011976994, -0.4420566166, -0.0, -1.064e-07, \n 1.0000392898], dtype=np.float32)\n else:\n self.irA_r_fl = np.array(irA_r, dtype=np.float32)\n if bm_state is None:\n self.bm_state_arr = np.array([0, 15, 31, 15, 0, 48, 20, 15,\n 16, 3, 0], dtype=np.int32)\n else:\n self.bm_state_arr = np.array(bm_state, dtype=np.int32)\n\n def runAsync(self, left_img, right_img):\n self.m_runStartTime = int(round(time.time() * 1000000))\n if left_img is None:\n raise RuntimeError('Invalid left image')\n if right_img is None:\n raise RuntimeError('Invalid right image')\n if left_img.shape[0] != right_img.shape[0] or left_img.shape[1\n ] != right_img.shape[1]:\n raise RuntimeError('Image sizes differ')\n 
rows = np.int32(left_img.shape[0])\n cols = np.int32(left_img.shape[1])\n with inaccel.allocator:\n self.left_mat = np.array(left_img)\n self.right_mat = np.array(right_img)\n self.disp_mat = np.ndarray((rows, cols), dtype=np.uint16)\n req = inaccel.request('com.xilinx.vitis.vision.stereoBM')\n req.arg(self.left_mat).arg(self.right_mat).arg(self.disp_mat)\n req.arg(self.cameraMA_l_fl).arg(self.cameraMA_r_fl)\n req.arg(self.distC_l_fl).arg(self.distC_r_fl)\n req.arg(self.irA_l_fl).arg(self.irA_r_fl)\n req.arg(self.bm_state_arr)\n req.arg(rows).arg(cols)\n self.response = inaccel.submit(req)\n\n def wait(self):\n self.response.result()\n disp_mat_scaled = (self.disp_mat.view(np.ndarray) * (256.0 / 48.0) /\n 16.0).astype(np.uint8)\n self.m_runEndTime = int(round(time.time() * 1000000))\n return disp_mat_scaled\n\n def run(self, left_img, right_img):\n self.runAsync(left_img, right_img)\n return self.wait()\n\n def lastruntime(self):\n duration = self.m_runEndTime - self.m_runStartTime\n return duration\n",
"step-4": "import inaccel.coral as inaccel\nimport numpy as np\nimport time\n\n\nclass StereoBM:\n\n def __init__(self, cameraMA_l=None, cameraMA_r=None, distC_l=None,\n distC_r=None, irA_l=None, irA_r=None, bm_state=None):\n with inaccel.allocator:\n if cameraMA_l is None:\n self.cameraMA_l_fl = np.array([933.173, 0.0, 663.451, 0.0, \n 933.173, 377.015, 0.0, 0.0, 1.0], dtype=np.float32)\n else:\n self.cameraMA_l_fl = np.array(cameraMA_l, dtype=np.float32)\n if cameraMA_r is None:\n self.cameraMA_r_fl = np.array([933.467, 0.0, 678.297, 0.0, \n 933.467, 359.623, 0.0, 0.0, 1.0], dtype=np.float32)\n else:\n self.cameraMA_r_fl = np.array(cameraMA_r, dtype=np.float32)\n if distC_l is None:\n self.distC_l_fl = np.array([-0.169398, 0.0227329, 0.0, 0.0,\n 0.0], dtype=np.float32)\n else:\n self.distC_l_fl = np.array(distC_l, dtype=np.float32)\n if distC_r is None:\n self.distC_r_fl = np.array([-0.170581, 0.0249444, 0.0, 0.0,\n 0.0], dtype=np.float32)\n else:\n self.distC_r_fl = np.array(distC_r, dtype=np.float32)\n if irA_l is None:\n self.irA_l_fl = np.array([0.0011976323, -1.9e-09, -\n 0.8153011732, 7e-10, 0.0011976994, -0.4422348617, \n 1.26839e-05, 1.064e-07, 0.9913820905], dtype=np.float32)\n else:\n self.irA_l_fl = np.array(irA_l, dtype=np.float32)\n if irA_r is None:\n self.irA_r_fl = np.array([0.0011976994, 0.0, -0.8047567905,\n -0.0, 0.0011976994, -0.4420566166, -0.0, -1.064e-07, \n 1.0000392898], dtype=np.float32)\n else:\n self.irA_r_fl = np.array(irA_r, dtype=np.float32)\n if bm_state is None:\n self.bm_state_arr = np.array([0, 15, 31, 15, 0, 48, 20, 15,\n 16, 3, 0], dtype=np.int32)\n else:\n self.bm_state_arr = np.array(bm_state, dtype=np.int32)\n\n def runAsync(self, left_img, right_img):\n self.m_runStartTime = int(round(time.time() * 1000000))\n if left_img is None:\n raise RuntimeError('Invalid left image')\n if right_img is None:\n raise RuntimeError('Invalid right image')\n if left_img.shape[0] != right_img.shape[0] or left_img.shape[1\n ] != 
right_img.shape[1]:\n raise RuntimeError('Image sizes differ')\n rows = np.int32(left_img.shape[0])\n cols = np.int32(left_img.shape[1])\n with inaccel.allocator:\n self.left_mat = np.array(left_img)\n self.right_mat = np.array(right_img)\n self.disp_mat = np.ndarray((rows, cols), dtype=np.uint16)\n req = inaccel.request('com.xilinx.vitis.vision.stereoBM')\n req.arg(self.left_mat).arg(self.right_mat).arg(self.disp_mat)\n req.arg(self.cameraMA_l_fl).arg(self.cameraMA_r_fl)\n req.arg(self.distC_l_fl).arg(self.distC_r_fl)\n req.arg(self.irA_l_fl).arg(self.irA_r_fl)\n req.arg(self.bm_state_arr)\n req.arg(rows).arg(cols)\n self.response = inaccel.submit(req)\n\n def wait(self):\n self.response.result()\n disp_mat_scaled = (self.disp_mat.view(np.ndarray) * (256.0 / 48.0) /\n 16.0).astype(np.uint8)\n self.m_runEndTime = int(round(time.time() * 1000000))\n return disp_mat_scaled\n\n def run(self, left_img, right_img):\n self.runAsync(left_img, right_img)\n return self.wait()\n\n def lastruntime(self):\n duration = self.m_runEndTime - self.m_runStartTime\n return duration\n",
"step-5": "import inaccel.coral as inaccel\nimport numpy as np\nimport time\n\nclass StereoBM:\n\tdef __init__(self, cameraMA_l=None, cameraMA_r=None, distC_l=None, distC_r=None, irA_l=None, irA_r=None, bm_state=None ):\n\t\t# allocate mem for camera parameters for rectification and bm_state class\n\t\twith inaccel.allocator:\n\t\t\tif cameraMA_l is None:\n\t\t\t\tself.cameraMA_l_fl = np.array([933.173, 0.0, 663.451, 0.0, 933.173, 377.015, 0.0, 0.0, 1.0], dtype=np.float32)\n\t\t\telse:\n\t\t\t\tself.cameraMA_l_fl = np.array(cameraMA_l, dtype=np.float32)\n\n\t\t\tif cameraMA_r is None:\n\t\t\t\tself.cameraMA_r_fl = np.array([933.467, 0.0, 678.297, 0.0, 933.467, 359.623, 0.0, 0.0, 1.0], dtype=np.float32)\n\t\t\telse:\n\t\t\t\tself.cameraMA_r_fl = np.array(cameraMA_r, dtype=np.float32)\n\n\t\t\tif distC_l is None:\n\t\t\t\tself.distC_l_fl = np.array([-0.169398, 0.0227329, 0.0, 0.0, 0.0], dtype=np.float32)\n\t\t\telse:\n\t\t\t\tself.distC_l_fl = np.array(distC_l, dtype=np.float32)\n\n\t\t\tif distC_r is None:\n\t\t\t\tself.distC_r_fl = np.array([-0.170581, 0.0249444, 0.0, 0.0, 0.0], dtype=np.float32)\n\t\t\telse:\n\t\t\t\tself.distC_r_fl = np.array(distC_r, dtype=np.float32)\n\n\t\t\tif irA_l is None:\n\t\t\t\tself.irA_l_fl = np.array([0.0011976323, -0.0000000019, -0.8153011732, 0.0000000007, 0.0011976994, \\\n\t \t\t\t\t\t\t\t\t-0.4422348617, 0.0000126839, 0.0000001064, 0.9913820905], dtype=np.float32)\n\t\t\telse:\n\t\t\t\tself.irA_l_fl = np.array(irA_l, dtype=np.float32)\n\n\t\t\tif irA_r is None:\n\t\t\t\tself.irA_r_fl = np.array([0.0011976994, 0.0000000000, -0.8047567905, -0.0000000000, 0.0011976994, \\\n\t -0.4420566166, -0.0000000000, -0.0000001064, 1.0000392898], dtype=np.float32)\n\t\t\telse:\n\t\t\t\tself.irA_r_fl = np.array(irA_r, dtype=np.float32)\n\n\t\t\tif bm_state is None:\n\t\t\t\tself.bm_state_arr = np.array([0, 15, 31, 15, 0, 48, 20, 15, 16, 3, 0], dtype=np.int32)\n\t\t\telse:\n\t\t\t\tself.bm_state_arr = np.array(bm_state, dtype=np.int32)\n\n\n\tdef 
runAsync(self, left_img, right_img):\n\t\tself.m_runStartTime = int(round(time.time() * 1000000))\n\n\t\tif left_img is None:\n\t\t\traise RuntimeError('Invalid left image')\n\t\tif right_img is None:\n\t\t\traise RuntimeError('Invalid right image')\n\t\tif left_img.shape[0] != right_img.shape[0] or left_img.shape[1] != right_img.shape[1]:\n\t\t\traise RuntimeError('Image sizes differ')\n\n\t\t# allocate and initialize buffers\n\t\trows = np.int32(left_img.shape[0]);\n\t\tcols = np.int32(left_img.shape[1]);\n\n\t\twith inaccel.allocator:\n\t\t\tself.left_mat = np.array(left_img)\n\t\t\tself.right_mat = np.array(right_img)\n\t\t\tself.disp_mat = np.ndarray((rows, cols), dtype=np.uint16)\n\n\t\t# Create request for stereo accelerator\n\t\treq = inaccel.request('com.xilinx.vitis.vision.stereoBM')\n\t\treq.arg(self.left_mat).arg(self.right_mat).arg(self.disp_mat)\n\t\treq.arg(self.cameraMA_l_fl).arg(self.cameraMA_r_fl)\n\t\treq.arg(self.distC_l_fl).arg(self.distC_r_fl)\n\t\treq.arg(self.irA_l_fl).arg(self.irA_r_fl)\n\t\treq.arg(self.bm_state_arr)\n\t\treq.arg(rows).arg(cols)\n\n\t\tself.response = inaccel.submit(req)\n\n\tdef wait(self):\n\t\t# Send request and wait for completion\n\t\tself.response.result()\n\n\t\t# Write output image\n\t\tdisp_mat_scaled = (self.disp_mat.view(np.ndarray)*(256.0 / 48.0) / (16.0)).astype(np.uint8)\n\n\t\tself.m_runEndTime = int(round(time.time() * 1000000))\n\t\treturn disp_mat_scaled;\n\n\tdef run(self, left_img, right_img):\n\t\tself.runAsync(left_img, right_img)\n\t\treturn self.wait()\n\n\tdef lastruntime(self):\n\t\tduration = self.m_runEndTime - self.m_runStartTime\n\t\treturn duration\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DocumentForm(forms.ModelForm):
class Meta:
model = Upload
fields = 'document',
<|reserved_special_token_1|>
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Upload
class DocumentForm(forms.ModelForm):
class Meta:
model = Upload
fields = 'document',
<|reserved_special_token_1|>
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Upload
class DocumentForm(forms.ModelForm):
class Meta:
model = Upload
fields = ('document',)
|
flexible
|
{
"blob_id": "e7b1ccbcbb81ff02561d858a4db54d49a2aa0f8a",
"index": 6094,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass DocumentForm(forms.ModelForm):\n\n\n class Meta:\n model = Upload\n fields = 'document',\n",
"step-3": "from django import forms\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .models import Upload\n\n\nclass DocumentForm(forms.ModelForm):\n\n\n class Meta:\n model = Upload\n fields = 'document',\n",
"step-4": "from django import forms\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .models import Upload\n\nclass DocumentForm(forms.ModelForm):\n class Meta:\n model = Upload\n fields = ('document',)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Student(db.Model):
__tablename__ = 'students'
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.Text)
last_name = db.Column(db.Text)
excuses = db.relationship('Excuse', backref='student', lazy='dynamic')
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
class Excuse(db.Model):
__tablename__ = 'excuses'
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.Text)
is_believable = db.Column(db.Text)
student_id = db.Column(db.Integer, db.ForeignKey('students.id'))
@app.route('/')
def root():
return redirect(url_for('index'))
@app.route('/students', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
new_student = Student(request.form['first_name'], request.form[
'last_name'])
db.session.add(new_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/index.html', students=Student.query.all())
@app.route('/students/new')
def new():
return render_template('students/new.html')
@app.route('/students/<int:id>/edit')
def edit(id):
return render_template('students/edit.html', student=Student.query.get(id))
@app.route('/students/<int:id>', methods=['GET', 'PATCH'])
def show(id):
found_student = Student.query.get(id)
if request.method == b'PATCH':
found_student.first_name = request.form['first_name']
found_student.last_name = request.form['last_name']
db.session.add(found_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/show.html', student=found_student)
@app.route('/students/<int:id>/excuses', methods=['GET', 'POST'])
def excuses_index(id):
found_student = Student.query.get(id)
if request.method == 'POST':
new_excuse = Excuse(content=request.form.get('content'),
is_believable=request.form.get('is_believable'), student_id=id)
db.session.add(new_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=id))
excuses_list = found_student.excuses.all()
return render_template('excuses/index.html', excuses=excuses_list,
student=found_student)
@app.route('/students/<int:id>/excuses/new')
def new_excuse(id):
return render_template('/excuses/new.html', id=id)
@app.route('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[
'GET', 'PATCH', 'DELETE'])
def edit_excuse(id, excuse_id):
print(id)
found_student = Student.query.get(id)
found_excuse = Excuse.query.get(excuse_id)
excuses_list = found_student.excuses.all()
if request.method == b'DELETE':
db.session.delete(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
elif request.method == b'PATCH':
found_excuse.content = request.form.get('content')
found_excuse.is_believable = request.form.get('is_believable')
db.session.add(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
return render_template('excuses/edit.html', excuse=found_excuse,
student=found_student)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
Migrate(app, db)
class Student(db.Model):
__tablename__ = 'students'
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.Text)
last_name = db.Column(db.Text)
excuses = db.relationship('Excuse', backref='student', lazy='dynamic')
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
class Excuse(db.Model):
__tablename__ = 'excuses'
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.Text)
is_believable = db.Column(db.Text)
student_id = db.Column(db.Integer, db.ForeignKey('students.id'))
@app.route('/')
def root():
return redirect(url_for('index'))
@app.route('/students', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
new_student = Student(request.form['first_name'], request.form[
'last_name'])
db.session.add(new_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/index.html', students=Student.query.all())
@app.route('/students/new')
def new():
return render_template('students/new.html')
@app.route('/students/<int:id>/edit')
def edit(id):
return render_template('students/edit.html', student=Student.query.get(id))
@app.route('/students/<int:id>', methods=['GET', 'PATCH'])
def show(id):
found_student = Student.query.get(id)
if request.method == b'PATCH':
found_student.first_name = request.form['first_name']
found_student.last_name = request.form['last_name']
db.session.add(found_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/show.html', student=found_student)
@app.route('/students/<int:id>/excuses', methods=['GET', 'POST'])
def excuses_index(id):
found_student = Student.query.get(id)
if request.method == 'POST':
new_excuse = Excuse(content=request.form.get('content'),
is_believable=request.form.get('is_believable'), student_id=id)
db.session.add(new_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=id))
excuses_list = found_student.excuses.all()
return render_template('excuses/index.html', excuses=excuses_list,
student=found_student)
@app.route('/students/<int:id>/excuses/new')
def new_excuse(id):
return render_template('/excuses/new.html', id=id)
@app.route('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[
'GET', 'PATCH', 'DELETE'])
def edit_excuse(id, excuse_id):
print(id)
found_student = Student.query.get(id)
found_excuse = Excuse.query.get(excuse_id)
excuses_list = found_student.excuses.all()
if request.method == b'DELETE':
db.session.delete(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
elif request.method == b'PATCH':
found_excuse.content = request.form.get('content')
found_excuse.is_believable = request.form.get('is_believable')
db.session.add(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
return render_template('excuses/edit.html', excuse=found_excuse,
student=found_student)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'
] = 'postgres://localhost/flask_one_to_many'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True
modus = Modus(app)
db = SQLAlchemy(app)
Migrate(app, db)
class Student(db.Model):
__tablename__ = 'students'
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.Text)
last_name = db.Column(db.Text)
excuses = db.relationship('Excuse', backref='student', lazy='dynamic')
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
class Excuse(db.Model):
__tablename__ = 'excuses'
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.Text)
is_believable = db.Column(db.Text)
student_id = db.Column(db.Integer, db.ForeignKey('students.id'))
@app.route('/')
def root():
return redirect(url_for('index'))
@app.route('/students', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
new_student = Student(request.form['first_name'], request.form[
'last_name'])
db.session.add(new_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/index.html', students=Student.query.all())
@app.route('/students/new')
def new():
return render_template('students/new.html')
@app.route('/students/<int:id>/edit')
def edit(id):
return render_template('students/edit.html', student=Student.query.get(id))
@app.route('/students/<int:id>', methods=['GET', 'PATCH'])
def show(id):
found_student = Student.query.get(id)
if request.method == b'PATCH':
found_student.first_name = request.form['first_name']
found_student.last_name = request.form['last_name']
db.session.add(found_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/show.html', student=found_student)
@app.route('/students/<int:id>/excuses', methods=['GET', 'POST'])
def excuses_index(id):
found_student = Student.query.get(id)
if request.method == 'POST':
new_excuse = Excuse(content=request.form.get('content'),
is_believable=request.form.get('is_believable'), student_id=id)
db.session.add(new_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=id))
excuses_list = found_student.excuses.all()
return render_template('excuses/index.html', excuses=excuses_list,
student=found_student)
@app.route('/students/<int:id>/excuses/new')
def new_excuse(id):
return render_template('/excuses/new.html', id=id)
@app.route('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[
'GET', 'PATCH', 'DELETE'])
def edit_excuse(id, excuse_id):
print(id)
found_student = Student.query.get(id)
found_excuse = Excuse.query.get(excuse_id)
excuses_list = found_student.excuses.all()
if request.method == b'DELETE':
db.session.delete(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
elif request.method == b'PATCH':
found_excuse.content = request.form.get('content')
found_excuse.is_believable = request.form.get('is_believable')
db.session.add(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
return render_template('excuses/edit.html', excuse=found_excuse,
student=found_student)
<|reserved_special_token_1|>
from flask import Flask, request, redirect, url_for, render_template
from flask_modus import Modus
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'
] = 'postgres://localhost/flask_one_to_many'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True
modus = Modus(app)
db = SQLAlchemy(app)
Migrate(app, db)
class Student(db.Model):
__tablename__ = 'students'
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.Text)
last_name = db.Column(db.Text)
excuses = db.relationship('Excuse', backref='student', lazy='dynamic')
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
class Excuse(db.Model):
__tablename__ = 'excuses'
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.Text)
is_believable = db.Column(db.Text)
student_id = db.Column(db.Integer, db.ForeignKey('students.id'))
@app.route('/')
def root():
return redirect(url_for('index'))
@app.route('/students', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
new_student = Student(request.form['first_name'], request.form[
'last_name'])
db.session.add(new_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/index.html', students=Student.query.all())
@app.route('/students/new')
def new():
return render_template('students/new.html')
@app.route('/students/<int:id>/edit')
def edit(id):
return render_template('students/edit.html', student=Student.query.get(id))
@app.route('/students/<int:id>', methods=['GET', 'PATCH'])
def show(id):
found_student = Student.query.get(id)
if request.method == b'PATCH':
found_student.first_name = request.form['first_name']
found_student.last_name = request.form['last_name']
db.session.add(found_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/show.html', student=found_student)
@app.route('/students/<int:id>/excuses', methods=['GET', 'POST'])
def excuses_index(id):
found_student = Student.query.get(id)
if request.method == 'POST':
new_excuse = Excuse(content=request.form.get('content'),
is_believable=request.form.get('is_believable'), student_id=id)
db.session.add(new_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=id))
excuses_list = found_student.excuses.all()
return render_template('excuses/index.html', excuses=excuses_list,
student=found_student)
@app.route('/students/<int:id>/excuses/new')
def new_excuse(id):
return render_template('/excuses/new.html', id=id)
@app.route('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[
'GET', 'PATCH', 'DELETE'])
def edit_excuse(id, excuse_id):
print(id)
found_student = Student.query.get(id)
found_excuse = Excuse.query.get(excuse_id)
excuses_list = found_student.excuses.all()
if request.method == b'DELETE':
db.session.delete(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
elif request.method == b'PATCH':
found_excuse.content = request.form.get('content')
found_excuse.is_believable = request.form.get('is_believable')
db.session.add(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
return render_template('excuses/edit.html', excuse=found_excuse,
student=found_student)
<|reserved_special_token_1|>
from flask import Flask, request, redirect, url_for, render_template
from flask_modus import Modus
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config[
'SQLALCHEMY_DATABASE_URI'] = "postgres://localhost/flask_one_to_many"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True
modus = Modus(app)
db = SQLAlchemy(app)
Migrate(app, db)
class Student(db.Model):
__tablename__ = "students"
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.Text)
last_name = db.Column(db.Text)
excuses = db.relationship('Excuse', backref='student',
lazy='dynamic')
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
class Excuse(db.Model):
__tablename__ = "excuses"
id = db.Column(db.Integer, primary_key = True)
content = db.Column(db.Text)
is_believable = db.Column(db.Text)
student_id = db.Column(db.Integer, db.ForeignKey("students.id"))
@app.route('/')
def root():
return redirect(url_for('index'))
@app.route('/students', methods=["GET", "POST"])
def index():
if request.method == 'POST':
new_student = Student(request.form['first_name'],
request.form['last_name'])
db.session.add(new_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/index.html', students=Student.query.all())
@app.route('/students/new')
def new():
return render_template('students/new.html')
@app.route('/students/<int:id>/edit')
def edit(id):
return render_template('students/edit.html', student=Student.query.get(id))
@app.route('/students/<int:id>', methods=["GET", "PATCH"])
def show(id):
found_student = Student.query.get(id)
if request.method == b'PATCH':
found_student.first_name = request.form['first_name']
found_student.last_name = request.form['last_name']
db.session.add(found_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/show.html', student=found_student)
@app.route("/students/<int:id>/excuses", methods = ["GET", "POST"])
def excuses_index(id):
found_student = Student.query.get(id)
if request.method == "POST":
new_excuse = Excuse(content = request.form.get("content"), is_believable = request.form.get("is_believable"), student_id = id)
db.session.add(new_excuse)
db.session.commit()
return redirect(url_for("excuses_index", id = id))
excuses_list = found_student.excuses.all()
return render_template("excuses/index.html", excuses=excuses_list, student= found_student)
@app.route("/students/<int:id>/excuses/new")
def new_excuse(id):
return render_template("/excuses/new.html", id = id)
@app.route("/students/<int:id>/excuses/<int:excuse_id>/edit", methods = ["GET", "PATCH","DELETE"])
def edit_excuse(id,excuse_id):
print(id)
found_student = Student.query.get(id)
found_excuse = Excuse.query.get(excuse_id)
excuses_list = found_student.excuses.all()
if request.method == b'DELETE':
db.session.delete(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id = found_student.id))
elif request.method == b"PATCH":
found_excuse.content = request.form.get("content")
found_excuse.is_believable = request.form.get("is_believable")
db.session.add(found_excuse)
db.session.commit()
return redirect(url_for("excuses_index", id = found_student.id))
# return render_template("excuses/index.html",excuses = excuses_list, student = found_student)
return render_template("excuses/edit.html",excuse = found_excuse, student = found_student)
|
flexible
|
{
"blob_id": "026e06e777d64f8724ec5e89a7829b3a42a25d6b",
"index": 800,
"step-1": "<mask token>\n\n\nclass Student(db.Model):\n __tablename__ = 'students'\n id = db.Column(db.Integer, primary_key=True)\n first_name = db.Column(db.Text)\n last_name = db.Column(db.Text)\n excuses = db.relationship('Excuse', backref='student', lazy='dynamic')\n\n def __init__(self, first_name, last_name):\n self.first_name = first_name\n self.last_name = last_name\n\n\nclass Excuse(db.Model):\n __tablename__ = 'excuses'\n id = db.Column(db.Integer, primary_key=True)\n content = db.Column(db.Text)\n is_believable = db.Column(db.Text)\n student_id = db.Column(db.Integer, db.ForeignKey('students.id'))\n\n\n@app.route('/')\ndef root():\n return redirect(url_for('index'))\n\n\n@app.route('/students', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n new_student = Student(request.form['first_name'], request.form[\n 'last_name'])\n db.session.add(new_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/index.html', students=Student.query.all())\n\n\n@app.route('/students/new')\ndef new():\n return render_template('students/new.html')\n\n\n@app.route('/students/<int:id>/edit')\ndef edit(id):\n return render_template('students/edit.html', student=Student.query.get(id))\n\n\n@app.route('/students/<int:id>', methods=['GET', 'PATCH'])\ndef show(id):\n found_student = Student.query.get(id)\n if request.method == b'PATCH':\n found_student.first_name = request.form['first_name']\n found_student.last_name = request.form['last_name']\n db.session.add(found_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/show.html', student=found_student)\n\n\n@app.route('/students/<int:id>/excuses', methods=['GET', 'POST'])\ndef excuses_index(id):\n found_student = Student.query.get(id)\n if request.method == 'POST':\n new_excuse = Excuse(content=request.form.get('content'),\n is_believable=request.form.get('is_believable'), student_id=id)\n 
db.session.add(new_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=id))\n excuses_list = found_student.excuses.all()\n return render_template('excuses/index.html', excuses=excuses_list,\n student=found_student)\n\n\n@app.route('/students/<int:id>/excuses/new')\ndef new_excuse(id):\n return render_template('/excuses/new.html', id=id)\n\n\n@app.route('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[\n 'GET', 'PATCH', 'DELETE'])\ndef edit_excuse(id, excuse_id):\n print(id)\n found_student = Student.query.get(id)\n found_excuse = Excuse.query.get(excuse_id)\n excuses_list = found_student.excuses.all()\n if request.method == b'DELETE':\n db.session.delete(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n elif request.method == b'PATCH':\n found_excuse.content = request.form.get('content')\n found_excuse.is_believable = request.form.get('is_believable')\n db.session.add(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n return render_template('excuses/edit.html', excuse=found_excuse,\n student=found_student)\n",
"step-2": "<mask token>\nMigrate(app, db)\n\n\nclass Student(db.Model):\n __tablename__ = 'students'\n id = db.Column(db.Integer, primary_key=True)\n first_name = db.Column(db.Text)\n last_name = db.Column(db.Text)\n excuses = db.relationship('Excuse', backref='student', lazy='dynamic')\n\n def __init__(self, first_name, last_name):\n self.first_name = first_name\n self.last_name = last_name\n\n\nclass Excuse(db.Model):\n __tablename__ = 'excuses'\n id = db.Column(db.Integer, primary_key=True)\n content = db.Column(db.Text)\n is_believable = db.Column(db.Text)\n student_id = db.Column(db.Integer, db.ForeignKey('students.id'))\n\n\n@app.route('/')\ndef root():\n return redirect(url_for('index'))\n\n\n@app.route('/students', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n new_student = Student(request.form['first_name'], request.form[\n 'last_name'])\n db.session.add(new_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/index.html', students=Student.query.all())\n\n\n@app.route('/students/new')\ndef new():\n return render_template('students/new.html')\n\n\n@app.route('/students/<int:id>/edit')\ndef edit(id):\n return render_template('students/edit.html', student=Student.query.get(id))\n\n\n@app.route('/students/<int:id>', methods=['GET', 'PATCH'])\ndef show(id):\n found_student = Student.query.get(id)\n if request.method == b'PATCH':\n found_student.first_name = request.form['first_name']\n found_student.last_name = request.form['last_name']\n db.session.add(found_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/show.html', student=found_student)\n\n\n@app.route('/students/<int:id>/excuses', methods=['GET', 'POST'])\ndef excuses_index(id):\n found_student = Student.query.get(id)\n if request.method == 'POST':\n new_excuse = Excuse(content=request.form.get('content'),\n is_believable=request.form.get('is_believable'), student_id=id)\n 
db.session.add(new_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=id))\n excuses_list = found_student.excuses.all()\n return render_template('excuses/index.html', excuses=excuses_list,\n student=found_student)\n\n\n@app.route('/students/<int:id>/excuses/new')\ndef new_excuse(id):\n return render_template('/excuses/new.html', id=id)\n\n\n@app.route('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[\n 'GET', 'PATCH', 'DELETE'])\ndef edit_excuse(id, excuse_id):\n print(id)\n found_student = Student.query.get(id)\n found_excuse = Excuse.query.get(excuse_id)\n excuses_list = found_student.excuses.all()\n if request.method == b'DELETE':\n db.session.delete(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n elif request.method == b'PATCH':\n found_excuse.content = request.form.get('content')\n found_excuse.is_believable = request.form.get('is_believable')\n db.session.add(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n return render_template('excuses/edit.html', excuse=found_excuse,\n student=found_student)\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'\n ] = 'postgres://localhost/flask_one_to_many'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\nmodus = Modus(app)\ndb = SQLAlchemy(app)\nMigrate(app, db)\n\n\nclass Student(db.Model):\n __tablename__ = 'students'\n id = db.Column(db.Integer, primary_key=True)\n first_name = db.Column(db.Text)\n last_name = db.Column(db.Text)\n excuses = db.relationship('Excuse', backref='student', lazy='dynamic')\n\n def __init__(self, first_name, last_name):\n self.first_name = first_name\n self.last_name = last_name\n\n\nclass Excuse(db.Model):\n __tablename__ = 'excuses'\n id = db.Column(db.Integer, primary_key=True)\n content = db.Column(db.Text)\n is_believable = db.Column(db.Text)\n student_id = db.Column(db.Integer, db.ForeignKey('students.id'))\n\n\n@app.route('/')\ndef root():\n return redirect(url_for('index'))\n\n\n@app.route('/students', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n new_student = Student(request.form['first_name'], request.form[\n 'last_name'])\n db.session.add(new_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/index.html', students=Student.query.all())\n\n\n@app.route('/students/new')\ndef new():\n return render_template('students/new.html')\n\n\n@app.route('/students/<int:id>/edit')\ndef edit(id):\n return render_template('students/edit.html', student=Student.query.get(id))\n\n\n@app.route('/students/<int:id>', methods=['GET', 'PATCH'])\ndef show(id):\n found_student = Student.query.get(id)\n if request.method == b'PATCH':\n found_student.first_name = request.form['first_name']\n found_student.last_name = request.form['last_name']\n db.session.add(found_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/show.html', student=found_student)\n\n\n@app.route('/students/<int:id>/excuses', 
methods=['GET', 'POST'])\ndef excuses_index(id):\n found_student = Student.query.get(id)\n if request.method == 'POST':\n new_excuse = Excuse(content=request.form.get('content'),\n is_believable=request.form.get('is_believable'), student_id=id)\n db.session.add(new_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=id))\n excuses_list = found_student.excuses.all()\n return render_template('excuses/index.html', excuses=excuses_list,\n student=found_student)\n\n\n@app.route('/students/<int:id>/excuses/new')\ndef new_excuse(id):\n return render_template('/excuses/new.html', id=id)\n\n\n@app.route('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[\n 'GET', 'PATCH', 'DELETE'])\ndef edit_excuse(id, excuse_id):\n print(id)\n found_student = Student.query.get(id)\n found_excuse = Excuse.query.get(excuse_id)\n excuses_list = found_student.excuses.all()\n if request.method == b'DELETE':\n db.session.delete(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n elif request.method == b'PATCH':\n found_excuse.content = request.form.get('content')\n found_excuse.is_believable = request.form.get('is_believable')\n db.session.add(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n return render_template('excuses/edit.html', excuse=found_excuse,\n student=found_student)\n",
"step-4": "from flask import Flask, request, redirect, url_for, render_template\nfrom flask_modus import Modus\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'\n ] = 'postgres://localhost/flask_one_to_many'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\nmodus = Modus(app)\ndb = SQLAlchemy(app)\nMigrate(app, db)\n\n\nclass Student(db.Model):\n __tablename__ = 'students'\n id = db.Column(db.Integer, primary_key=True)\n first_name = db.Column(db.Text)\n last_name = db.Column(db.Text)\n excuses = db.relationship('Excuse', backref='student', lazy='dynamic')\n\n def __init__(self, first_name, last_name):\n self.first_name = first_name\n self.last_name = last_name\n\n\nclass Excuse(db.Model):\n __tablename__ = 'excuses'\n id = db.Column(db.Integer, primary_key=True)\n content = db.Column(db.Text)\n is_believable = db.Column(db.Text)\n student_id = db.Column(db.Integer, db.ForeignKey('students.id'))\n\n\n@app.route('/')\ndef root():\n return redirect(url_for('index'))\n\n\n@app.route('/students', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n new_student = Student(request.form['first_name'], request.form[\n 'last_name'])\n db.session.add(new_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/index.html', students=Student.query.all())\n\n\n@app.route('/students/new')\ndef new():\n return render_template('students/new.html')\n\n\n@app.route('/students/<int:id>/edit')\ndef edit(id):\n return render_template('students/edit.html', student=Student.query.get(id))\n\n\n@app.route('/students/<int:id>', methods=['GET', 'PATCH'])\ndef show(id):\n found_student = Student.query.get(id)\n if request.method == b'PATCH':\n found_student.first_name = request.form['first_name']\n found_student.last_name = request.form['last_name']\n db.session.add(found_student)\n 
db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/show.html', student=found_student)\n\n\n@app.route('/students/<int:id>/excuses', methods=['GET', 'POST'])\ndef excuses_index(id):\n found_student = Student.query.get(id)\n if request.method == 'POST':\n new_excuse = Excuse(content=request.form.get('content'),\n is_believable=request.form.get('is_believable'), student_id=id)\n db.session.add(new_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=id))\n excuses_list = found_student.excuses.all()\n return render_template('excuses/index.html', excuses=excuses_list,\n student=found_student)\n\n\n@app.route('/students/<int:id>/excuses/new')\ndef new_excuse(id):\n return render_template('/excuses/new.html', id=id)\n\n\n@app.route('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[\n 'GET', 'PATCH', 'DELETE'])\ndef edit_excuse(id, excuse_id):\n print(id)\n found_student = Student.query.get(id)\n found_excuse = Excuse.query.get(excuse_id)\n excuses_list = found_student.excuses.all()\n if request.method == b'DELETE':\n db.session.delete(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n elif request.method == b'PATCH':\n found_excuse.content = request.form.get('content')\n found_excuse.is_believable = request.form.get('is_believable')\n db.session.add(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n return render_template('excuses/edit.html', excuse=found_excuse,\n student=found_student)\n",
"step-5": "from flask import Flask, request, redirect, url_for, render_template\nfrom flask_modus import Modus\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\n\napp = Flask(__name__)\napp.config[\n 'SQLALCHEMY_DATABASE_URI'] = \"postgres://localhost/flask_one_to_many\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\nmodus = Modus(app)\ndb = SQLAlchemy(app)\nMigrate(app, db)\n\n\nclass Student(db.Model):\n\n __tablename__ = \"students\"\n\n id = db.Column(db.Integer, primary_key=True)\n first_name = db.Column(db.Text)\n last_name = db.Column(db.Text)\n excuses = db.relationship('Excuse', backref='student',\n lazy='dynamic')\n def __init__(self, first_name, last_name):\n self.first_name = first_name\n self.last_name = last_name\n\nclass Excuse(db.Model):\n __tablename__ = \"excuses\"\n\n id = db.Column(db.Integer, primary_key = True)\n content = db.Column(db.Text)\n is_believable = db.Column(db.Text)\n student_id = db.Column(db.Integer, db.ForeignKey(\"students.id\"))\n \n\n\n\n\n@app.route('/')\ndef root():\n return redirect(url_for('index'))\n\n\n@app.route('/students', methods=[\"GET\", \"POST\"])\ndef index():\n if request.method == 'POST':\n new_student = Student(request.form['first_name'],\n request.form['last_name'])\n db.session.add(new_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/index.html', students=Student.query.all())\n\n\n@app.route('/students/new')\ndef new():\n return render_template('students/new.html')\n\n\n@app.route('/students/<int:id>/edit')\ndef edit(id):\n return render_template('students/edit.html', student=Student.query.get(id))\n\n\n@app.route('/students/<int:id>', methods=[\"GET\", \"PATCH\"])\ndef show(id):\n found_student = Student.query.get(id)\n if request.method == b'PATCH':\n found_student.first_name = request.form['first_name']\n found_student.last_name = request.form['last_name']\n 
db.session.add(found_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/show.html', student=found_student)\n\n@app.route(\"/students/<int:id>/excuses\", methods = [\"GET\", \"POST\"])\ndef excuses_index(id):\n found_student = Student.query.get(id)\n if request.method == \"POST\":\n new_excuse = Excuse(content = request.form.get(\"content\"), is_believable = request.form.get(\"is_believable\"), student_id = id)\n db.session.add(new_excuse)\n db.session.commit()\n return redirect(url_for(\"excuses_index\", id = id))\n\n \n excuses_list = found_student.excuses.all()\n return render_template(\"excuses/index.html\", excuses=excuses_list, student= found_student)\n\n\n\n@app.route(\"/students/<int:id>/excuses/new\")\ndef new_excuse(id):\n\n return render_template(\"/excuses/new.html\", id = id)\n\n\n@app.route(\"/students/<int:id>/excuses/<int:excuse_id>/edit\", methods = [\"GET\", \"PATCH\",\"DELETE\"])\ndef edit_excuse(id,excuse_id):\n print(id)\n found_student = Student.query.get(id)\n found_excuse = Excuse.query.get(excuse_id)\n excuses_list = found_student.excuses.all()\n if request.method == b'DELETE':\n db.session.delete(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id = found_student.id))\n elif request.method == b\"PATCH\":\n \n found_excuse.content = request.form.get(\"content\")\n found_excuse.is_believable = request.form.get(\"is_believable\")\n \n db.session.add(found_excuse)\n db.session.commit()\n\n return redirect(url_for(\"excuses_index\", id = found_student.id))\n \n # return render_template(\"excuses/index.html\",excuses = excuses_list, student = found_student)\n return render_template(\"excuses/edit.html\",excuse = found_excuse, student = found_student)",
"step-ids": [
13,
14,
15,
16,
17
]
}
|
[
13,
14,
15,
16,
17
] |
from django.db import models
from django.contrib.gis.db import models
from django.contrib.auth.models import User
from django.urls import reverse
class Project(models.Model):
	"""A client project with its assigned developer and hour-tracking fields."""
	# Optional link to the auth user who works on the project; deleting
	# that user cascades to the project rows.
	actual_developer = models.ForeignKey(User,null = True,blank=True, on_delete=models.CASCADE)
	# actual_developer = models.CharField(User,null = True,blank=True, max_length=200)
	projects_name = models.CharField(max_length=100)
	# Hour values are stored as free-form text (e.g. "176 Hr" below),
	# not as numbers.
	project_hours = models.CharField(max_length=100)
	developer_name = models.CharField(max_length=255)
	Month_Cycle = models.CharField(max_length = 1000, blank=True, null=True)
	mailing_hrs = models.CharField(max_length=100,null=True,blank=True)
	developer_email = models.EmailField()
	expected_daily_hours = models.CharField(max_length=200, null=True, blank=True)
	expected_cycle_hours = models.CharField(max_length=200, null=True, default = "176 Hr")
	cycle_hour_diff = models.IntegerField(null=True, default=0)
	def get_absolute_url(self):
		"""URL to redirect to after create/update: the 'project' view."""
		return reverse('project')
class Holidays(models.Model):
	"""A single holiday date (one row per non-working day)."""
	holidays = models.DateField()
|
normal
|
{
"blob_id": "ac1d38f550e548dff6ba226dbfc3dd1e5ff876a8",
"index": 5563,
"step-1": "<mask token>\n\n\nclass Project(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Holidays(models.Model):\n holidays = models.DateField()\n",
"step-2": "<mask token>\n\n\nclass Project(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_absolute_url(self):\n return reverse('project')\n\n\nclass Holidays(models.Model):\n holidays = models.DateField()\n",
"step-3": "<mask token>\n\n\nclass Project(models.Model):\n actual_developer = models.ForeignKey(User, null=True, blank=True,\n on_delete=models.CASCADE)\n projects_name = models.CharField(max_length=100)\n project_hours = models.CharField(max_length=100)\n developer_name = models.CharField(max_length=255)\n Month_Cycle = models.CharField(max_length=1000, blank=True, null=True)\n mailing_hrs = models.CharField(max_length=100, null=True, blank=True)\n developer_email = models.EmailField()\n expected_daily_hours = models.CharField(max_length=200, null=True,\n blank=True)\n expected_cycle_hours = models.CharField(max_length=200, null=True,\n default='176 Hr')\n cycle_hour_diff = models.IntegerField(null=True, default=0)\n\n def get_absolute_url(self):\n return reverse('project')\n\n\nclass Holidays(models.Model):\n holidays = models.DateField()\n",
"step-4": "from django.db import models\nfrom django.contrib.gis.db import models\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\n\n\nclass Project(models.Model):\n actual_developer = models.ForeignKey(User, null=True, blank=True,\n on_delete=models.CASCADE)\n projects_name = models.CharField(max_length=100)\n project_hours = models.CharField(max_length=100)\n developer_name = models.CharField(max_length=255)\n Month_Cycle = models.CharField(max_length=1000, blank=True, null=True)\n mailing_hrs = models.CharField(max_length=100, null=True, blank=True)\n developer_email = models.EmailField()\n expected_daily_hours = models.CharField(max_length=200, null=True,\n blank=True)\n expected_cycle_hours = models.CharField(max_length=200, null=True,\n default='176 Hr')\n cycle_hour_diff = models.IntegerField(null=True, default=0)\n\n def get_absolute_url(self):\n return reverse('project')\n\n\nclass Holidays(models.Model):\n holidays = models.DateField()\n",
"step-5": "from django.db import models\nfrom django.contrib.gis.db import models\nfrom django.contrib.auth.models import User\n\nfrom django.urls import reverse\n\n\nclass Project(models.Model):\n\tactual_developer = models.ForeignKey(User,null = True,blank=True, on_delete=models.CASCADE)\n\t# actual_developer = models.CharField(User,null = True,blank=True, max_length=200)\n\tprojects_name = models.CharField(max_length=100)\n\tproject_hours = models.CharField(max_length=100)\n\tdeveloper_name = models.CharField(max_length=255)\n\tMonth_Cycle = models.CharField(max_length = 1000, blank=True, null=True)\n\tmailing_hrs = models.CharField(max_length=100,null=True,blank=True)\n\tdeveloper_email = models.EmailField()\n\texpected_daily_hours = models.CharField(max_length=200, null=True, blank=True)\n\texpected_cycle_hours = models.CharField(max_length=200, null=True, default = \"176 Hr\")\n\tcycle_hour_diff = models.IntegerField(null=True, default=0)\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('project')\n\nclass Holidays(models.Model):\n\tholidays = models.DateField()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def mouseMoved():
redraw()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def setup():
global table
table = loadTable('flights.csv', 'header')
size(width, height)
noLoop()
noStroke()
<|reserved_special_token_0|>
def mouseMoved():
redraw()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def setup():
global table
table = loadTable('flights.csv', 'header')
size(width, height)
noLoop()
noStroke()
def draw():
global table
background(255, 255, 255)
for row in table.rows():
from_x = map(row.getFloat('from_long'), -180, 180, 0, width)
from_y = map(row.getFloat('from_lat'), -90, 90, height / 2, 0)
to_x = map(row.getFloat('to_long'), -180, 180, 0, width)
to_y = map(row.getFloat('to_lat'), -90, 90, height, height / 2)
r = 3
if dist(from_x, from_y, mouseX, mouseY) < 15:
fill(255, 0, 0, 20)
else:
fill(0, 0, 255, 5)
ellipse(from_x, from_y, r, r)
ellipse(to_x, to_y, r, r)
def mouseMoved():
redraw()
<|reserved_special_token_1|>
table = None
width = 1000
height = 1000
def setup():
global table
table = loadTable('flights.csv', 'header')
size(width, height)
noLoop()
noStroke()
def draw():
global table
background(255, 255, 255)
for row in table.rows():
from_x = map(row.getFloat('from_long'), -180, 180, 0, width)
from_y = map(row.getFloat('from_lat'), -90, 90, height / 2, 0)
to_x = map(row.getFloat('to_long'), -180, 180, 0, width)
to_y = map(row.getFloat('to_lat'), -90, 90, height, height / 2)
r = 3
if dist(from_x, from_y, mouseX, mouseY) < 15:
fill(255, 0, 0, 20)
else:
fill(0, 0, 255, 5)
ellipse(from_x, from_y, r, r)
ellipse(to_x, to_y, r, r)
def mouseMoved():
redraw()
<|reserved_special_token_1|>
# Module-level state shared by the Processing sketch callbacks below.
table = None    # flight data (Table); loaded once in setup()
# NOTE(review): these shadow Processing's built-in width/height globals;
# size(width, height) in setup() keeps them in agreement.
width = 1000
height = 1000
def setup():
    """Processing setup: load the CSV once and configure the canvas."""
    global table
    # "header" makes loadTable treat the first CSV row as column names.
    table = loadTable("flights.csv", "header")
    size(width, height)
    noLoop()    # only redraw on demand (see mouseMoved)
    noStroke()  # dots are drawn with fill colour only, no outline
def draw():
    """Plot every flight's endpoints; origins near the mouse turn red."""
    global table
    background(255, 255, 255)
    diameter = 3
    for flight in table.rows():
        # Project lon/lat into canvas space: origins map onto the top
        # half of the canvas, destinations onto the bottom half.
        origin_x = map(flight.getFloat('from_long'), -180, 180, 0, width)
        origin_y = map(flight.getFloat('from_lat'), -90, 90, height / 2, 0)
        dest_x = map(flight.getFloat('to_long'), -180, 180, 0, width)
        dest_y = map(flight.getFloat('to_lat'), -90, 90, height, height / 2)
        if dist(origin_x, origin_y, mouseX, mouseY) < 15:
            fill(255, 0, 0, 20)   # translucent red: origin near cursor
        else:
            fill(0, 0, 255, 5)    # faint blue otherwise
        ellipse(origin_x, origin_y, diameter, diameter)
        ellipse(dest_x, dest_y, diameter, diameter)
def mouseMoved():
    # With noLoop() active, draw() only runs when asked; trigger a
    # repaint on every cursor move so the hover highlight stays current.
    redraw()
|
flexible
|
{
"blob_id": "a2eabf4dae931d82e4e9eda87d79031711faf1aa",
"index": 2221,
"step-1": "<mask token>\n\n\ndef mouseMoved():\n redraw()\n",
"step-2": "<mask token>\n\n\ndef setup():\n global table\n table = loadTable('flights.csv', 'header')\n size(width, height)\n noLoop()\n noStroke()\n\n\n<mask token>\n\n\ndef mouseMoved():\n redraw()\n",
"step-3": "<mask token>\n\n\ndef setup():\n global table\n table = loadTable('flights.csv', 'header')\n size(width, height)\n noLoop()\n noStroke()\n\n\ndef draw():\n global table\n background(255, 255, 255)\n for row in table.rows():\n from_x = map(row.getFloat('from_long'), -180, 180, 0, width)\n from_y = map(row.getFloat('from_lat'), -90, 90, height / 2, 0)\n to_x = map(row.getFloat('to_long'), -180, 180, 0, width)\n to_y = map(row.getFloat('to_lat'), -90, 90, height, height / 2)\n r = 3\n if dist(from_x, from_y, mouseX, mouseY) < 15:\n fill(255, 0, 0, 20)\n else:\n fill(0, 0, 255, 5)\n ellipse(from_x, from_y, r, r)\n ellipse(to_x, to_y, r, r)\n\n\ndef mouseMoved():\n redraw()\n",
"step-4": "table = None\nwidth = 1000\nheight = 1000\n\n\ndef setup():\n global table\n table = loadTable('flights.csv', 'header')\n size(width, height)\n noLoop()\n noStroke()\n\n\ndef draw():\n global table\n background(255, 255, 255)\n for row in table.rows():\n from_x = map(row.getFloat('from_long'), -180, 180, 0, width)\n from_y = map(row.getFloat('from_lat'), -90, 90, height / 2, 0)\n to_x = map(row.getFloat('to_long'), -180, 180, 0, width)\n to_y = map(row.getFloat('to_lat'), -90, 90, height, height / 2)\n r = 3\n if dist(from_x, from_y, mouseX, mouseY) < 15:\n fill(255, 0, 0, 20)\n else:\n fill(0, 0, 255, 5)\n ellipse(from_x, from_y, r, r)\n ellipse(to_x, to_y, r, r)\n\n\ndef mouseMoved():\n redraw()\n",
"step-5": "table = None\n\n\nwidth = 1000\nheight = 1000\n\ndef setup():\n global table\n table = loadTable(\"flights.csv\", \"header\")\n size(width, height)\n noLoop()\n noStroke()\n \ndef draw():\n global table\n background(255, 255, 255)\n for row in table.rows():\n from_x = map(row.getFloat('from_long'), -180, 180, 0, width) \n from_y = map(row.getFloat('from_lat'), -90, 90, height/2, 0) \n to_x = map(row.getFloat('to_long'), -180, 180, 0, width) \n to_y = map(row.getFloat('to_lat'), -90, 90, height, height/2) \n r = 3\n if dist(from_x, from_y, mouseX, mouseY) < 15:\n fill(255, 0, 0, 20)\n else:\n fill(0, 0, 255, 5)\n ellipse(from_x, from_y, r, r)\n ellipse(to_x, to_y, r, r)\n\ndef mouseMoved():\n redraw()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from app.models.tables import Warehouse, Contractor, Articles
class ContractorTools(Contractor):
    """CRUD helpers for the contractor (counterparty) reference book."""

    @staticmethod
    def add_contractor(**kwargs):
        """Insert a new contractor built from the given column values."""
        Contractor(**kwargs).insert()

    @staticmethod
    def delete_contractor(**kwargs):
        """Delete the contractor matching kwargs['id_contr']."""
        record = Contractor().select_expression(id_contr=kwargs['id_contr'])[0]
        record.delete_data()

    @staticmethod
    def update_contractor(**kwargs):
        """Persist updated column values for an existing contractor."""
        Contractor(**kwargs).update_data()

    @staticmethod
    def get_contractors():
        """Return all contractors, converted via db_obj_to_dict, under
        the 'contractors' key."""
        model = Contractor()
        rows = model.select_expression()
        return dict(contractors=model.db_obj_to_dict(*rows))
class ArticleTools(Articles):
    """CRUD helpers for the articles (goods) reference book."""

    @staticmethod
    def add_article(**kwargs):
        """Insert a new article built from the given column values."""
        Articles(**kwargs).insert()

    @staticmethod
    def update_article(**kwargs):
        """Persist updated column values for an existing article."""
        Articles(**kwargs).update_data()

    @staticmethod
    def delete_article(**kwargs):
        """Delete the article matching kwargs['id_art']."""
        record = Articles().select_expression(id_art=kwargs['id_art'])[0]
        record.delete_data()
class WarehouseTools(Warehouse):
    """
    Helpers for the warehouse (storage-location) reference tree.

    Locations form a hierarchy via id_higher (parent id) and level
    (1-based depth, see add_warehouse).
    """
    def set_new_name(self, id_ws, name):
        """
        Rename the location with the given id_ws.

        Returns True on success; raises IndexError if id_ws is unknown.
        """
        ws = super().select_expression(id_ws=id_ws)[0]
        ws.name.set_value(name)
        ws.update_data()
        return True

    def delete_warehouse(self, id_ws, name):
        # `name` is accepted but never used; kept for caller compatibility.
        # NOTE(review): only two levels of descendants are removed — a
        # deeper subtree would leave orphaned rows. Confirm the tree
        # depth is capped before relying on this.
        ws = super().select_expression(id_ws=id_ws)[0]
        for child in super().select_expression(id_higher=id_ws):
            for child_child in super().select_expression(id_higher=child.id_ws.value):
                child_child.delete_data()
            child.delete_data()
        ws.delete_data()
        return True

    @staticmethod
    def add_warehouse(id_higher, name):
        """
        Create a location under id_higher; when id_higher does not
        resolve to an existing row, insert a root (level 1, no parent).
        """
        ws = Warehouse(name=name, id_higher=id_higher)
        try:
            ws_parent = ws.select_expression(id_ws=id_higher)[0]
            parent_level = ws_parent.level.value + 1
        except IndexError:
            # No such parent: register the new location as a tree root.
            parent_level = 1
            id_higher = None
        ws.level.set_value(parent_level)
        ws.id_higher.set_value(id_higher)
        ws.insert()
        return True

    @staticmethod
    def move_warehouse(id_ws, id_higher):
        """
        Re-parent a location; an empty-string id_higher makes it a root.

        NOTE(review): `level` is not recalculated here, so moving a node
        across depths leaves its stored level stale — confirm intended.
        """
        if id_higher == '':
            id_higher = None
        ws = Warehouse()
        ws = ws.select_expression(id_ws=id_ws)[0]
        ws.id_higher.set_value(id_higher)
        ws.update_data()
        return True

    @staticmethod
    def get_warehouses():
        """Return all locations, converted via db_obj_to_dict, under
        the 'warehouses' key."""
        warehouse = Warehouse()
        warehouses = warehouse.select_expression()
        warehouses = warehouse.db_obj_to_dict(*warehouses)
        return dict(warehouses=warehouses)

    @staticmethod
    def get_ws_tree():
        """Return the full location hierarchy via Warehouse.get_full_tree()."""
        warehouse = Warehouse()
        return warehouse.get_full_tree()
|
normal
|
{
"blob_id": "79c4a2d4503c2639950675b398e000aae367ff4a",
"index": 8117,
"step-1": "<mask token>\n\n\nclass ArticleTools(Articles):\n <mask token>\n\n @staticmethod\n def add_article(**kwargs):\n ca = Articles(**kwargs)\n ca.insert()\n\n @staticmethod\n def update_article(**kwargs):\n ca = Articles(**kwargs)\n ca.update_data()\n\n @staticmethod\n def delete_article(**kwargs):\n ca = Articles()\n ca = ca.select_expression(id_art=kwargs['id_art'])[0]\n ca.delete_data()\n\n\nclass WarehouseTools(Warehouse):\n \"\"\"\n Работа со справочником МХ\n \"\"\"\n\n def set_new_name(self, id_ws, name):\n \"\"\"\n Переименовывает переданный МХ\n \"\"\"\n ws = super().select_expression(id_ws=id_ws)[0]\n ws.name.set_value(name)\n ws.update_data()\n return True\n\n def delete_warehouse(self, id_ws, name):\n ws = super().select_expression(id_ws=id_ws)[0]\n for child in super().select_expression(id_higher=id_ws):\n for child_child in super().select_expression(id_higher=child.\n id_ws.value):\n child_child.delete_data()\n child.delete_data()\n ws.delete_data()\n return True\n\n @staticmethod\n def add_warehouse(id_higher, name):\n ws = Warehouse(name=name, id_higher=id_higher)\n try:\n ws_parent = ws.select_expression(id_ws=id_higher)[0]\n parent_level = ws_parent.level.value + 1\n except IndexError:\n parent_level = 1\n id_higher = None\n ws.level.set_value(parent_level)\n ws.id_higher.set_value(id_higher)\n ws.insert()\n return True\n\n @staticmethod\n def move_warehouse(id_ws, id_higher):\n if id_higher == '':\n id_higher = None\n ws = Warehouse()\n ws = ws.select_expression(id_ws=id_ws)[0]\n ws.id_higher.set_value(id_higher)\n ws.update_data()\n return True\n\n @staticmethod\n def get_warehouses():\n warehouse = Warehouse()\n warehouses = warehouse.select_expression()\n warehouses = warehouse.db_obj_to_dict(*warehouses)\n return dict(warehouses=warehouses)\n\n @staticmethod\n def get_ws_tree():\n warehouse = Warehouse()\n return warehouse.get_full_tree()\n",
"step-2": "<mask token>\n\n\nclass ContractorTools(Contractor):\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def update_contractor(**kwargs):\n ca = Contractor(**kwargs)\n ca.update_data()\n <mask token>\n\n\nclass ArticleTools(Articles):\n \"\"\"\n Работа со справочником ТП\n \"\"\"\n\n @staticmethod\n def add_article(**kwargs):\n ca = Articles(**kwargs)\n ca.insert()\n\n @staticmethod\n def update_article(**kwargs):\n ca = Articles(**kwargs)\n ca.update_data()\n\n @staticmethod\n def delete_article(**kwargs):\n ca = Articles()\n ca = ca.select_expression(id_art=kwargs['id_art'])[0]\n ca.delete_data()\n\n\nclass WarehouseTools(Warehouse):\n \"\"\"\n Работа со справочником МХ\n \"\"\"\n\n def set_new_name(self, id_ws, name):\n \"\"\"\n Переименовывает переданный МХ\n \"\"\"\n ws = super().select_expression(id_ws=id_ws)[0]\n ws.name.set_value(name)\n ws.update_data()\n return True\n\n def delete_warehouse(self, id_ws, name):\n ws = super().select_expression(id_ws=id_ws)[0]\n for child in super().select_expression(id_higher=id_ws):\n for child_child in super().select_expression(id_higher=child.\n id_ws.value):\n child_child.delete_data()\n child.delete_data()\n ws.delete_data()\n return True\n\n @staticmethod\n def add_warehouse(id_higher, name):\n ws = Warehouse(name=name, id_higher=id_higher)\n try:\n ws_parent = ws.select_expression(id_ws=id_higher)[0]\n parent_level = ws_parent.level.value + 1\n except IndexError:\n parent_level = 1\n id_higher = None\n ws.level.set_value(parent_level)\n ws.id_higher.set_value(id_higher)\n ws.insert()\n return True\n\n @staticmethod\n def move_warehouse(id_ws, id_higher):\n if id_higher == '':\n id_higher = None\n ws = Warehouse()\n ws = ws.select_expression(id_ws=id_ws)[0]\n ws.id_higher.set_value(id_higher)\n ws.update_data()\n return True\n\n @staticmethod\n def get_warehouses():\n warehouse = Warehouse()\n warehouses = warehouse.select_expression()\n warehouses = warehouse.db_obj_to_dict(*warehouses)\n 
return dict(warehouses=warehouses)\n\n @staticmethod\n def get_ws_tree():\n warehouse = Warehouse()\n return warehouse.get_full_tree()\n",
"step-3": "<mask token>\n\n\nclass ContractorTools(Contractor):\n \"\"\"\n Работа со справочником КА\n \"\"\"\n\n @staticmethod\n def add_contractor(**kwargs):\n ca = Contractor(**kwargs)\n ca.insert()\n\n @staticmethod\n def delete_contractor(**kwargs):\n ca = Contractor()\n ca = ca.select_expression(id_contr=kwargs['id_contr'])[0]\n ca.delete_data()\n\n @staticmethod\n def update_contractor(**kwargs):\n ca = Contractor(**kwargs)\n ca.update_data()\n\n @staticmethod\n def get_contractors():\n contr = Contractor()\n contrs = contr.select_expression()\n contrs = contr.db_obj_to_dict(*contrs)\n return dict(contractors=contrs)\n\n\nclass ArticleTools(Articles):\n \"\"\"\n Работа со справочником ТП\n \"\"\"\n\n @staticmethod\n def add_article(**kwargs):\n ca = Articles(**kwargs)\n ca.insert()\n\n @staticmethod\n def update_article(**kwargs):\n ca = Articles(**kwargs)\n ca.update_data()\n\n @staticmethod\n def delete_article(**kwargs):\n ca = Articles()\n ca = ca.select_expression(id_art=kwargs['id_art'])[0]\n ca.delete_data()\n\n\nclass WarehouseTools(Warehouse):\n \"\"\"\n Работа со справочником МХ\n \"\"\"\n\n def set_new_name(self, id_ws, name):\n \"\"\"\n Переименовывает переданный МХ\n \"\"\"\n ws = super().select_expression(id_ws=id_ws)[0]\n ws.name.set_value(name)\n ws.update_data()\n return True\n\n def delete_warehouse(self, id_ws, name):\n ws = super().select_expression(id_ws=id_ws)[0]\n for child in super().select_expression(id_higher=id_ws):\n for child_child in super().select_expression(id_higher=child.\n id_ws.value):\n child_child.delete_data()\n child.delete_data()\n ws.delete_data()\n return True\n\n @staticmethod\n def add_warehouse(id_higher, name):\n ws = Warehouse(name=name, id_higher=id_higher)\n try:\n ws_parent = ws.select_expression(id_ws=id_higher)[0]\n parent_level = ws_parent.level.value + 1\n except IndexError:\n parent_level = 1\n id_higher = None\n ws.level.set_value(parent_level)\n ws.id_higher.set_value(id_higher)\n ws.insert()\n return 
True\n\n @staticmethod\n def move_warehouse(id_ws, id_higher):\n if id_higher == '':\n id_higher = None\n ws = Warehouse()\n ws = ws.select_expression(id_ws=id_ws)[0]\n ws.id_higher.set_value(id_higher)\n ws.update_data()\n return True\n\n @staticmethod\n def get_warehouses():\n warehouse = Warehouse()\n warehouses = warehouse.select_expression()\n warehouses = warehouse.db_obj_to_dict(*warehouses)\n return dict(warehouses=warehouses)\n\n @staticmethod\n def get_ws_tree():\n warehouse = Warehouse()\n return warehouse.get_full_tree()\n",
"step-4": "from app.models.tables import Warehouse, Contractor, Articles\n\n\nclass ContractorTools(Contractor):\n \"\"\"\n Работа со справочником КА\n \"\"\"\n\n @staticmethod\n def add_contractor(**kwargs):\n ca = Contractor(**kwargs)\n ca.insert()\n\n @staticmethod\n def delete_contractor(**kwargs):\n ca = Contractor()\n ca = ca.select_expression(id_contr=kwargs['id_contr'])[0]\n ca.delete_data()\n\n @staticmethod\n def update_contractor(**kwargs):\n ca = Contractor(**kwargs)\n ca.update_data()\n\n @staticmethod\n def get_contractors():\n contr = Contractor()\n contrs = contr.select_expression()\n contrs = contr.db_obj_to_dict(*contrs)\n return dict(contractors=contrs)\n\n\nclass ArticleTools(Articles):\n \"\"\"\n Работа со справочником ТП\n \"\"\"\n\n @staticmethod\n def add_article(**kwargs):\n ca = Articles(**kwargs)\n ca.insert()\n\n @staticmethod\n def update_article(**kwargs):\n ca = Articles(**kwargs)\n ca.update_data()\n\n @staticmethod\n def delete_article(**kwargs):\n ca = Articles()\n ca = ca.select_expression(id_art=kwargs['id_art'])[0]\n ca.delete_data()\n\n\nclass WarehouseTools(Warehouse):\n \"\"\"\n Работа со справочником МХ\n \"\"\"\n\n def set_new_name(self, id_ws, name):\n \"\"\"\n Переименовывает переданный МХ\n \"\"\"\n ws = super().select_expression(id_ws=id_ws)[0]\n ws.name.set_value(name)\n ws.update_data()\n return True\n\n def delete_warehouse(self, id_ws, name):\n ws = super().select_expression(id_ws=id_ws)[0]\n for child in super().select_expression(id_higher=id_ws):\n for child_child in super().select_expression(id_higher=child.\n id_ws.value):\n child_child.delete_data()\n child.delete_data()\n ws.delete_data()\n return True\n\n @staticmethod\n def add_warehouse(id_higher, name):\n ws = Warehouse(name=name, id_higher=id_higher)\n try:\n ws_parent = ws.select_expression(id_ws=id_higher)[0]\n parent_level = ws_parent.level.value + 1\n except IndexError:\n parent_level = 1\n id_higher = None\n ws.level.set_value(parent_level)\n 
ws.id_higher.set_value(id_higher)\n ws.insert()\n return True\n\n @staticmethod\n def move_warehouse(id_ws, id_higher):\n if id_higher == '':\n id_higher = None\n ws = Warehouse()\n ws = ws.select_expression(id_ws=id_ws)[0]\n ws.id_higher.set_value(id_higher)\n ws.update_data()\n return True\n\n @staticmethod\n def get_warehouses():\n warehouse = Warehouse()\n warehouses = warehouse.select_expression()\n warehouses = warehouse.db_obj_to_dict(*warehouses)\n return dict(warehouses=warehouses)\n\n @staticmethod\n def get_ws_tree():\n warehouse = Warehouse()\n return warehouse.get_full_tree()\n",
"step-5": "from app.models.tables import Warehouse, Contractor, Articles\n\n\nclass ContractorTools(Contractor):\n \"\"\"\n Работа со справочником КА\n \"\"\"\n\n @staticmethod\n def add_contractor(**kwargs):\n ca = Contractor(**kwargs)\n ca.insert()\n\n @staticmethod\n def delete_contractor(**kwargs):\n ca = Contractor()\n ca = ca.select_expression(id_contr=kwargs['id_contr'])[0]\n ca.delete_data()\n\n @staticmethod\n def update_contractor(**kwargs):\n ca = Contractor(**kwargs)\n # ca = ca.select_expression(id_contr=kwargs['id_contr'])[0]\n ca.update_data()\n\n @staticmethod\n def get_contractors():\n contr = Contractor()\n contrs = contr.select_expression()\n contrs = contr.db_obj_to_dict(*contrs)\n return dict(contractors=contrs)\n\n\nclass ArticleTools(Articles):\n \"\"\"\n Работа со справочником ТП\n \"\"\"\n\n @staticmethod\n def add_article(**kwargs):\n ca = Articles(**kwargs)\n ca.insert()\n\n @staticmethod\n def update_article(**kwargs):\n ca = Articles(**kwargs)\n # ca = ca.select_expression(id_contr=kwargs['id_contr'])[0]\n ca.update_data()\n\n @staticmethod\n def delete_article(**kwargs):\n ca = Articles()\n ca = ca.select_expression(id_art=kwargs['id_art'])[0]\n ca.delete_data()\n\n\nclass WarehouseTools(Warehouse):\n \"\"\"\n Работа со справочником МХ\n \"\"\"\n\n def set_new_name(self, id_ws, name):\n \"\"\"\n Переименовывает переданный МХ\n \"\"\"\n ws = super().select_expression(id_ws=id_ws)[0]\n ws.name.set_value(name)\n ws.update_data()\n return True\n\n def delete_warehouse(self, id_ws, name):\n ws = super().select_expression(id_ws=id_ws)[0]\n for child in super().select_expression(id_higher=id_ws):\n for child_child in super().select_expression(id_higher=child.id_ws.value):\n child_child.delete_data()\n child.delete_data()\n ws.delete_data()\n return True\n\n @staticmethod\n def add_warehouse(id_higher, name):\n ws = Warehouse(name=name, id_higher=id_higher)\n try:\n ws_parent = ws.select_expression(id_ws=id_higher)[0]\n parent_level = 
ws_parent.level.value + 1\n except IndexError:\n parent_level = 1\n id_higher = None\n ws.level.set_value(parent_level)\n ws.id_higher.set_value(id_higher)\n ws.insert()\n return True\n\n @staticmethod\n def move_warehouse(id_ws, id_higher):\n if id_higher == '':\n id_higher = None\n ws = Warehouse()\n ws = ws.select_expression(id_ws=id_ws)[0]\n ws.id_higher.set_value(id_higher)\n ws.update_data()\n return True\n\n @staticmethod\n def get_warehouses():\n warehouse = Warehouse()\n warehouses = warehouse.select_expression()\n warehouses = warehouse.db_obj_to_dict(*warehouses)\n return dict(warehouses=warehouses)\n\n @staticmethod\n def get_ws_tree():\n warehouse = Warehouse()\n return warehouse.get_full_tree()\n",
"step-ids": [
12,
15,
19,
20,
21
]
}
|
[
12,
15,
19,
20,
21
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = [('auth', '0007_alter_validators_add_error_messages'), (
'enterprise', '0002_auto_20160804_1616')]
operations = [migrations.CreateModel(name='User', fields=[('id', models
.AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('password', models.CharField(max_length=128,
verbose_name='password')), ('last_login', models.DateTimeField(
blank=True, null=True, verbose_name='last login')), ('is_superuser',
models.BooleanField(default=False, help_text=
'Designates that this user has all permissions without explicitly assigning them.'
, verbose_name='superuser status')), ('email', models.EmailField(
blank=True, max_length=255, unique=True, verbose_name=
'Электронная почта')), ('username', models.CharField(db_index=True,
max_length=40, unique=True, verbose_name='Идентификатор')), (
'created_at', models.DateField(auto_now_add=True, verbose_name=
'Дата регистрации')), ('is_active', models.BooleanField(default=
True, verbose_name='Активен')), ('ip', models.GenericIPAddressField
(blank=True, null=True, verbose_name='IP адрес')), ('surname',
models.CharField(blank=True, max_length=50, verbose_name='Фамилия')
), ('first_name', models.CharField(blank=True, max_length=25,
verbose_name='Имя')), ('middle_name', models.CharField(blank=True,
max_length=25, verbose_name='Отчество')), ('groups', models.
ManyToManyField(blank=True, help_text=
'The groups this user belongs to. A user will get all permissions granted to each of their groups.'
, related_name='user_set', related_query_name='user', to=
'auth.Group', verbose_name='groups')), ('organisation', models.
ForeignKey(blank=True, null=True, on_delete=django.db.models.
deletion.CASCADE, to='enterprise.Organisation', verbose_name=
'Сотрудник организации')), ('user_permissions', models.
ManyToManyField(blank=True, help_text=
'Specific permissions for this user.', related_name='user_set',
related_query_name='user', to='auth.Permission', verbose_name=
'user permissions'))], options={'verbose_name': 'Пользователь',
'verbose_name_plural': 'Пользователи'})]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [('auth', '0007_alter_validators_add_error_messages'), (
'enterprise', '0002_auto_20160804_1616')]
operations = [migrations.CreateModel(name='User', fields=[('id', models
.AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('password', models.CharField(max_length=128,
verbose_name='password')), ('last_login', models.DateTimeField(
blank=True, null=True, verbose_name='last login')), ('is_superuser',
models.BooleanField(default=False, help_text=
'Designates that this user has all permissions without explicitly assigning them.'
, verbose_name='superuser status')), ('email', models.EmailField(
blank=True, max_length=255, unique=True, verbose_name=
'Электронная почта')), ('username', models.CharField(db_index=True,
max_length=40, unique=True, verbose_name='Идентификатор')), (
'created_at', models.DateField(auto_now_add=True, verbose_name=
'Дата регистрации')), ('is_active', models.BooleanField(default=
True, verbose_name='Активен')), ('ip', models.GenericIPAddressField
(blank=True, null=True, verbose_name='IP адрес')), ('surname',
models.CharField(blank=True, max_length=50, verbose_name='Фамилия')
), ('first_name', models.CharField(blank=True, max_length=25,
verbose_name='Имя')), ('middle_name', models.CharField(blank=True,
max_length=25, verbose_name='Отчество')), ('groups', models.
ManyToManyField(blank=True, help_text=
'The groups this user belongs to. A user will get all permissions granted to each of their groups.'
, related_name='user_set', related_query_name='user', to=
'auth.Group', verbose_name='groups')), ('organisation', models.
ForeignKey(blank=True, null=True, on_delete=django.db.models.
deletion.CASCADE, to='enterprise.Organisation', verbose_name=
'Сотрудник организации')), ('user_permissions', models.
ManyToManyField(blank=True, help_text=
'Specific permissions for this user.', related_name='user_set',
related_query_name='user', to='auth.Permission', verbose_name=
'user permissions'))], options={'verbose_name': 'Пользователь',
'verbose_name_plural': 'Пользователи'})]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-08-04 13:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0007_alter_validators_add_error_messages'),
('enterprise', '0002_auto_20160804_1616'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(blank=True, max_length=255, unique=True, verbose_name='Электронная почта')),
('username', models.CharField(db_index=True, max_length=40, unique=True, verbose_name='Идентификатор')),
('created_at', models.DateField(auto_now_add=True, verbose_name='Дата регистрации')),
('is_active', models.BooleanField(default=True, verbose_name='Активен')),
('ip', models.GenericIPAddressField(blank=True, null=True, verbose_name='IP адрес')),
('surname', models.CharField(blank=True, max_length=50, verbose_name='Фамилия')),
('first_name', models.CharField(blank=True, max_length=25, verbose_name='Имя')),
('middle_name', models.CharField(blank=True, max_length=25, verbose_name='Отчество')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('organisation', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='enterprise.Organisation', verbose_name='Сотрудник организации')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'Пользователь',
'verbose_name_plural': 'Пользователи',
},
),
]
|
flexible
|
{
"blob_id": "71662ff8c68559bf08e1da7f1a1504bfe842c950",
"index": 7430,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('auth', '0007_alter_validators_add_error_messages'), (\n 'enterprise', '0002_auto_20160804_1616')]\n operations = [migrations.CreateModel(name='User', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('password', models.CharField(max_length=128,\n verbose_name='password')), ('last_login', models.DateTimeField(\n blank=True, null=True, verbose_name='last login')), ('is_superuser',\n models.BooleanField(default=False, help_text=\n 'Designates that this user has all permissions without explicitly assigning them.'\n , verbose_name='superuser status')), ('email', models.EmailField(\n blank=True, max_length=255, unique=True, verbose_name=\n 'Электронная почта')), ('username', models.CharField(db_index=True,\n max_length=40, unique=True, verbose_name='Идентификатор')), (\n 'created_at', models.DateField(auto_now_add=True, verbose_name=\n 'Дата регистрации')), ('is_active', models.BooleanField(default=\n True, verbose_name='Активен')), ('ip', models.GenericIPAddressField\n (blank=True, null=True, verbose_name='IP адрес')), ('surname',\n models.CharField(blank=True, max_length=50, verbose_name='Фамилия')\n ), ('first_name', models.CharField(blank=True, max_length=25,\n verbose_name='Имя')), ('middle_name', models.CharField(blank=True,\n max_length=25, verbose_name='Отчество')), ('groups', models.\n ManyToManyField(blank=True, help_text=\n 'The groups this user belongs to. 
A user will get all permissions granted to each of their groups.'\n , related_name='user_set', related_query_name='user', to=\n 'auth.Group', verbose_name='groups')), ('organisation', models.\n ForeignKey(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='enterprise.Organisation', verbose_name=\n 'Сотрудник организации')), ('user_permissions', models.\n ManyToManyField(blank=True, help_text=\n 'Specific permissions for this user.', related_name='user_set',\n related_query_name='user', to='auth.Permission', verbose_name=\n 'user permissions'))], options={'verbose_name': 'Пользователь',\n 'verbose_name_plural': 'Пользователи'})]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('auth', '0007_alter_validators_add_error_messages'), (\n 'enterprise', '0002_auto_20160804_1616')]\n operations = [migrations.CreateModel(name='User', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('password', models.CharField(max_length=128,\n verbose_name='password')), ('last_login', models.DateTimeField(\n blank=True, null=True, verbose_name='last login')), ('is_superuser',\n models.BooleanField(default=False, help_text=\n 'Designates that this user has all permissions without explicitly assigning them.'\n , verbose_name='superuser status')), ('email', models.EmailField(\n blank=True, max_length=255, unique=True, verbose_name=\n 'Электронная почта')), ('username', models.CharField(db_index=True,\n max_length=40, unique=True, verbose_name='Идентификатор')), (\n 'created_at', models.DateField(auto_now_add=True, verbose_name=\n 'Дата регистрации')), ('is_active', models.BooleanField(default=\n True, verbose_name='Активен')), ('ip', models.GenericIPAddressField\n (blank=True, null=True, verbose_name='IP адрес')), ('surname',\n models.CharField(blank=True, max_length=50, verbose_name='Фамилия')\n ), ('first_name', models.CharField(blank=True, max_length=25,\n verbose_name='Имя')), ('middle_name', models.CharField(blank=True,\n max_length=25, verbose_name='Отчество')), ('groups', models.\n ManyToManyField(blank=True, help_text=\n 'The groups this user belongs to. 
A user will get all permissions granted to each of their groups.'\n , related_name='user_set', related_query_name='user', to=\n 'auth.Group', verbose_name='groups')), ('organisation', models.\n ForeignKey(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='enterprise.Organisation', verbose_name=\n 'Сотрудник организации')), ('user_permissions', models.\n ManyToManyField(blank=True, help_text=\n 'Specific permissions for this user.', related_name='user_set',\n related_query_name='user', to='auth.Permission', verbose_name=\n 'user permissions'))], options={'verbose_name': 'Пользователь',\n 'verbose_name_plural': 'Пользователи'})]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.6 on 2016-08-04 13:16\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('auth', '0007_alter_validators_add_error_messages'),\n ('enterprise', '0002_auto_20160804_1616'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('password', models.CharField(max_length=128, verbose_name='password')),\n ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),\n ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),\n ('email', models.EmailField(blank=True, max_length=255, unique=True, verbose_name='Электронная почта')),\n ('username', models.CharField(db_index=True, max_length=40, unique=True, verbose_name='Идентификатор')),\n ('created_at', models.DateField(auto_now_add=True, verbose_name='Дата регистрации')),\n ('is_active', models.BooleanField(default=True, verbose_name='Активен')),\n ('ip', models.GenericIPAddressField(blank=True, null=True, verbose_name='IP адрес')),\n ('surname', models.CharField(blank=True, max_length=50, verbose_name='Фамилия')),\n ('first_name', models.CharField(blank=True, max_length=25, verbose_name='Имя')),\n ('middle_name', models.CharField(blank=True, max_length=25, verbose_name='Отчество')),\n ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. 
A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),\n ('organisation', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='enterprise.Organisation', verbose_name='Сотрудник организации')),\n ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),\n ],\n options={\n 'verbose_name': 'Пользователь',\n 'verbose_name_plural': 'Пользователи',\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while 'spam' in my_order:
print("I don't like spam!")
my_order.remove('spam')
print(my_order)
<|reserved_special_token_1|>
my_order = ['spam', 'eggs', 'sausage', 'spam', 'bacon', 'spam']
while 'spam' in my_order:
print("I don't like spam!")
my_order.remove('spam')
print(my_order)
|
flexible
|
{
"blob_id": "8e8629dd2d4bb601347694b18d7cb6a94880201d",
"index": 8192,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile 'spam' in my_order:\n print(\"I don't like spam!\")\n my_order.remove('spam')\nprint(my_order)\n",
"step-3": "my_order = ['spam', 'eggs', 'sausage', 'spam', 'bacon', 'spam']\nwhile 'spam' in my_order:\n print(\"I don't like spam!\")\n my_order.remove('spam')\nprint(my_order)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def _kernel_shape(nd, k, indim, outdim):
if isinstance(k, int):
k = [k for _ in range(nd)]
k = list(k)
assert len(k) == nd
k.extend([indim, outdim])
return k
<|reserved_special_token_0|>
@layer
def conv(x, outdim, kernel=3, stride=1, pad=0, padding='SAME', mode=
    'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):
    """Rank-agnostic convolution.

    Dispatches to :func:`conv1d`, :func:`conv2d` or :func:`conv3d` based on
    the rank of ``x`` (3, 4 or 5, including batch and channel dimensions).
    """
    nd = x.ndim
    dispatch = {3: conv1d, 4: conv2d, 5: conv3d}
    if nd not in dispatch:
        raise ValueError('conv for {nd}? nd <= 5'.format(nd=nd))
    return dispatch[nd](x, outdim, kernel, stride=stride, pad=pad,
                        padding=padding, mode=mode, initializer=initializer,
                        bias=bias, **kwargs)
@layer
def conv1d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode=
    'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):
    """1-D convolution over a rank-3 (batch, width, channel) tensor.

    :param x: rank-3 input tensor.
    :param outdim: number of output channels.
    :param kernel: kernel width (int or 1-element sequence).
    :param stride: convolution stride.
    :param pad: extra symmetric padding on the width axis.
    :param padding: 'SAME' or 'VALID' (TensorFlow semantics).
    :param mode: ``tf.pad`` mode used whenever explicit padding is applied.
    :param initializer: weight-initializer factory, called with the kernel shape.
    :param bias: if True, add a learnable per-channel bias.
    """
    kernel = _kernel_shape(1, kernel, x.dims[-1], outdim)
    pads = None
    if padding == 'SAME' and mode != 'CONSTANT':
        # The conv op itself can only zero-pad; to get 'SAME'-sized output
        # with a non-constant pad mode, pad explicitly by the kernel half
        # widths and run the convolution as 'VALID'.
        half = (kernel[0] - 1) // 2
        pads = [(0, 0), (pad + half, pad + kernel[0] - 1 - half), (0, 0)]
        padding = 'VALID'
    elif pad:
        # Plain extra padding around the width axis (batch/channel untouched).
        pads = [(0, 0), (pad, pad), (0, 0)]
    if pads is not None:
        x = tf.pad(x, pads, mode=mode)
    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),
        **kwargs)
    out = tf.nn.conv1d(x, W, stride, padding)
    if bias:
        b = tf.get_bias('b', shape=(outdim,), initializer=tf.
            zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
@layer
def conv2d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode=
    'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):
    """2-D convolution over a rank-4 (batch, height, width, channel) tensor.

    Parameter semantics mirror :func:`conv1d`; ``kernel`` and ``stride``
    may be ints or 2-element sequences.
    """
    kernel = _kernel_shape(2, kernel, x.dims[-1], outdim)
    # NOTE(review): _stride_shape is defined elsewhere in this module;
    # presumably it expands `stride` to the [1, s_h, s_w, 1] form expected
    # by tf.nn.conv2d -- confirm against its definition.
    stride = _stride_shape(2, stride)
    pads = None
    if padding == 'SAME' and mode != 'CONSTANT':
        # Emulate 'SAME' under a non-constant pad mode: pad explicitly by
        # the per-axis kernel half widths, then convolve as 'VALID'.
        half = (kernel[0] - 1) // 2, (kernel[1] - 1) // 2
        pads = [(0, 0), (pad + half[0], pad + kernel[0] - 1 - half[0]), (
            pad + half[1], pad + kernel[1] - 1 - half[1]), (0, 0)]
        padding = 'VALID'
    elif pad:
        # Plain extra padding on both spatial axes (batch/channel untouched).
        pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]
    if pads is not None:
        x = tf.pad(x, pads, mode=mode)
    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),
        **kwargs)
    out = tf.nn.conv2d(x, W, stride, padding)
    if bias:
        b = tf.get_bias('b', shape=(outdim,), initializer=tf.
            zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
<|reserved_special_token_0|>
@layer
def renorm(x, axis=-1, momentum=0.99, epsilon=0.001, training=None, gamma=
    1.0, beta=0.0, stddev=0.002, renorm_momentum=0.99, renorm_clipping=None,
    **kwargs):
    """Batch renormalization layer.

    https://arxiv.org/abs/1702.03275
    https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization

    :param x: input tensor.
    :param gamma: mean of the random-normal gamma initializer.
    :param beta: constant value for the beta initializer.
    :param stddev: stddev of the gamma initializer.
    :param dict renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar Tensors
        used to clip the renorm correction. The correction (r, d) is used as corrected_value = normalized_value * r + d,
        with r clipped to [rmin, rmax], and d to [-dmax, dmax].
        Missing rmax, rmin, dmax are set to inf, 0, inf, respectively.
    :return: the normalized tensor.
    """
    init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)
    init_beta = tf.constant_initializer(beta)
    reuse = tf.get_variable_scope().reuse
    # Resolve the training flag when the caller did not fix it:
    # a reused variable scope implies inference; otherwise defer to the
    # graph-level training flag.
    if training is None and (reuse or kwargs.get('reuse', False)):
        training = False
    elif training is None:
        training = x.graph.is_training
    if renorm_clipping is not None:
        # If only rmax was supplied, default rmin to its reciprocal so the
        # clip interval [1/rmax, rmax] is symmetric around r == 1.
        # NOTE: this mutates the caller's renorm_clipping dict in place.
        if renorm_clipping.get('rmin', None) is None:
            rmax = renorm_clipping.get('rmax', None)
            if rmax is not None and not np.isinf(rmax):
                rmin = 1 / rmax
                renorm_clipping['rmin'] = rmin
    out = tf.layers.batch_normalization(x, axis=axis, momentum=momentum,
        epsilon=epsilon, beta_initializer=init_beta, gamma_initializer=
        init_gamma, training=training, renorm=True, renorm_clipping=
        renorm_clipping, renorm_momentum=renorm_momentum, **kwargs)
    return out
<|reserved_special_token_0|>
@layer
def lnorm(x, center=True, scale=True, activation_fn=None, reuse=None,
    variables_collections=None, outputs_collections=None, trainable=True,
    begin_norm_axis=1, begin_params_axis=-1, scope=None, **kwargs):
    """Layer normalization (thin wrapper over ``tf.contrib.layers.layer_norm``).

    Bug fix: the ``scope`` argument is now forwarded to the underlying op;
    previously the call hard-coded ``scope=None``, silently discarding any
    scope the caller passed.

    :param x: input tensor.
    :param scope: optional variable scope for the layer-norm variables.
    All other parameters are passed straight through to
    ``tf.contrib.layers.layer_norm``.
    :return: the layer-normalized tensor.
    """
    return tf.contrib.layers.layer_norm(x, center=center, scale=scale,
        activation_fn=activation_fn, reuse=reuse, variables_collections=
        variables_collections, outputs_collections=outputs_collections,
        trainable=trainable, begin_norm_axis=begin_norm_axis,
        begin_params_axis=begin_params_axis, scope=scope, **kwargs)
<|reserved_special_token_0|>
@layer
def dropout(x, keep_prob=0.5, is_training=None, noise_shape=None, seed=None):
    """Apply dropout while training; identity otherwise.

    With ``keep_prob == 1.0`` this is a no-op. When ``is_training`` is not
    given, the graph-level training flag is consulted at run time.
    """
    if keep_prob == 1.0:
        return x
    training = (x.graph.is_training if is_training is None
                else tf.convert_to_tensor(is_training))
    return tf.cond(training,
                   lambda: tf.nn.dropout(x, keep_prob, noise_shape, seed),
                   lambda: x)
@layer
def dense(x, outdim, initializer=tf.glorot_uniform, bias=False, name=None):
    """Fully connected layer: ``out = x . W (+ b)``.

    :param x: rank-2 tensor; a rank-4 input is first flattened via ``flat2d``.
    :param outdim: output size; may also be a shape sequence, in which case
        the result is reshaped to ``[-1, *outdim]``.
    :param initializer: weight-initializer factory, called with the weight shape.
    :param bias: if True, add a learnable bias.
    :param name: optional name for the output tensor.
    :return: the layer output tensor.
    """
    if x.ndim == 4:
        x = x.flat2d()
    assert x.ndim == 2
    want_reshape = not isinstance(outdim, int)
    if want_reshape:
        target = [-1] + list(outdim)
        outdim = np.prod(outdim)
    wshape = [x.dims[-1], outdim]
    weights = tf.get_weight('W', shape=wshape, initializer=initializer(wshape))
    out = x.dot(weights)
    if bias:
        out = tf.nn.bias_add(out, tf.get_bias('b', shape=(outdim,),
                                              initializer=tf.zeros_initializer()))
    if want_reshape:
        out = out.reshape(target)
    return tf.identity(out, name=name)
@layer
def bias(x, initializer=tf.zeros_initializer, name=None):
outdim = x.dims[-1]
b = tf.get_bias('b', shape=(outdim,), initializer=initializer())
return tf.nn.bias_add(x, b, name=name)
def _pool_kernel_stide(dim, kernel, stride):
if isinstance(kernel, int):
kernel = [kernel] * dim
if isinstance(stride, int):
stride = [stride] * dim
assert len(kernel) == dim and len(stride) == dim
return [1] + list(kernel) + [1], [1] + list(stride) + [1]
@layer
def maxpool(x, kernel=2, stride=None, padding='SAME'):
nd = x.ndim - 2
stride = kernel if stride is None else stride
kernel, stride = _pool_kernel_stide(nd, kernel, stride)
if nd == 2:
return tf.nn.max_pool(x, kernel, stride, padding)
elif nd == 3:
return tf.nn.max_pool3d(x, kernel, stride, padding)
else:
raise ValueError('maxpool support {0}? '.format(nd))
@layer
def maxpool_where(x, kernel, stride=None, pads=None, padding='SAME', keep=None
):
assert stride is None and padding == 'SAME'
stride = kernel
pooled = maxpool(x, kernel, stride=stride, padding=padding)
mask = where_pooled(x, pooled, kernel, pads=pads)
if keep is None:
return pooled, mask
else:
keep.append(mask)
return pooled
@layer
def where_pooled(x, pooled, kernel=None, pads=None):
"""
return mask
:param x:
:param pooled:
:param kernel:
:param pads:
:return:
"""
assert x.ndim == 4
import math
if kernel is None:
kernel = [math.ceil(float(d) / float(p)) for d, p in zip(x.dims,
pooled.zip)]
repeat = pooled.repeats(kernel, axis=[1, 2])
elif isinstance(kernel, (tuple, list)):
repeat = pooled.repeats(kernel, axis=[1, 2])
else:
repeat = pooled.repeats([kernel, kernel], axis=[1, 2])
if pads is not None:
repeat = repeat.pad(pads, axis=[1, 2])
dim = x.dims
sameshaped = repeat[:dim[0], :dim[1], :dim[2], :dim[3]]
mask = tf.equal(x, sameshaped).to_float()
return mask
<|reserved_special_token_0|>
@layer
def unpool_repeat(x, kernel):
""" upsample by repeating"""
if not isinstance(kernel, (list, tuple)) and isinstance(kernel, int):
kernel = [kernel] * (x.ndim - 2)
return x.repeats(kernel, axis=list(range(1, x.ndim - 2)))
@layer
def avgpool(x, kernel, stride, padding='SAME'):
nd = x.ndim - 2
kernel, stride = _pool_kernel_stide(nd, kernel, stride)
if nd == 2:
return tf.nn.avg_pool(x, kernel, stride, padding)
elif nd == 3:
return tf.nn.avg_pool3d(x, kernel, stride, padding)
else:
raise ValueError('avgpool support {0}? '.format(nd))
@layer
def gpool(x, keepdims=True):
"""
global_avgpool
:param x:
:param keepdims:
:return:
"""
axis = list(range(1, x.ndim - 1))
return x.mean(axis=axis, keepdims=keepdims)
def _atrous1d(x, kernel, rate, padding='SAME'):
"""
cf https://www.tensorflow.org/versions/r0.11/api_docs/python/nn.html#atrous_conv2d
:param x: [batch, time, channel]
:param kernel: [1, 1, inchannel, outchannel]
:param rate: dialtion rate
:param padding: 'same' or 'valid'
:param bias:
:return:
"""
if rate == 1:
out = tf.nn.conv1d(x, kernel, stride=(1, 1, 1), padding=padding)
return out
if padding == 'SAME':
filter_width = kernel.dims[0]
filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
pad = filter_width_up - 1
pad_left = pad // 2
pad_right = pad - pad_left
elif padding == 'VALID':
pad_left = 0
pad_right = 0
else:
raise ValueError('Invalid padding')
in_width = x.dims[1] + pad_left + pad_right
pad_right_extra = (rate - in_width % rate) % rate
pads = [(0, 0), (pad_left, pad_right + pad_right_extra), (0, 0)]
out = x.time_to_batch(rate, pads)
out = tf.nn.conv1d(out, kernel, stride=(1, 1, 1), padding='VALID')
crops = [(0, 0), (0, pad_right_extra), (0, 0)]
out = out.batch_to_time(rate, crops)
return out
@layer
def atrous(x, outdim, kernel, rate, pad=0, padding='SAME', initializer=tf.
he_uniform, bias=None, **kwargs):
assert isinstance(pad, int)
nd = x.ndim - 2
if pad:
pads = [(0, 0)] + [(pad, pad)] * nd + [(0, 0)]
x = tf.pad(x, pads, mode='CONSTANT')
kernel = _kernel_shape(nd, kernel, x.dims[-1], outdim)
W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),
**kwargs)
if nd == 1:
out = _atrous1d(x, W, rate, padding=padding)
elif nd == 2:
out = tf.nn.atrous_conv2d(x, W, rate, padding)
else:
raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))
if bias is not None:
b = tf.get_bias('b', shape=(outdim,), initializer=tf.
zeros_initializer(), **kwargs)
out = tf.nn.bias_add(out, b)
return out
<|reserved_special_token_0|>
@layer
def deconv(x, outdim, kernel, stride=1, padding='SAME', initializer=tf.
he_uniform, bias=False, extra=None, **kwargs):
nd = x.ndim - 2
out_shape = _deconv_outshape(nd, x.dims, outdim, kernel, stride,
padding, extra)
oshape = tf.TensorShape(out_shape)
if out_shape[0] is None:
out_shape[0] = tf.shape(x)[0]
out_shape = tf.stack(out_shape)
kernel_shape = _kernel_shape(nd, kernel, outdim, x.dims[-1])
stride = _stride_shape(nd, stride)
W = tf.get_weight('W', shape=kernel_shape, initializer=initializer(
kernel_shape))
if nd == 2:
out = tf.nn.conv2d_transpose(x, W, out_shape, strides=stride,
padding=padding)
elif nd == 3:
out = tf.nn.conv3d_transpose(x, W, out_shape, strides=stride,
padding=padding)
else:
raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))
if bias:
b = tf.get_bias('b', shape=(outdim,), initializer=tf.
zeros_initializer(), **kwargs)
out = tf.nn.bias_add(out, b)
out.set_shape(oshape)
return out
@layer
def dwconv(x, kernel, multiplier=1, stride=1, pad=0, padding='SAME',
initializer=tf.he_uniform, bias=False, **kwargs):
if pad:
pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]
x = tf.pad(x, pads, mode='CONSTANT')
kernel = _kernel_shape(2, kernel, x.dims[-1], multiplier)
stride = _stride_shape(2, stride)
W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),
**kwargs)
out = tf.nn.depthwise_conv2d(x, W, stride, padding)
if bias:
outdim = kernel[2] * multiplier
b = tf.get_bias('b', shape=(outdim,), initializer=tf.
zeros_initializer(), **kwargs)
out = tf.nn.bias_add(out, b)
return out
<|reserved_special_token_0|>
@layer
def keep(t, keepto, collection=None):
"""
append to list and return t as is
:param t: tensor
:param keepto: list
:return:
"""
if collection is not None:
tf.add_to_collection(collection, t)
keepto.append(t)
return t
<|reserved_special_token_0|>
@layer
def iname(t, name):
return tf.identity(t, name=name)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _kernel_shape(nd, k, indim, outdim):
if isinstance(k, int):
k = [k for _ in range(nd)]
k = list(k)
assert len(k) == nd
k.extend([indim, outdim])
return k
def _stride_shape(nd, s):
"""
:param nd:
:param s: int | list | tuple
:return:
"""
if isinstance(s, int):
s = [s for _ in range(nd)]
s = list(s)
assert len(s) == nd
s = [1] + s + [1]
return s
@layer
def conv(x, outdim, kernel=3, stride=1, pad=0, padding='SAME', mode=
'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):
nd = x.ndim
if nd == 3:
return conv1d(x, outdim, kernel, stride=stride, pad=pad, padding=
padding, mode=mode, initializer=initializer, bias=bias, **kwargs)
elif nd == 4:
return conv2d(x, outdim, kernel, stride=stride, pad=pad, padding=
padding, mode=mode, initializer=initializer, bias=bias, **kwargs)
elif nd == 5:
return conv3d(x, outdim, kernel, stride=stride, pad=pad, padding=
padding, mode=mode, initializer=initializer, bias=bias, **kwargs)
else:
raise ValueError('conv for {nd}? nd <= 5'.format(nd=nd))
@layer
def conv1d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode=
'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):
kernel = _kernel_shape(1, kernel, x.dims[-1], outdim)
pads = None
if padding == 'SAME' and mode != 'CONSTANT':
half = (kernel[0] - 1) // 2
pads = [(0, 0), (pad + half, pad + kernel[0] - 1 - half), (0, 0)]
padding = 'VALID'
elif pad:
pads = [(0, 0), (pad, pad), (0, 0)]
if pads is not None:
x = tf.pad(x, pads, mode=mode)
W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),
**kwargs)
out = tf.nn.conv1d(x, W, stride, padding)
if bias:
b = tf.get_bias('b', shape=(outdim,), initializer=tf.
zeros_initializer(), **kwargs)
out = tf.nn.bias_add(out, b)
return out
@layer
def conv2d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode=
'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):
kernel = _kernel_shape(2, kernel, x.dims[-1], outdim)
stride = _stride_shape(2, stride)
pads = None
if padding == 'SAME' and mode != 'CONSTANT':
half = (kernel[0] - 1) // 2, (kernel[1] - 1) // 2
pads = [(0, 0), (pad + half[0], pad + kernel[0] - 1 - half[0]), (
pad + half[1], pad + kernel[1] - 1 - half[1]), (0, 0)]
padding = 'VALID'
elif pad:
pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]
if pads is not None:
x = tf.pad(x, pads, mode=mode)
W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),
**kwargs)
out = tf.nn.conv2d(x, W, stride, padding)
if bias:
b = tf.get_bias('b', shape=(outdim,), initializer=tf.
zeros_initializer(), **kwargs)
out = tf.nn.bias_add(out, b)
return out
<|reserved_special_token_0|>
@layer
def renorm(x, axis=-1, momentum=0.99, epsilon=0.001, training=None, gamma=
1.0, beta=0.0, stddev=0.002, renorm_momentum=0.99, renorm_clipping=None,
**kwargs):
"""
https://arxiv.org/abs/1702.03275
https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization
:param x:
:param dict renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar Tensors
used to clip the renorm correction. The correction (r, d) is used as corrected_value = normalized_value * r + d,
with r clipped to [rmin, rmax], and d to [-dmax, dmax].
Missing rmax, rmin, dmax are set to inf, 0, inf, respectively.
:return:
"""
init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)
init_beta = tf.constant_initializer(beta)
reuse = tf.get_variable_scope().reuse
if training is None and (reuse or kwargs.get('reuse', False)):
training = False
elif training is None:
training = x.graph.is_training
if renorm_clipping is not None:
if renorm_clipping.get('rmin', None) is None:
rmax = renorm_clipping.get('rmax', None)
if rmax is not None and not np.isinf(rmax):
rmin = 1 / rmax
renorm_clipping['rmin'] = rmin
out = tf.layers.batch_normalization(x, axis=axis, momentum=momentum,
epsilon=epsilon, beta_initializer=init_beta, gamma_initializer=
init_gamma, training=training, renorm=True, renorm_clipping=
renorm_clipping, renorm_momentum=renorm_momentum, **kwargs)
return out
<|reserved_special_token_0|>
@layer
def cnorm(x, labels, klass=None, stddev=0.01, beta=0.0, gamma=1.0, epsilon=
1e-05):
"""
conditional instance normalization (by label index)
for learning embedding value of beta and gamma
# https://arxiv.org/pdf/1610.07629.pdf for conditional instance normalization
:param x:
:param labels: [B,]
:param klass: size of embedding var
:param gamma: initial_gamma
:param stddev: stddev for gamma random init
:param beta: initial beta value
:param epsilon: 1e-5 for var_epsilon
:return:
"""
assert klass is not None
init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)
init_beta = tf.constant_initializer(beta)
shape = [1] * x.ndim
shape[0] = klass
shape[-1] = x.dims[-1]
beta_v = tf.get_weight(name='beta', shape=shape, initializer=init_beta)
gamma_v = tf.get_weight(name='gamma', shape=shape, initializer=init_gamma)
beta_l = tf.nn.embedding_lookup(beta_v, labels)
gamma_l = tf.nn.embedding_lookup(gamma_v, labels)
return inorm(x, beta=beta_l, gamma=gamma_l, epsilon=epsilon)
@layer
def lnorm(x, center=True, scale=True, activation_fn=None, reuse=None,
variables_collections=None, outputs_collections=None, trainable=True,
begin_norm_axis=1, begin_params_axis=-1, scope=None, **kwargs):
"""
# layer normalization
:param x:
:return:
"""
return tf.contrib.layers.layer_norm(x, center=center, scale=scale,
activation_fn=activation_fn, reuse=reuse, variables_collections=
variables_collections, outputs_collections=outputs_collections,
trainable=trainable, begin_norm_axis=begin_norm_axis,
begin_params_axis=begin_params_axis, scope=None, **kwargs)
@layer
def gnorm(x, group):
"""
group normalization
:param x: [N, ...., C]
:param int group: G,
:return:
"""
shape = list(x.dims)
if shape[0] is None:
shape[0] = -1
ch = shape[-1]
shape[-1] = ch // group
shape.append(group)
assert ch // group * group == ch
x = tf.reshape(x, shape)
x_n = lnorm(x)
shape = shape[:-1]
shape[-1] = ch
x = tf.reshape(x_n, shape)
return x
@layer
def dropout(x, keep_prob=0.5, is_training=None, noise_shape=None, seed=None):
if keep_prob == 1.0:
return x
def _dropout():
return tf.nn.dropout(x, keep_prob, noise_shape, seed)
if is_training is None:
is_training = x.graph.is_training
else:
is_training = tf.convert_to_tensor(is_training)
return tf.cond(is_training, _dropout, lambda : x)
@layer
def dense(x, outdim, initializer=tf.glorot_uniform, bias=False, name=None):
"""
out = dense( shape=shape, init=None, paramset=None)
:param x: tensor
:param bias:
:param outdim: output_size
:param initializer:
:param name:
:return: layer | output | (output, params)
"""
if x.ndim == 4:
x = x.flat2d()
assert x.ndim == 2
outshape = not isinstance(outdim, int)
if outshape:
dim = [-1] + list(outdim)
outdim = np.prod(outdim)
shape = [x.dims[-1], outdim]
W = tf.get_weight('W', shape=shape, initializer=initializer(shape))
out = x.dot(W)
if bias:
b = tf.get_bias('b', shape=(outdim,), initializer=tf.
zeros_initializer())
out = tf.nn.bias_add(out, b)
if outshape:
out = out.reshape(dim)
return tf.identity(out, name=name)
@layer
def bias(x, initializer=tf.zeros_initializer, name=None):
outdim = x.dims[-1]
b = tf.get_bias('b', shape=(outdim,), initializer=initializer())
return tf.nn.bias_add(x, b, name=name)
def _pool_kernel_stide(dim, kernel, stride):
if isinstance(kernel, int):
kernel = [kernel] * dim
if isinstance(stride, int):
stride = [stride] * dim
assert len(kernel) == dim and len(stride) == dim
return [1] + list(kernel) + [1], [1] + list(stride) + [1]
@layer
def maxpool(x, kernel=2, stride=None, padding='SAME'):
nd = x.ndim - 2
stride = kernel if stride is None else stride
kernel, stride = _pool_kernel_stide(nd, kernel, stride)
if nd == 2:
return tf.nn.max_pool(x, kernel, stride, padding)
elif nd == 3:
return tf.nn.max_pool3d(x, kernel, stride, padding)
else:
raise ValueError('maxpool support {0}? '.format(nd))
@layer
def maxpool_where(x, kernel, stride=None, pads=None, padding='SAME', keep=None
):
assert stride is None and padding == 'SAME'
stride = kernel
pooled = maxpool(x, kernel, stride=stride, padding=padding)
mask = where_pooled(x, pooled, kernel, pads=pads)
if keep is None:
return pooled, mask
else:
keep.append(mask)
return pooled
@layer
def where_pooled(x, pooled, kernel=None, pads=None):
"""
return mask
:param x:
:param pooled:
:param kernel:
:param pads:
:return:
"""
assert x.ndim == 4
import math
if kernel is None:
kernel = [math.ceil(float(d) / float(p)) for d, p in zip(x.dims,
pooled.zip)]
repeat = pooled.repeats(kernel, axis=[1, 2])
elif isinstance(kernel, (tuple, list)):
repeat = pooled.repeats(kernel, axis=[1, 2])
else:
repeat = pooled.repeats([kernel, kernel], axis=[1, 2])
if pads is not None:
repeat = repeat.pad(pads, axis=[1, 2])
dim = x.dims
sameshaped = repeat[:dim[0], :dim[1], :dim[2], :dim[3]]
mask = tf.equal(x, sameshaped).to_float()
return mask
<|reserved_special_token_0|>
@layer
def unpool_repeat(x, kernel):
""" upsample by repeating"""
if not isinstance(kernel, (list, tuple)) and isinstance(kernel, int):
kernel = [kernel] * (x.ndim - 2)
return x.repeats(kernel, axis=list(range(1, x.ndim - 2)))
@layer
def avgpool(x, kernel, stride, padding='SAME'):
nd = x.ndim - 2
kernel, stride = _pool_kernel_stide(nd, kernel, stride)
if nd == 2:
return tf.nn.avg_pool(x, kernel, stride, padding)
elif nd == 3:
return tf.nn.avg_pool3d(x, kernel, stride, padding)
else:
raise ValueError('avgpool support {0}? '.format(nd))
@layer
def gpool(x, keepdims=True):
"""
global_avgpool
:param x:
:param keepdims:
:return:
"""
axis = list(range(1, x.ndim - 1))
return x.mean(axis=axis, keepdims=keepdims)
def _atrous1d(x, kernel, rate, padding='SAME'):
"""
cf https://www.tensorflow.org/versions/r0.11/api_docs/python/nn.html#atrous_conv2d
:param x: [batch, time, channel]
:param kernel: [1, 1, inchannel, outchannel]
:param rate: dialtion rate
:param padding: 'same' or 'valid'
:param bias:
:return:
"""
if rate == 1:
out = tf.nn.conv1d(x, kernel, stride=(1, 1, 1), padding=padding)
return out
if padding == 'SAME':
filter_width = kernel.dims[0]
filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
pad = filter_width_up - 1
pad_left = pad // 2
pad_right = pad - pad_left
elif padding == 'VALID':
pad_left = 0
pad_right = 0
else:
raise ValueError('Invalid padding')
in_width = x.dims[1] + pad_left + pad_right
pad_right_extra = (rate - in_width % rate) % rate
pads = [(0, 0), (pad_left, pad_right + pad_right_extra), (0, 0)]
out = x.time_to_batch(rate, pads)
out = tf.nn.conv1d(out, kernel, stride=(1, 1, 1), padding='VALID')
crops = [(0, 0), (0, pad_right_extra), (0, 0)]
out = out.batch_to_time(rate, crops)
return out
@layer
def atrous(x, outdim, kernel, rate, pad=0, padding='SAME', initializer=tf.
he_uniform, bias=None, **kwargs):
assert isinstance(pad, int)
nd = x.ndim - 2
if pad:
pads = [(0, 0)] + [(pad, pad)] * nd + [(0, 0)]
x = tf.pad(x, pads, mode='CONSTANT')
kernel = _kernel_shape(nd, kernel, x.dims[-1], outdim)
W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),
**kwargs)
if nd == 1:
out = _atrous1d(x, W, rate, padding=padding)
elif nd == 2:
out = tf.nn.atrous_conv2d(x, W, rate, padding)
else:
raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))
if bias is not None:
b = tf.get_bias('b', shape=(outdim,), initializer=tf.
zeros_initializer(), **kwargs)
out = tf.nn.bias_add(out, b)
return out
<|reserved_special_token_0|>
@layer
def deconv(x, outdim, kernel, stride=1, padding='SAME', initializer=tf.
he_uniform, bias=False, extra=None, **kwargs):
nd = x.ndim - 2
out_shape = _deconv_outshape(nd, x.dims, outdim, kernel, stride,
padding, extra)
oshape = tf.TensorShape(out_shape)
if out_shape[0] is None:
out_shape[0] = tf.shape(x)[0]
out_shape = tf.stack(out_shape)
kernel_shape = _kernel_shape(nd, kernel, outdim, x.dims[-1])
stride = _stride_shape(nd, stride)
W = tf.get_weight('W', shape=kernel_shape, initializer=initializer(
kernel_shape))
if nd == 2:
out = tf.nn.conv2d_transpose(x, W, out_shape, strides=stride,
padding=padding)
elif nd == 3:
out = tf.nn.conv3d_transpose(x, W, out_shape, strides=stride,
padding=padding)
else:
raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))
if bias:
b = tf.get_bias('b', shape=(outdim,), initializer=tf.
zeros_initializer(), **kwargs)
out = tf.nn.bias_add(out, b)
out.set_shape(oshape)
return out
@layer
def dwconv(x, kernel, multiplier=1, stride=1, pad=0, padding='SAME',
    initializer=tf.he_uniform, bias=False, **kwargs):
    """
    Depthwise 2-D convolution: each input channel is convolved with its own
    set of `multiplier` filters.

    :param x: NHWC input tensor
    :param kernel: int or (h, w) spatial kernel size
    :param multiplier: channel multiplier (output has in_ch * multiplier channels)
    :param stride: int or (h, w) stride
    :param pad: optional symmetric constant pre-padding on H and W
    :param bias: add a learned per-channel bias when True
    :return: convolved tensor
    """
    if pad:
        x = tf.pad(x, [(0, 0), (pad, pad), (pad, pad), (0, 0)], mode='CONSTANT')
    kshape = _kernel_shape(2, kernel, x.dims[-1], multiplier)
    strides = _stride_shape(2, stride)
    W = tf.get_weight('W', shape=kshape, initializer=initializer(kshape),
        **kwargs)
    out = tf.nn.depthwise_conv2d(x, W, strides, padding)
    if bias:
        # depthwise output channels = in_channels * multiplier
        outdim = kshape[2] * multiplier
        b = tf.get_bias('b', shape=(outdim,), initializer=tf.
            zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
<|reserved_special_token_0|>
@layer
def sizedown(x, factors=(2, 2), extras=(0, 0), method=ResizeMethod.
    NEAREST_NEIGHBOR, align_corners=False):
    """
    Downscale an image tensor by integer factors.

    :param x: NHWC image tensor with statically known H and W
    :param factors: int or (fh, fw) integer division factors for H and W
    :param extras: int or (eh, ew) added to the divided size
    :param method: tf.image ResizeMethod
    :return: resized tensor of shape [N, H//fh + eh, W//fw + ew, C]
    """
    if isinstance(factors, int):
        factors = factors, factors
    if isinstance(extras, int):
        extras = extras, extras
    inshape = x.dims
    target_hw = [inshape[1] // factors[0] + extras[0],
        inshape[2] // factors[1] + extras[1]]
    return tf.image.resize_images(x, target_hw, method=method,
        align_corners=align_corners)
@layer
def keep(t, keepto, collection=None):
    """
    Record a tensor for later retrieval and pass it through unchanged.

    :param t: tensor
    :param keepto: list that receives t (mutated in place)
    :param collection: optional graph-collection name to also register t in
    :return: t, unchanged
    """
    if collection is not None:
        tf.add_to_collection(collection, t)
    keepto.append(t)
    return t
<|reserved_special_token_0|>
@layer
def iname(t, name):
    """Attach a graph name to a tensor via tf.identity and return the result."""
    return tf.identity(t, name=name)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _kernel_shape(nd, k, indim, outdim):
if isinstance(k, int):
k = [k for _ in range(nd)]
k = list(k)
assert len(k) == nd
k.extend([indim, outdim])
return k
def _stride_shape(nd, s):
"""
:param nd:
:param s: int | list | tuple
:return:
"""
if isinstance(s, int):
s = [s for _ in range(nd)]
s = list(s)
assert len(s) == nd
s = [1] + s + [1]
return s
@layer
def conv(x, outdim, kernel=3, stride=1, pad=0, padding='SAME', mode=
    'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):
    """
    Rank-dispatching convolution: 3-D input -> conv1d, 4-D -> conv2d,
    5-D -> conv3d. All keyword arguments are forwarded unchanged.

    :param x: input tensor of rank 3, 4 or 5
    :raises ValueError: for any other rank
    """
    nd = x.ndim
    dispatch = {3: conv1d, 4: conv2d, 5: conv3d}
    fn = dispatch.get(nd)
    if fn is None:
        raise ValueError('conv for {nd}? nd <= 5'.format(nd=nd))
    return fn(x, outdim, kernel, stride=stride, pad=pad, padding=padding,
        mode=mode, initializer=initializer, bias=bias, **kwargs)
@layer
def conv1d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode=
    'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):
    """
    1-D convolution over [batch, time, channel] input.

    :param x: rank-3 input tensor
    :param outdim: output channel count
    :param kernel: int or [width] kernel spec
    :param pad: extra symmetric padding on the time axis
    :param mode: tf.pad mode; a non-CONSTANT mode with SAME padding is
        emulated by padding explicitly and convolving VALID
    :param bias: add a learned bias when True
    """
    kshape = _kernel_shape(1, kernel, x.dims[-1], outdim)
    if padding == 'SAME' and mode != 'CONSTANT':
        # emulate SAME with an explicit pad so the requested mode applies
        left = (kshape[0] - 1) // 2
        right = kshape[0] - 1 - left
        x = tf.pad(x, [(0, 0), (pad + left, pad + right), (0, 0)], mode=mode)
        padding = 'VALID'
    elif pad:
        x = tf.pad(x, [(0, 0), (pad, pad), (0, 0)], mode=mode)
    W = tf.get_weight('W', shape=kshape, initializer=initializer(kshape),
        **kwargs)
    out = tf.nn.conv1d(x, W, stride, padding)
    if bias:
        b = tf.get_bias('b', shape=(outdim,), initializer=tf.
            zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
@layer
def conv2d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode=
    'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):
    """
    2-D convolution over NHWC input.

    :param x: rank-4 input tensor
    :param outdim: output channel count
    :param kernel: int or (h, w) kernel spec
    :param pad: extra symmetric padding on H and W
    :param mode: tf.pad mode; a non-CONSTANT mode with SAME padding is
        emulated by padding explicitly and convolving VALID
    :param bias: add a learned bias when True
    """
    kshape = _kernel_shape(2, kernel, x.dims[-1], outdim)
    strides = _stride_shape(2, stride)
    if padding == 'SAME' and mode != 'CONSTANT':
        # emulate SAME with an explicit pad so the requested mode applies
        half_h = (kshape[0] - 1) // 2
        half_w = (kshape[1] - 1) // 2
        pads = [(0, 0),
            (pad + half_h, pad + kshape[0] - 1 - half_h),
            (pad + half_w, pad + kshape[1] - 1 - half_w),
            (0, 0)]
        x = tf.pad(x, pads, mode=mode)
        padding = 'VALID'
    elif pad:
        x = tf.pad(x, [(0, 0), (pad, pad), (pad, pad), (0, 0)], mode=mode)
    W = tf.get_weight('W', shape=kshape, initializer=initializer(kshape),
        **kwargs)
    out = tf.nn.conv2d(x, W, strides, padding)
    if bias:
        b = tf.get_bias('b', shape=(outdim,), initializer=tf.
            zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
<|reserved_special_token_0|>
@layer
def renorm(x, axis=-1, momentum=0.99, epsilon=0.001, training=None, gamma=
    1.0, beta=0.0, stddev=0.002, renorm_momentum=0.99, renorm_clipping=None,
    **kwargs):
    """
    Batch renormalization layer.

    https://arxiv.org/abs/1702.03275
    https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization

    :param x: input tensor
    :param training: training-mode flag; when None it is inferred (False under
        variable reuse, otherwise the graph-level training flag)
    :param dict renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar Tensors
        used to clip the renorm correction. The correction (r, d) is used as corrected_value = normalized_value * r + d,
        with r clipped to [rmin, rmax], and d to [-dmax, dmax].
        Missing rmax, rmin, dmax are set to inf, 0, inf, respectively.
    :return: batch-renormalized tensor
    """
    init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)
    init_beta = tf.constant_initializer(beta)
    # under variable reuse (shared weights), default to inference mode
    reuse = tf.get_variable_scope().reuse
    if training is None and (reuse or kwargs.get('reuse', False)):
        training = False
    elif training is None:
        # graph-level training flag (project extension on tf.Graph)
        training = x.graph.is_training
    if renorm_clipping is not None:
        if renorm_clipping.get('rmin', None) is None:
            # default rmin to 1/rmax when only rmax was supplied
            # (mutates the caller's dict in place)
            rmax = renorm_clipping.get('rmax', None)
            if rmax is not None and not np.isinf(rmax):
                rmin = 1 / rmax
                renorm_clipping['rmin'] = rmin
    out = tf.layers.batch_normalization(x, axis=axis, momentum=momentum,
        epsilon=epsilon, beta_initializer=init_beta, gamma_initializer=
        init_gamma, training=training, renorm=True, renorm_clipping=
        renorm_clipping, renorm_momentum=renorm_momentum, **kwargs)
    return out
@layer
def inorm(x, beta=0.0, gamma=1.0, stddev=0.002, epsilon=1e-05, axis=None,
    trainable=True, **kwargs):
    """
    Instance normalization over the spatial axes.

    Output does not depend on training mode (statistics are computed
    per-sample, not from a moving average).
    # https://arxiv.org/pdf/1607.08022.pdf for instance normalization
    # z = gamma * (x - m) / s + beta

    :param x: input tensor; [B, H, W, C] is the common case
    :param beta: initial offset value, or a tensor used directly when
        trainable is False (cnorm passes per-label tensors this way)
    :param gamma: initial scale value, or a tensor when trainable is False
    :param stddev: stddev of the random init for the trainable gamma
    :param epsilon: variance epsilon
    :param axis: unused — NOTE(review): accepted but never read; verify callers
    :return: normalized tensor with the same shape as x
    """
    # normalize over every axis except batch (0) and channel (last)
    axes = list(range(1, 1 + x.ndim - 2))
    m, v = tf.nn.moments(x, axes=axes, keep_dims=True)
    shapelast = x.dims[-1:]
    if trainable:
        init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)
        init_beta = tf.constant_initializer(beta)
        gamma_t = tf.get_weight(name='gamma', shape=shapelast, initializer=
            init_gamma)
        beta_t = tf.get_bias(name='beta', shape=shapelast, initializer=
            init_beta)
    else:
        # use the caller-supplied values (scalars or tensors) as-is
        gamma_t = gamma
        beta_t = beta
    out = tf.nn.batch_normalization(x, m, v, offset=beta_t, scale=gamma_t,
        variance_epsilon=epsilon)
    return out
@layer
def cnorm(x, labels, klass=None, stddev=0.01, beta=0.0, gamma=1.0, epsilon=
    1e-05):
    """
    Conditional instance normalization, keyed by label index.

    Learns one (beta, gamma) embedding row per class and applies instance
    normalization with the looked-up parameters.
    # https://arxiv.org/pdf/1610.07629.pdf for conditional instance normalization

    :param x: input tensor
    :param labels: [B,] integer class indices
    :param klass: number of embedding rows (required)
    :param gamma: initial gamma mean
    :param stddev: stddev for the random gamma init
    :param beta: initial beta value
    :param epsilon: variance epsilon
    :return: normalized tensor
    """
    assert klass is not None
    # one embedding row per class, broadcastable over all but batch/channel
    param_shape = [1] * x.ndim
    param_shape[0] = klass
    param_shape[-1] = x.dims[-1]
    gamma_init = tf.random_normal_initializer(mean=gamma, stddev=stddev)
    beta_init = tf.constant_initializer(beta)
    beta_table = tf.get_weight(name='beta', shape=param_shape,
        initializer=beta_init)
    gamma_table = tf.get_weight(name='gamma', shape=param_shape,
        initializer=gamma_init)
    beta_sel = tf.nn.embedding_lookup(beta_table, labels)
    gamma_sel = tf.nn.embedding_lookup(gamma_table, labels)
    return inorm(x, beta=beta_sel, gamma=gamma_sel, epsilon=epsilon)
@layer
def lnorm(x, center=True, scale=True, activation_fn=None, reuse=None,
    variables_collections=None, outputs_collections=None, trainable=True,
    begin_norm_axis=1, begin_params_axis=-1, scope=None, **kwargs):
    """
    Layer normalization (thin wrapper over tf.contrib.layers.layer_norm).

    :param x: input tensor
    :param scope: optional variable scope for the created variables
    :return: normalized tensor with the same shape as x
    """
    # BUG FIX: `scope` was previously hard-coded to None in the call,
    # silently discarding any caller-supplied scope.
    return tf.contrib.layers.layer_norm(x, center=center, scale=scale,
        activation_fn=activation_fn, reuse=reuse, variables_collections=
        variables_collections, outputs_collections=outputs_collections,
        trainable=trainable, begin_norm_axis=begin_norm_axis,
        begin_params_axis=begin_params_axis, scope=scope, **kwargs)
@layer
def gnorm(x, group):
    """
    Group normalization.

    Reshapes the channel axis C into (C // group, group), applies lnorm,
    then restores the original shape.

    NOTE(review): lnorm with its default begin_norm_axis=1 normalizes over
    ALL non-batch axes, not per group — confirm this matches the intended
    group-norm semantics before relying on it.

    :param x: [N, ...., C] tensor; C must be divisible by group
    :param int group: number of groups G
    :return: tensor with the same shape as x
    """
    shape = list(x.dims)
    if shape[0] is None:
        # dynamic batch: let reshape infer it
        shape[0] = -1
    ch = shape[-1]
    # split channels: [..., C] -> [..., C // G, G]
    shape[-1] = ch // group
    shape.append(group)
    assert ch // group * group == ch
    x = tf.reshape(x, shape)
    x_n = lnorm(x)
    # restore original channel layout
    shape = shape[:-1]
    shape[-1] = ch
    x = tf.reshape(x_n, shape)
    return x
@layer
def dropout(x, keep_prob=0.5, is_training=None, noise_shape=None, seed=None):
    """
    Dropout applied only in training mode (selected via tf.cond).

    :param x: input tensor
    :param keep_prob: probability of keeping a unit; 1.0 is a no-op
    :param is_training: bool/tensor flag; defaults to the graph training flag
    :return: x with dropout applied when training, x unchanged otherwise
    """
    if keep_prob == 1.0:
        # nothing would be dropped; skip building the cond graph
        return x
    if is_training is None:
        # graph-level training flag (project extension on tf.Graph)
        is_training = x.graph.is_training
    else:
        is_training = tf.convert_to_tensor(is_training)
    apply_dropout = lambda: tf.nn.dropout(x, keep_prob, noise_shape, seed)
    return tf.cond(is_training, apply_dropout, lambda: x)
@layer
def dense(x, outdim, initializer=tf.glorot_uniform, bias=False, name=None):
    """
    Fully connected layer: out = x @ W (+ b).

    :param x: 2-D tensor, or 4-D tensor (flattened to 2-D first)
    :param outdim: int output size, or a shape tuple/list — in that case the
        layer produces prod(outdim) units and reshapes the output to [-1, *outdim]
    :param initializer: weight initializer factory taking the weight shape
    :param bias: add a learned bias when True
    :param name: optional name for the output tensor
    :return: output tensor
    """
    if x.ndim == 4:
        x = x.flat2d()
    assert x.ndim == 2
    reshape_out = not isinstance(outdim, int)
    if reshape_out:
        target_shape = [-1] + list(outdim)
        outdim = np.prod(outdim)
    wshape = [x.dims[-1], outdim]
    W = tf.get_weight('W', shape=wshape, initializer=initializer(wshape))
    out = x.dot(W)
    if bias:
        b = tf.get_bias('b', shape=(outdim,),
            initializer=tf.zeros_initializer())
        out = tf.nn.bias_add(out, b)
    if reshape_out:
        out = out.reshape(target_shape)
    return tf.identity(out, name=name)
@layer
def bias(x, initializer=tf.zeros_initializer, name=None):
    """Add a learned per-channel bias along the last axis of x."""
    channels = x.dims[-1]
    b = tf.get_bias('b', shape=(channels,), initializer=initializer())
    return tf.nn.bias_add(x, b, name=name)
def _pool_kernel_stide(dim, kernel, stride):
if isinstance(kernel, int):
kernel = [kernel] * dim
if isinstance(stride, int):
stride = [stride] * dim
assert len(kernel) == dim and len(stride) == dim
return [1] + list(kernel) + [1], [1] + list(stride) + [1]
@layer
def maxpool(x, kernel=2, stride=None, padding='SAME'):
    """
    Max pooling for 2-D or 3-D spatial inputs.

    :param x: NHWC (rank 4) or NDHWC (rank 5) tensor
    :param kernel: int or per-axis kernel sizes
    :param stride: int or per-axis strides; defaults to kernel
    :raises ValueError: for any other rank
    """
    nd = x.ndim - 2
    if stride is None:
        stride = kernel
    kernel, stride = _pool_kernel_stide(nd, kernel, stride)
    if nd == 2:
        return tf.nn.max_pool(x, kernel, stride, padding)
    if nd == 3:
        return tf.nn.max_pool3d(x, kernel, stride, padding)
    raise ValueError('maxpool support {0}? '.format(nd))
@layer
def maxpool_where(x, kernel, stride=None, pads=None, padding='SAME', keep=None
    ):
    """
    Max pool and also produce the max-location mask (see where_pooled).

    :param keep: when given, the mask is appended to this list and only the
        pooled tensor is returned; otherwise (pooled, mask) is returned
    """
    # only the stride == kernel, SAME-padded case is supported
    assert stride is None and padding == 'SAME'
    pooled = maxpool(x, kernel, stride=kernel, padding=padding)
    mask = where_pooled(x, pooled, kernel, pads=pads)
    if keep is None:
        return pooled, mask
    keep.append(mask)
    return pooled
@layer
def where_pooled(x, pooled, kernel=None, pads=None):
    """
    Build a float mask that is 1 where x equals the upsampled pooled value
    (i.e. at max-pool argmax positions).

    :param x: 4-D input tensor [B, H, W, C]
    :param pooled: tensor produced by max-pooling x
    :param kernel: pooling kernel (int or (h, w)); inferred from the spatial
        shapes when None
    :param pads: optional extra padding applied to the upsampled tensor
    :return: float mask with the shape of x
    """
    assert x.ndim == 4
    import math
    if kernel is None:
        # BUG FIX: was `zip(x.dims, pooled.zip)` — `pooled.zip` is not an
        # attribute, and zipping all four dims yields four factors for a
        # two-axis repeat. Infer the kernel from the spatial dims only.
        kernel = [math.ceil(float(d) / float(p)) for d, p in zip(x.dims[1:
            3], pooled.dims[1:3])]
        repeat = pooled.repeats(kernel, axis=[1, 2])
    elif isinstance(kernel, (tuple, list)):
        repeat = pooled.repeats(kernel, axis=[1, 2])
    else:
        repeat = pooled.repeats([kernel, kernel], axis=[1, 2])
    if pads is not None:
        repeat = repeat.pad(pads, axis=[1, 2])
    # crop the upsampled tensor back to x's shape before comparing
    dim = x.dims
    sameshaped = repeat[:dim[0], :dim[1], :dim[2], :dim[3]]
    mask = tf.equal(x, sameshaped).to_float()
    return mask
<|reserved_special_token_0|>
@layer
def unpool_repeat(x, kernel):
    """
    Upsample by repeating each element along every spatial axis.

    :param x: tensor with layout [batch, *spatial, channel]
    :param kernel: int or per-spatial-axis repeat factors (length ndim - 2)
    """
    if not isinstance(kernel, (list, tuple)) and isinstance(kernel, int):
        kernel = [kernel] * (x.ndim - 2)
    # BUG FIX: the spatial axes are 1 .. ndim-2 inclusive, so the range must
    # end at ndim-1; the old range(1, ndim-2) dropped the last spatial axis,
    # leaving fewer axes than repeat factors.
    return x.repeats(kernel, axis=list(range(1, x.ndim - 1)))
@layer
def avgpool(x, kernel, stride, padding='SAME'):
    """
    Average pooling for 2-D or 3-D spatial inputs.

    :param x: NHWC (rank 4) or NDHWC (rank 5) tensor
    :raises ValueError: for any other rank
    """
    nd = x.ndim - 2
    kernel, stride = _pool_kernel_stide(nd, kernel, stride)
    if nd == 2:
        return tf.nn.avg_pool(x, kernel, stride, padding)
    if nd == 3:
        return tf.nn.avg_pool3d(x, kernel, stride, padding)
    raise ValueError('avgpool support {0}? '.format(nd))
@layer
def gpool(x, keepdims=True):
    """
    Global average pooling over all spatial axes.

    :param x: tensor with layout [batch, *spatial, channel]
    :param keepdims: keep the reduced axes as size-1 dimensions
    :return: spatially averaged tensor
    """
    spatial_axes = list(range(1, x.ndim - 1))
    return x.mean(axis=spatial_axes, keepdims=keepdims)
def _atrous1d(x, kernel, rate, padding='SAME'):
    """
    Dilated 1-D convolution via the time-to-batch trick.

    cf https://www.tensorflow.org/versions/r0.11/api_docs/python/nn.html#atrous_conv2d

    :param x: [batch, time, channel]
    :param kernel: [filter_width, inchannel, outchannel] weight tensor
    :param rate: dilation rate
    :param padding: 'SAME' or 'VALID'
    :return: convolved tensor

    NOTE(review): tf.nn.conv1d is called with stride=(1, 1, 1); some TF
    versions expect a scalar stride here — confirm against the pinned
    TF version.
    """
    if rate == 1:
        # no dilation: plain 1-D convolution
        out = tf.nn.conv1d(x, kernel, stride=(1, 1, 1), padding=padding)
        return out
    if padding == 'SAME':
        # pad so the dilated (upsampled) filter still yields SAME-size output
        filter_width = kernel.dims[0]
        filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
        pad = filter_width_up - 1
        pad_left = pad // 2
        pad_right = pad - pad_left
    elif padding == 'VALID':
        pad_left = 0
        pad_right = 0
    else:
        raise ValueError('Invalid padding')
    # make the padded width a multiple of rate for time_to_batch
    in_width = x.dims[1] + pad_left + pad_right
    pad_right_extra = (rate - in_width % rate) % rate
    pads = [(0, 0), (pad_left, pad_right + pad_right_extra), (0, 0)]
    # fold every rate-th timestep into the batch axis, convolve undilated,
    # then unfold and crop the extra alignment padding
    out = x.time_to_batch(rate, pads)
    out = tf.nn.conv1d(out, kernel, stride=(1, 1, 1), padding='VALID')
    crops = [(0, 0), (0, pad_right_extra), (0, 0)]
    out = out.batch_to_time(rate, crops)
    return out
@layer
def atrous(x, outdim, kernel, rate, pad=0, padding='SAME',
           initializer=tf.he_uniform, bias=None, **kwargs):
    """
    Atrous (dilated) convolution for 1-D or 2-D spatial input.

    :param x: tensor [batch, spatial..., channel]
    :param outdim: output channel count
    :param kernel: int or per-axis kernel sizes
    :param rate: dilation rate
    :param pad: extra symmetric zero padding applied before the conv
    :param padding: 'SAME' or 'VALID'
    :param initializer: weight initializer factory
    :param bias: add a zero-initialized bias when truthy
    :return: convolved tensor
    :raises NotImplementedError: for spatial rank other than 1 or 2
    """
    assert isinstance(pad, int)
    nd = x.ndim - 2
    if pad:
        pads = [(0, 0)] + [(pad, pad)] * nd + [(0, 0)]
        x = tf.pad(x, pads, mode='CONSTANT')
    kernel = _kernel_shape(nd, kernel, x.dims[-1], outdim)
    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),
                      **kwargs)
    if nd == 1:
        out = _atrous1d(x, W, rate, padding=padding)
    elif nd == 2:
        out = tf.nn.atrous_conv2d(x, W, rate, padding)
    else:
        raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))
    # BUGFIX: use truthiness like every other conv layer in this file.
    # The previous `if bias is not None` added a bias even when callers
    # explicitly passed bias=False.
    if bias:
        b = tf.get_bias('b', shape=(outdim,),
                        initializer=tf.zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
<|reserved_special_token_0|>
@layer
def deconv(x, outdim, kernel, stride=1, padding='SAME',
           initializer=tf.he_uniform, bias=False, extra=None, **kwargs):
    """
    Transposed ("de-") convolution for 2-D or 3-D spatial input.

    :param x: tensor [batch, spatial..., channel]
    :param outdim: output channel count
    :param kernel: int or per-axis kernel sizes
    :param stride: int or per-axis strides
    :param padding: 'SAME' or 'VALID'
    :param initializer: weight initializer factory
    :param bias: add a zero-initialized bias when truthy
    :param extra: output-size adjustment forwarded to _deconv_outshape
        (helper defined elsewhere in this file)
    :return: upsampled tensor with its static shape restored
    :raises NotImplementedError: for spatial rank other than 2 or 3
    """
    nd = x.ndim - 2
    out_shape = _deconv_outshape(nd, x.dims, outdim, kernel, stride,
                                 padding, extra)
    # remember the static shape before replacing None batch with a dynamic op
    oshape = tf.TensorShape(out_shape)
    if out_shape[0] is None:
        # dynamic batch size: fill it in at run time
        out_shape[0] = tf.shape(x)[0]
        out_shape = tf.stack(out_shape)
    # NOTE: transposed-conv kernels are [spatial..., outdim, indim]
    kernel_shape = _kernel_shape(nd, kernel, outdim, x.dims[-1])
    stride = _stride_shape(nd, stride)
    W = tf.get_weight('W', shape=kernel_shape,
                      initializer=initializer(kernel_shape))
    if nd == 2:
        out = tf.nn.conv2d_transpose(x, W, out_shape, strides=stride,
                                     padding=padding)
    elif nd == 3:
        out = tf.nn.conv3d_transpose(x, W, out_shape, strides=stride,
                                     padding=padding)
    else:
        raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))
    if bias:
        b = tf.get_bias('b', shape=(outdim,),
                        initializer=tf.zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    # conv*_transpose loses static shape info; restore it for later layers
    out.set_shape(oshape)
    return out
@layer
def dwconv(x, kernel, multiplier=1, stride=1, pad=0, padding='SAME',
           initializer=tf.he_uniform, bias=False, **kwargs):
    """
    Depthwise 2-D convolution.

    :param x: tensor [BHWC]
    :param kernel: int or per-axis kernel sizes
    :param multiplier: channel multiplier; output channels = in_channels * multiplier
    :param stride: int or per-axis strides
    :param pad: extra symmetric zero padding applied before the conv
    :param padding: 'SAME' or 'VALID'
    :param initializer: weight initializer factory
    :param bias: add a zero-initialized bias when truthy
    :return: depthwise-convolved tensor
    """
    if pad:
        pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]
        x = tf.pad(x, pads, mode='CONSTANT')
    # depthwise kernel: [kh, kw, in_channels, multiplier]
    kernel = _kernel_shape(2, kernel, x.dims[-1], multiplier)
    stride = _stride_shape(2, stride)
    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),
                      **kwargs)
    out = tf.nn.depthwise_conv2d(x, W, stride, padding)
    if bias:
        # one bias per output channel (in_channels * multiplier)
        outdim = kernel[2] * multiplier
        b = tf.get_bias('b', shape=(outdim,),
                        initializer=tf.zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
@layer
def subpixel(x, kernel, factor=2, stride=1, pad=0, padding='SAME',
             initializer=tf.he_uniform, bias=False, **kwargs):
    """
    Sub-pixel upsampling: convolve to factor**2 * C channels, then
    rearrange channels to space via channel_to_space.

    :param x: tensor [BHWC]
    :param kernel: int or per-axis kernel sizes
    :param factor: spatial upsampling factor
    :param stride: int or per-axis strides
    :param pad: extra symmetric zero padding applied before the conv
    :param padding: 'SAME' or 'VALID'
    :param initializer: weight initializer factory
    :param bias: add a zero-initialized bias when truthy
    :return: tensor upsampled by `factor` in both spatial dims
    """
    from .ireshape import channel_to_space
    assert x.ndim == 4
    # BUGFIX: `pad` was accepted but silently ignored; apply it like dwconv.
    if pad:
        pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]
        x = tf.pad(x, pads, mode='CONSTANT')
    indim = x.dims[-1]
    outdim = indim * factor * factor
    kernel = _kernel_shape(2, kernel, indim, outdim)
    stride = _stride_shape(2, stride)
    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel))
    out = tf.nn.conv2d(x, W, stride, padding=padding)
    if bias:
        b = tf.get_bias('b', shape=(outdim,),
                        initializer=tf.zeros_initializer())
        out = tf.nn.bias_add(out, b)
    out = channel_to_space(out, factor)
    return out
@layer
def leaky(x, slope=0.01, name=None):
    """
    Leaky ReLU: elementwise max(x, slope * x).

    see also pleaky (learned slope)
    :param x: input tensor
    :param slope: negative-side slope, 0.01 by default
    :param name: optional name for the output op
    :return: activated tensor
    """
    scaled = x * slope
    return tf.maximum(x, scaled, name=name)
<|reserved_special_token_0|>
@layer
def sizedown(x, factors=(2, 2), extras=(0, 0),
             method=ResizeMethod.NEAREST_NEIGHBOR, align_corners=False):
    """
    Shrink an image tensor by integer factors (plus optional extra pixels).

    :param x: image tensor [BHWC]
    :param factors: int or (fh, fw) downscale factors
    :param extras: int or (eh, ew) added to the target size after division
    :param method: tf.image resize method
    :param align_corners: forwarded to resize_images
    :return: resized tensor
    """
    inshape = x.dims
    if isinstance(factors, int):
        factors = factors, factors
    if isinstance(extras, int):
        extras = extras, extras
    # target height/width: floor-divide by factor, then add the extra margin
    hw = [inshape[1] // factors[0] + extras[0],
          inshape[2] // factors[1] + extras[1]]
    return tf.image.resize_images(x, hw, method=method,
                                  align_corners=align_corners)
@layer
def keep(t, keepto, collection=None):
    """
    Remember tensor `t` in the list `keepto` (and optionally in a graph
    collection), then pass it through unchanged.

    :param t: tensor
    :param keepto: list that receives t
    :param collection: optional graph collection name
    :return: t, unmodified
    """
    keepto.append(t)
    if collection is not None:
        tf.add_to_collection(collection, t)
    return t
<|reserved_special_token_0|>
@layer
def iname(t, name):
    """ return t renamed via tf.identity """
    return tf.identity(t, name=name)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _kernel_shape(nd, k, indim, outdim):
if isinstance(k, int):
k = [k for _ in range(nd)]
k = list(k)
assert len(k) == nd
k.extend([indim, outdim])
return k
def _stride_shape(nd, s):
"""
:param nd:
:param s: int | list | tuple
:return:
"""
if isinstance(s, int):
s = [s for _ in range(nd)]
s = list(s)
assert len(s) == nd
s = [1] + s + [1]
return s
@layer
def conv(x, outdim, kernel=3, stride=1, pad=0, padding='SAME', mode='CONSTANT',
         initializer=tf.he_uniform, bias=False, **kwargs):
    """
    N-d convolution dispatcher: picks conv1d/conv2d/conv3d from x.ndim.

    :param x: tensor [batch, spatial..., channel], 3 <= ndim <= 5
    :param outdim: output channel count
    :param kernel: int or per-axis kernel sizes
    :param stride: int or per-axis strides
    :param pad: extra symmetric padding applied before the conv
    :param padding: 'SAME' or 'VALID'
    :param mode: tf.pad mode used when padding manually
    :param initializer: weight initializer factory
    :param bias: add a zero-initialized bias when truthy
    :return: convolved tensor
    :raises ValueError: for ndim outside 3..5
    """
    nd = x.ndim
    if nd == 3:
        return conv1d(x, outdim, kernel, stride=stride, pad=pad, padding=
            padding, mode=mode, initializer=initializer, bias=bias, **kwargs)
    elif nd == 4:
        return conv2d(x, outdim, kernel, stride=stride, pad=pad, padding=
            padding, mode=mode, initializer=initializer, bias=bias, **kwargs)
    elif nd == 5:
        return conv3d(x, outdim, kernel, stride=stride, pad=pad, padding=
            padding, mode=mode, initializer=initializer, bias=bias, **kwargs)
    else:
        raise ValueError('conv for {nd}? nd <= 5'.format(nd=nd))
@layer
def conv1d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode='CONSTANT',
           initializer=tf.he_uniform, bias=False, **kwargs):
    """
    1-D convolution over [batch, time, channel] input.

    When padding == 'SAME' with a non-CONSTANT pad mode, the input is
    padded manually (so the requested mode is honoured) and the conv then
    runs with 'VALID' padding.

    :param x: 3-d tensor
    :param outdim: output channel count
    :param bias: add a zero-initialized bias when truthy
    :return: convolved tensor
    """
    kernel = _kernel_shape(1, kernel, x.dims[-1], outdim)
    pads = None
    if padding == 'SAME' and mode != 'CONSTANT':
        # pad manually, centered on the kernel
        half = (kernel[0] - 1) // 2
        pads = [(0, 0), (pad + half, pad + kernel[0] - 1 - half), (0, 0)]
        padding = 'VALID'  # change to VALID because manually padded
    elif pad:
        pads = [(0, 0), (pad, pad), (0, 0)]
    if pads is not None:
        x = tf.pad(x, pads, mode=mode)
    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),
                      **kwargs)
    out = tf.nn.conv1d(x, W, stride, padding)
    if bias:
        b = tf.get_bias('b', shape=(outdim,), initializer=tf.
            zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
@layer
def conv2d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode='CONSTANT',
           initializer=tf.he_uniform, bias=False, **kwargs):
    """
    2-D convolution over [BHWC] input.

    When padding == 'SAME' with a non-CONSTANT pad mode, the input is
    padded manually (so the requested mode is honoured) and the conv then
    runs with 'VALID' padding.

    :param x: 4-d tensor
    :param outdim: output channel count
    :param bias: add a zero-initialized bias when truthy
    :return: convolved tensor
    """
    kernel = _kernel_shape(2, kernel, x.dims[-1], outdim)
    stride = _stride_shape(2, stride)
    pads = None
    if padding == 'SAME' and mode != 'CONSTANT':
        # pad manually, centered on the kernel
        half = (kernel[0] - 1) // 2, (kernel[1] - 1) // 2
        pads = [(0, 0), (pad + half[0], pad + kernel[0] - 1 - half[0]), (
            pad + half[1], pad + kernel[1] - 1 - half[1]), (0, 0)]
        padding = 'VALID'  # change to VALID because manually padded
    elif pad:
        pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]
    if pads is not None:
        x = tf.pad(x, pads, mode=mode)
    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),
                      **kwargs)
    out = tf.nn.conv2d(x, W, stride, padding)
    if bias:
        b = tf.get_bias('b', shape=(outdim,), initializer=tf.
            zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
<|reserved_special_token_0|>
@layer
def bn(x, stddev=0.002, beta=0.0, gamma=1.0, epsilon=1e-05, momentum=0.99,
       axis=-1, training=None, **kwargs):
    """
    Batch normalization wrapping tf.layers.batch_normalization.

    :param x: input tensor
    :param stddev: stddev of the random-normal gamma initializer
    :param beta: initial offset value
    :param gamma: mean of the gamma initializer
    :param epsilon: variance epsilon
    :param momentum: moving-average momentum
    :param axis: normalization axis (channel axis by default)
    :param training: bool/tensor; if None, inferred from variable-scope
        reuse (False) or the graph's is_training flag
    :return: normalized tensor
    """
    # scale/center kwargs may disable the gamma/beta parameters entirely
    if kwargs.pop('scale', True):
        init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)
    else:
        init_gamma = None
    if kwargs.pop('center', True):
        init_beta = tf.constant_initializer(beta)
    else:
        init_beta = None
    reuse = tf.get_variable_scope().reuse
    # when variables are being reused (inference graph), default to eval mode
    if training is None and (reuse or kwargs.get('reuse', False)):
        training = False
    elif training is None:
        training = x.graph.is_training
    out = tf.layers.batch_normalization(x, axis=axis, momentum=momentum,
        epsilon=epsilon, beta_initializer=init_beta, gamma_initializer=
        init_gamma, moving_mean_initializer=tf.zeros_initializer(),
        moving_variance_initializer=tf.ones_initializer(), training=
        training, **kwargs)
    return out
@layer
def renorm(x, axis=-1, momentum=0.99, epsilon=0.001, training=None, gamma=
    1.0, beta=0.0, stddev=0.002, renorm_momentum=0.99, renorm_clipping=None,
    **kwargs):
    """
    Batch renormalization.

    https://arxiv.org/abs/1702.03275
    https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization
    :param x:
    :param dict renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar Tensors
        used to clip the renorm correction. The correction (r, d) is used as corrected_value = normalized_value * r + d,
        with r clipped to [rmin, rmax], and d to [-dmax, dmax].
        Missing rmax, rmin, dmax are set to inf, 0, inf, respectively.
    :return:
    """
    init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)
    init_beta = tf.constant_initializer(beta)
    reuse = tf.get_variable_scope().reuse
    # when variables are being reused (inference graph), default to eval mode
    if training is None and (reuse or kwargs.get('reuse', False)):
        training = False
    elif training is None:
        training = x.graph.is_training
    if renorm_clipping is not None:
        # convenience: if only rmax was supplied, derive rmin = 1 / rmax
        # (only when rmax is finite)
        if renorm_clipping.get('rmin', None) is None:
            rmax = renorm_clipping.get('rmax', None)
            if rmax is not None and not np.isinf(rmax):
                rmin = 1 / rmax
                renorm_clipping['rmin'] = rmin
    out = tf.layers.batch_normalization(x, axis=axis, momentum=momentum,
        epsilon=epsilon, beta_initializer=init_beta, gamma_initializer=
        init_gamma, training=training, renorm=True, renorm_clipping=
        renorm_clipping, renorm_momentum=renorm_momentum, **kwargs)
    return out
@layer
def inorm(x, beta=0.0, gamma=1.0, stddev=0.002, epsilon=1e-05, axis=None,
    trainable=True, **kwargs):
    """
    instance normalization normalization for (W,H)
    same output not regard to trainmode
    # https://arxiv.org/pdf/1607.08022.pdf for instance normalization
    # z = gamma * (x-m)/s + beta
    # note gamma, beta
    :param x: [BHWC] is common case
    :param gamma: initial gamma (scale); may also be a tensor when trainable=False
    :param beta: initial beta (offset); may also be a tensor when trainable=False
    :param epsilon: variance epsilon
    :param trainable: when True, create per-channel gamma/beta variables
    :return: normalized tensor
    """
    # moments over spatial axes only: per sample, per channel
    axes = list(range(1, 1 + x.ndim - 2))
    m, v = tf.nn.moments(x, axes=axes, keep_dims=True)
    shapelast = x.dims[-1:]
    if trainable:
        init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)
        init_beta = tf.constant_initializer(beta)
        gamma_t = tf.get_weight(name='gamma', shape=shapelast, initializer=
            init_gamma)
        beta_t = tf.get_bias(name='beta', shape=shapelast, initializer=
            init_beta)
    else:
        # use the caller-supplied values directly (cf. cnorm)
        gamma_t = gamma
        beta_t = beta
    out = tf.nn.batch_normalization(x, m, v, offset=beta_t, scale=gamma_t,
        variance_epsilon=epsilon)
    return out
@layer
def cnorm(x, labels, klass=None, stddev=0.01, beta=0.0, gamma=1.0, epsilon=
    1e-05):
    """
    conditional instance normalization (by label index)
    for learning embedding value of beta and gamma
    # https://arxiv.org/pdf/1610.07629.pdf for conditional instance normalization
    :param x:
    :param labels: [B,]
    :param klass: size of embedding var (total class count, required)
    :param gamma: initial_gamma
    :param stddev: stddev for gamma random init
    :param beta: initial beta value
    :param epsilon: 1e-5 for var_epsilon
    :return:
    """
    # the total class count is required to size the embedding tables
    assert klass is not None
    init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)
    init_beta = tf.constant_initializer(beta)
    # per-class params: [klass, 1, ..., C] — ones except class and channel axes
    shape = [1] * x.ndim
    shape[0] = klass
    shape[-1] = x.dims[-1]
    beta_v = tf.get_weight(name='beta', shape=shape, initializer=init_beta)
    gamma_v = tf.get_weight(name='gamma', shape=shape, initializer=init_gamma)
    # gather the row for each sample's label, then run plain instance norm
    beta_l = tf.nn.embedding_lookup(beta_v, labels)
    gamma_l = tf.nn.embedding_lookup(gamma_v, labels)
    return inorm(x, beta=beta_l, gamma=gamma_l, epsilon=epsilon)
@layer
def lnorm(x, center=True, scale=True, activation_fn=None, reuse=None,
          variables_collections=None, outputs_collections=None, trainable=True,
          begin_norm_axis=1, begin_params_axis=-1, scope=None, **kwargs):
    """
    Layer normalization (tf.contrib.layers.layer_norm wrapper).

    :param x: input tensor
    :param begin_norm_axis: first axis of the normalized span
    :param begin_params_axis: first axis of the beta/gamma parameters
    :param scope: optional variable_scope for the created variables
    :return: normalized tensor
    """
    # BUGFIX: `scope` was accepted but hard-coded to None in the delegated
    # call, silently ignoring any caller-supplied scope.
    return tf.contrib.layers.layer_norm(x, center=center, scale=scale,
                                        activation_fn=activation_fn,
                                        reuse=reuse,
                                        variables_collections=variables_collections,
                                        outputs_collections=outputs_collections,
                                        trainable=trainable,
                                        begin_norm_axis=begin_norm_axis,
                                        begin_params_axis=begin_params_axis,
                                        scope=scope, **kwargs)
@layer
def gnorm(x, group):
    """
    group normalization
    :param x: [N, ...., C]
    :param int group: G, number of channel groups; must divide C exactly
    :return: normalized tensor with x's original shape
    """
    shape = list(x.dims)
    if shape[0] is None:
        shape[0] = -1
    ch = shape[-1]
    # split the channel axis into (C // G, G)
    shape[-1] = ch // group
    shape.append(group)
    # fails when the channel count is not divisible by `group`
    assert ch // group * group == ch
    x = tf.reshape(x, shape)
    # NOTE(review): lnorm normalizes over all non-batch axes of the reshaped
    # tensor — confirm this matches the intended per-group statistics
    x_n = lnorm(x)
    # restore the original shape
    shape = shape[:-1]
    shape[-1] = ch
    x = tf.reshape(x_n, shape)
    return x
@layer
def dropout(x, keep_prob=0.5, is_training=None, noise_shape=None, seed=None):
    """
    Dropout that is only active while training.

    :param x: input tensor
    :param keep_prob: keep probability; 1.0 is a no-op
    :param is_training: bool/tensor; defaults to the graph's is_training flag
    :param noise_shape: forwarded to tf.nn.dropout
    :param seed: forwarded to tf.nn.dropout
    :return: x with dropout applied when training, x unchanged otherwise
    """
    if keep_prob == 1.0:
        # nothing would ever be dropped
        return x
    if is_training is None:
        training = x.graph.is_training
    else:
        training = tf.convert_to_tensor(is_training)
    apply_dropout = lambda: tf.nn.dropout(x, keep_prob, noise_shape, seed)
    passthrough = lambda: x
    return tf.cond(training, apply_dropout, passthrough)
@layer
def dense(x, outdim, initializer=tf.glorot_uniform, bias=False, name=None):
    """
    Fully-connected layer.

    :param x: tensor; a 4-d input is flattened to 2-d first
    :param bias: add a zero-initialized bias when truthy
    :param outdim: output size; may be a shape tuple, in which case the
        output is reshaped to [-1, *outdim]
    :param initializer: weight initializer factory
    :param name: optional name for the output op
    :return: output tensor
    """
    if x.ndim == 4:
        x = x.flat2d()
    assert x.ndim == 2
    # a non-int outdim is treated as a target shape
    outshape = not isinstance(outdim, int)
    if outshape:
        dim = [-1] + list(outdim)
        outdim = np.prod(outdim)
    shape = [x.dims[-1], outdim]
    W = tf.get_weight('W', shape=shape, initializer=initializer(shape))
    out = x.dot(W)
    if bias:
        b = tf.get_bias('b', shape=(outdim,), initializer=tf.
            zeros_initializer())
        out = tf.nn.bias_add(out, b)
    if outshape:
        # restore the requested output shape
        out = out.reshape(dim)
    return tf.identity(out, name=name)
@layer
def bias(x, initializer=tf.zeros_initializer, name=None):
    """
    Add a trainable bias over the last (channel) axis.

    :param x: input tensor
    :param initializer: bias initializer factory (called with no args)
    :param name: optional name for the output op
    :return: x plus the bias, broadcast over the channel axis
    """
    outdim = x.dims[-1]
    b = tf.get_bias('b', shape=(outdim,), initializer=initializer())
    return tf.nn.bias_add(x, b, name=name)
def _pool_kernel_stide(dim, kernel, stride):
if isinstance(kernel, int):
kernel = [kernel] * dim
if isinstance(stride, int):
stride = [stride] * dim
assert len(kernel) == dim and len(stride) == dim
return [1] + list(kernel) + [1], [1] + list(stride) + [1]
@layer
def maxpool(x, kernel=2, stride=None, padding='SAME'):
    """
    Max pooling for 2-D or 3-D spatial input.

    :param x: tensor [batch, spatial..., channel]
    :param kernel: int or per-axis pooling window sizes
    :param stride: int or per-axis strides; defaults to `kernel`
    :param padding: 'SAME' or 'VALID'
    :return: pooled tensor
    :raises ValueError: for spatial rank other than 2 or 3
    """
    nd = x.ndim - 2
    stride = kernel if stride is None else stride
    kernel, stride = _pool_kernel_stide(nd, kernel, stride)
    if nd == 2:
        return tf.nn.max_pool(x, kernel, stride, padding)
    elif nd == 3:
        return tf.nn.max_pool3d(x, kernel, stride, padding)
    else:
        raise ValueError('maxpool support {0}? '.format(nd))
@layer
def maxpool_where(x, kernel, stride=None, pads=None, padding='SAME', keep=None
    ):
    """
    Max pooling that also produces the max-location mask for later unpooling.

    :param x: tensor [BHWC]
    :param kernel: pooling window (stride is assumed equal to kernel)
    :param stride: must be None (kernel is used)
    :param pads: optional pads forwarded to where_pooled
    :param padding: must be 'SAME'
    :param keep: optional list; when given, the mask is appended to it and
        only the pooled tensor is returned
        (NOTE: this parameter shadows the `keep` layer defined in this file)
    :return: (pooled, mask) or just pooled when `keep` is a list
    """
    # only kernel == stride with SAME padding is supported
    assert stride is None and padding == 'SAME'
    stride = kernel
    pooled = maxpool(x, kernel, stride=stride, padding=padding)
    mask = where_pooled(x, pooled, kernel, pads=pads)
    if keep is None:
        return pooled, mask
    else:
        keep.append(mask)
        return pooled
@layer
def where_pooled(x, pooled, kernel=None, pads=None):
    """
    Build a float mask marking the positions of x that survived max pooling.

    :param x: original input [BHWC]
    :param pooled: max-pooled tensor
    :param kernel: pooling kernel; int, per-axis list, or None to infer it
        from the spatial-shape ratio of x to pooled
    :param pads: optional padding applied to the upsampled tensor before
        cropping back to x's shape
    :return: float mask, 1.0 where x equals its pooled (max) value
    """
    # todo : add 3d support
    assert x.ndim == 4
    import math
    if kernel is None:
        # BUGFIX: this branch crashed — `pooled.zip` is not an attribute.
        # Infer the per-axis pooling factor from the spatial (H, W) dims of
        # x versus pooled, matching the axis=[1, 2] repeats below.
        kernel = [math.ceil(float(d) / float(p))
                  for d, p in zip(x.dims[1:3], pooled.dims[1:3])]
        repeat = pooled.repeats(kernel, axis=[1, 2])
    elif isinstance(kernel, (tuple, list)):
        repeat = pooled.repeats(kernel, axis=[1, 2])
    else:
        repeat = pooled.repeats([kernel, kernel], axis=[1, 2])
    if pads is not None:
        repeat = repeat.pad(pads, axis=[1, 2])
    # crop the upsampled tensor back to x's shape before comparing
    dim = x.dims
    sameshaped = repeat[:dim[0], :dim[1], :dim[2], :dim[3]]
    mask = tf.equal(x, sameshaped).to_float()
    return mask
<|reserved_special_token_0|>
@layer
def unpool_repeat(x, kernel):
    """
    Upsample by repeating each element along the spatial axes.

    :param x: tensor [batch, spatial..., channel]
    :param kernel: int (broadcast) or per-spatial-axis repeat factors
    :return: upsampled tensor
    """
    if not isinstance(kernel, (list, tuple)) and isinstance(kernel, int):
        kernel = [kernel] * (x.ndim - 2)
    # BUGFIX: the spatial axes are 1 .. ndim-2 inclusive, i.e. range(1, ndim-1).
    # The previous range(1, x.ndim - 2) dropped the last spatial axis, so a
    # 4-D [BHWC] input was repeated along H but never along W (kernel has
    # ndim-2 entries, matching ndim-2 axes — cf. where_pooled's axis=[1, 2]).
    return x.repeats(kernel, axis=list(range(1, x.ndim - 1)))
@layer
def avgpool(x, kernel, stride=None, padding='SAME'):
    """
    Average pooling for 2-D or 3-D spatial input.

    :param x: tensor [batch, spatial..., channel]
    :param kernel: int or per-axis pooling window sizes
    :param stride: int or per-axis strides; defaults to `kernel`
        (consistent with maxpool) — backward compatible generalization
    :param padding: 'SAME' or 'VALID'
    :return: pooled tensor
    :raises ValueError: for spatial rank other than 2 or 3
    """
    nd = x.ndim - 2
    # default stride to the kernel size, matching maxpool's behaviour
    stride = kernel if stride is None else stride
    kernel, stride = _pool_kernel_stide(nd, kernel, stride)
    if nd == 2:
        return tf.nn.avg_pool(x, kernel, stride, padding)
    elif nd == 3:
        return tf.nn.avg_pool3d(x, kernel, stride, padding)
    else:
        raise ValueError('avgpool support {0}? '.format(nd))
@layer
def gpool(x, keepdims=True):
    """
    Global average pooling over all spatial axes.

    :param x: tensor [batch, spatial..., channel]
    :param keepdims: keep the reduced axes as size-1 dims when True
    :return: per-channel mean over the spatial axes
    """
    # reduce every axis except batch (0) and channel (last)
    axis = list(range(1, x.ndim - 1))
    return x.mean(axis=axis, keepdims=keepdims)
def _atrous1d(x, kernel, rate, padding='SAME'):
    """
    1-D atrous (dilated) convolution via the time_to_batch / batch_to_time trick.

    cf https://www.tensorflow.org/versions/r0.11/api_docs/python/nn.html#atrous_conv2d
    :param x: [batch, time, channel]
    :param kernel: weight tensor [filter_width, inchannel, outchannel]
        (the old "[1, 1, inchannel, outchannel]" note was wrong —
        kernel.dims[0] below is the filter width)
    :param rate: dilation rate; rate == 1 falls back to a plain conv1d
    :param padding: 'SAME' or 'VALID'
    :return: convolved tensor
    :raises ValueError: on an unrecognized padding string
    """
    if rate == 1:
        # no dilation: ordinary 1-D convolution
        out = tf.nn.conv1d(x, kernel, stride=(1, 1, 1), padding=padding)
        return out
    if padding == 'SAME':
        # effective width of the dilated filter, used to center the padding
        filter_width = kernel.dims[0]
        filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
        pad = filter_width_up - 1
        pad_left = pad // 2
        pad_right = pad - pad_left
    elif padding == 'VALID':
        pad_left = 0
        pad_right = 0
    else:
        raise ValueError('Invalid padding')
    # extra right pad so the padded width is divisible by `rate`
    in_width = x.dims[1] + pad_left + pad_right
    pad_right_extra = (rate - in_width % rate) % rate
    pads = [(0, 0), (pad_left, pad_right + pad_right_extra), (0, 0)]
    out = x.time_to_batch(rate, pads)
    out = tf.nn.conv1d(out, kernel, stride=(1, 1, 1), padding='VALID')
    # crop off the divisibility padding after converting back
    crops = [(0, 0), (0, pad_right_extra), (0, 0)]
    out = out.batch_to_time(rate, crops)
    return out
@layer
def atrous(x, outdim, kernel, rate, pad=0, padding='SAME',
           initializer=tf.he_uniform, bias=None, **kwargs):
    """
    Atrous (dilated) convolution for 1-D or 2-D spatial input.

    :param x: tensor [batch, spatial..., channel]
    :param outdim: output channel count
    :param kernel: int or per-axis kernel sizes
    :param rate: dilation rate
    :param pad: extra symmetric zero padding applied before the conv
    :param padding: 'SAME' or 'VALID'
    :param initializer: weight initializer factory
    :param bias: add a zero-initialized bias when truthy
    :return: convolved tensor
    :raises NotImplementedError: for spatial rank other than 1 or 2
    """
    assert isinstance(pad, int)
    nd = x.ndim - 2
    if pad:
        pads = [(0, 0)] + [(pad, pad)] * nd + [(0, 0)]
        x = tf.pad(x, pads, mode='CONSTANT')
    kernel = _kernel_shape(nd, kernel, x.dims[-1], outdim)
    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),
                      **kwargs)
    if nd == 1:
        out = _atrous1d(x, W, rate, padding=padding)
    elif nd == 2:
        out = tf.nn.atrous_conv2d(x, W, rate, padding)
    else:
        raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))
    # BUGFIX: use truthiness like every other conv layer in this file.
    # The previous `if bias is not None` added a bias even when callers
    # explicitly passed bias=False.
    if bias:
        b = tf.get_bias('b', shape=(outdim,),
                        initializer=tf.zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
<|reserved_special_token_0|>
@layer
def deconv(x, outdim, kernel, stride=1, padding='SAME',
           initializer=tf.he_uniform, bias=False, extra=None, **kwargs):
    """
    Transposed ("de-") convolution for 2-D or 3-D spatial input.

    :param x: tensor [batch, spatial..., channel]
    :param outdim: output channel count
    :param kernel: int or per-axis kernel sizes
    :param stride: int or per-axis strides
    :param padding: 'SAME' or 'VALID'
    :param initializer: weight initializer factory
    :param bias: add a zero-initialized bias when truthy
    :param extra: output-size adjustment forwarded to _deconv_outshape
        (helper defined elsewhere in this file)
    :return: upsampled tensor with its static shape restored
    :raises NotImplementedError: for spatial rank other than 2 or 3
    """
    nd = x.ndim - 2
    out_shape = _deconv_outshape(nd, x.dims, outdim, kernel, stride,
                                 padding, extra)
    # remember the static shape before replacing None batch with a dynamic op
    oshape = tf.TensorShape(out_shape)
    if out_shape[0] is None:
        # dynamic batch size: fill it in at run time
        out_shape[0] = tf.shape(x)[0]
        out_shape = tf.stack(out_shape)
    # NOTE: transposed-conv kernels are [spatial..., outdim, indim]
    kernel_shape = _kernel_shape(nd, kernel, outdim, x.dims[-1])
    stride = _stride_shape(nd, stride)
    W = tf.get_weight('W', shape=kernel_shape,
                      initializer=initializer(kernel_shape))
    if nd == 2:
        out = tf.nn.conv2d_transpose(x, W, out_shape, strides=stride,
                                     padding=padding)
    elif nd == 3:
        out = tf.nn.conv3d_transpose(x, W, out_shape, strides=stride,
                                     padding=padding)
    else:
        raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))
    if bias:
        b = tf.get_bias('b', shape=(outdim,),
                        initializer=tf.zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    # conv*_transpose loses static shape info; restore it for later layers
    out.set_shape(oshape)
    return out
@layer
def dwconv(x, kernel, multiplier=1, stride=1, pad=0, padding='SAME',
           initializer=tf.he_uniform, bias=False, **kwargs):
    """
    Depthwise 2-D convolution.

    :param x: tensor [BHWC]
    :param kernel: int or per-axis kernel sizes
    :param multiplier: channel multiplier; output channels = in_channels * multiplier
    :param stride: int or per-axis strides
    :param pad: extra symmetric zero padding applied before the conv
    :param padding: 'SAME' or 'VALID'
    :param initializer: weight initializer factory
    :param bias: add a zero-initialized bias when truthy
    :return: depthwise-convolved tensor
    """
    if pad:
        pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]
        x = tf.pad(x, pads, mode='CONSTANT')
    # depthwise kernel: [kh, kw, in_channels, multiplier]
    kernel = _kernel_shape(2, kernel, x.dims[-1], multiplier)
    stride = _stride_shape(2, stride)
    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),
                      **kwargs)
    out = tf.nn.depthwise_conv2d(x, W, stride, padding)
    if bias:
        # one bias per output channel (in_channels * multiplier)
        outdim = kernel[2] * multiplier
        b = tf.get_bias('b', shape=(outdim,),
                        initializer=tf.zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
@layer
def subpixel(x, kernel, factor=2, stride=1, pad=0, padding='SAME',
             initializer=tf.he_uniform, bias=False, **kwargs):
    """
    Sub-pixel upsampling: convolve to factor**2 * C channels, then
    rearrange channels to space via channel_to_space.

    :param x: tensor [BHWC]
    :param kernel: int or per-axis kernel sizes
    :param factor: spatial upsampling factor
    :param stride: int or per-axis strides
    :param pad: extra symmetric zero padding applied before the conv
    :param padding: 'SAME' or 'VALID'
    :param initializer: weight initializer factory
    :param bias: add a zero-initialized bias when truthy
    :return: tensor upsampled by `factor` in both spatial dims
    """
    from .ireshape import channel_to_space
    assert x.ndim == 4
    # BUGFIX: `pad` was accepted but silently ignored; apply it like dwconv.
    if pad:
        pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]
        x = tf.pad(x, pads, mode='CONSTANT')
    indim = x.dims[-1]
    outdim = indim * factor * factor
    kernel = _kernel_shape(2, kernel, indim, outdim)
    stride = _stride_shape(2, stride)
    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel))
    out = tf.nn.conv2d(x, W, stride, padding=padding)
    if bias:
        b = tf.get_bias('b', shape=(outdim,),
                        initializer=tf.zeros_initializer())
        out = tf.nn.bias_add(out, b)
    out = channel_to_space(out, factor)
    return out
@layer
def leaky(x, slope=0.01, name=None):
    """
    Leaky ReLU: elementwise max(x, slope * x).

    see also pleaky (learned slope)
    :param x: input tensor
    :param slope: negative-side slope, 0.01 by default
    :param name: optional name for the output op
    :return: activated tensor
    """
    scaled = x * slope
    return tf.maximum(x, scaled, name=name)
@layer
def pleaky(x):
    """
    Parametric leaky ReLU: the negative-side slope is a learned scalar.

    :param x: input tensor
    :return: max(x, alpha * x), with trainable scalar alpha initialized to 0.01
    """
    slope = tf.get_bias('alpha', shape=(),
                        initializer=tf.constant_initializer(0.01))
    return tf.maximum(x, x * slope)
<|reserved_special_token_0|>
@layer
def sizedown(x, factors=(2, 2), extras=(0, 0),
             method=ResizeMethod.NEAREST_NEIGHBOR, align_corners=False):
    """
    Shrink an image tensor by integer factors (plus optional extra pixels).

    :param x: image tensor [BHWC]
    :param factors: int or (fh, fw) downscale factors
    :param extras: int or (eh, ew) added to the target size after division
    :param method: tf.image resize method
    :param align_corners: forwarded to resize_images
    :return: resized tensor
    """
    inshape = x.dims
    if isinstance(factors, int):
        factors = factors, factors
    if isinstance(extras, int):
        extras = extras, extras
    # target height/width: floor-divide by factor, then add the extra margin
    hw = [inshape[1] // factors[0] + extras[0],
          inshape[2] // factors[1] + extras[1]]
    return tf.image.resize_images(x, hw, method=method,
                                  align_corners=align_corners)
@layer
def keep(t, keepto, collection=None):
    """
    Remember tensor `t` in the list `keepto` (and optionally in a graph
    collection), then pass it through unchanged.

    :param t: tensor
    :param keepto: list that receives t
    :param collection: optional graph collection name
    :return: t, unmodified
    """
    keepto.append(t)
    if collection is not None:
        tf.add_to_collection(collection, t)
    return t
@layer
def collect(t, collection='activation'):
    """
    Append t to the named graph collection and return t as is.

    :param t: tensor
    :param collection: graph collection name, 'activation' by default
    :return: t, unmodified
    """
    tf.add_to_collection(collection, t)
    return t
@layer
def iname(t, name):
    """ return t renamed via tf.identity """
    return tf.identity(t, name=name)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from tensorflow.python.ops.image_ops_impl import ResizeMethod
import sflow.core as tf
from sflow.core import layer
import numpy as np
# region arg helper
def _kernel_shape(nd, k, indim, outdim):
if isinstance(k, int):
k = [k for _ in range(nd)]
k = list(k)
assert len(k) == nd
k.extend([indim, outdim])
return k
def _stride_shape(nd, s):
"""
:param nd:
:param s: int | list | tuple
:return:
"""
if isinstance(s, int):
s = [s for _ in range(nd)]
s = list(s)
assert len(s) == nd
s = [1] + s + [1]
return s
# endregion
# region conv
# @layer
# @patchmethod(tf.Tensor, tf.Variable)
@layer
def conv(x, outdim, kernel=3, stride=1, pad=0, padding='SAME', mode='CONSTANT',
         initializer=tf.he_uniform, bias=False, **kwargs):
    """
    N-d convolution dispatcher: picks conv1d/conv2d/conv3d from x.ndim.

    :param x: tensor [batch, spatial..., channel], 3 <= ndim <= 5
    :param outdim: output channel count
    :param kernel: int or per-axis kernel sizes
    :param stride: int or per-axis strides
    :param pad: extra symmetric padding applied before the conv
    :param padding: 'SAME' or 'VALID'
    :param mode: tf.pad mode used when padding manually
    :param initializer: weight initializer factory
    :param bias: add a zero-initialized bias when truthy
    :return: convolved tensor
    :raises ValueError: for ndim outside 3..5
    """
    nd = x.ndim
    if nd == 3:
        return conv1d(x, outdim, kernel, stride=stride, pad=pad, padding=padding, mode=mode,
                      initializer=initializer, bias=bias, **kwargs)
    elif nd == 4:
        return conv2d(x, outdim, kernel, stride=stride, pad=pad, padding=padding, mode=mode,
                      initializer=initializer, bias=bias, **kwargs)
    elif nd == 5:
        return conv3d(x, outdim, kernel, stride=stride, pad=pad, padding=padding, mode=mode,
                      initializer=initializer, bias=bias, **kwargs)
    else:
        raise ValueError('conv for {nd}? nd <= 5'.format(nd=nd))
@layer
def conv1d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode='CONSTANT',
           initializer=tf.he_uniform, bias=False, **kwargs):
    """
    1-D convolution over [batch, time, channel] input.

    When padding == 'SAME' with a non-CONSTANT pad mode, the input is
    padded manually (so the requested mode is honoured) and the conv then
    runs with 'VALID' padding.

    :param x: 3-d tensor
    :param outdim: output channel count
    :param bias: add a zero-initialized bias when truthy
    :return: convolved tensor
    """
    kernel = _kernel_shape(1, kernel, x.dims[-1], outdim)
    pads = None
    if padding == 'SAME' and mode != 'CONSTANT':
        # pad manually, centered on the kernel
        half = (kernel[0] - 1) // 2
        pads = [(0, 0), (pad + half, pad + kernel[0] - 1 - half), (0, 0)]
        padding = 'VALID'   # change to valid because manually padded
    elif pad:
        pads = [(0, 0), (pad, pad), (0, 0)]
    if pads is not None:
        x = tf.pad(x, pads, mode=mode)
    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel), **kwargs)
    out = tf.nn.conv1d(x, W, stride, padding)
    if bias:
        b = tf.get_bias('b', shape=(outdim,), initializer=tf.zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
@layer
def conv2d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode='CONSTANT',
           initializer=tf.he_uniform, bias=False, **kwargs):
    """
    2-D convolution over [BHWC] input.

    When padding == 'SAME' with a non-CONSTANT pad mode, the input is
    padded manually (so the requested mode is honoured) and the conv then
    runs with 'VALID' padding.

    :param x: 4-d tensor
    :param outdim: output channel count
    :param bias: add a zero-initialized bias when truthy
    :return: convolved tensor
    """
    kernel = _kernel_shape(2, kernel, x.dims[-1], outdim)
    stride = _stride_shape(2, stride)
    pads = None
    if padding == 'SAME' and mode != 'CONSTANT':
        # pad manually, centered on the kernel
        half = ((kernel[0] - 1) // 2, (kernel[1] - 1) // 2)
        pads = [(0, 0),
                (pad + half[0], pad + kernel[0] - 1 - half[0]),
                (pad + half[1], pad + kernel[1] - 1 - half[1]), (0, 0)]
        padding = 'VALID'   # change to valid because manually padded
    elif pad:
        pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]
    if pads is not None:
        x = tf.pad(x, pads, mode=mode)
    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel), **kwargs)
    out = tf.nn.conv2d(x, W, stride, padding)
    if bias:
        b = tf.get_bias('b', shape=(outdim,), initializer=tf.zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
@layer
def conv3d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode='CONSTANT',
           initializer=tf.he_uniform, bias=False, **kwargs):
    """
    3-D convolution over [batch, D, H, W, channel] input.

    When padding == 'SAME' with a non-CONSTANT pad mode, the input is
    padded manually (so the requested mode is honoured) and the conv then
    runs with 'VALID' padding.

    :param x: 5-d tensor
    :param outdim: output channel count
    :param bias: add a zero-initialized bias when truthy
    :return: convolved tensor
    """
    kernel = _kernel_shape(3, kernel, x.dims[-1], outdim)
    stride = _stride_shape(3, stride)  # stride 5-dim
    pads = None
    if padding == 'SAME' and mode != 'CONSTANT':
        # pad manually, centered on the kernel
        half = ((kernel[0] - 1) // 2, (kernel[1] - 1) // 2, (kernel[2] - 1) // 2)
        pads = [(0, 0),
                (pad + half[0], pad + kernel[0] - 1 - half[0]),
                (pad + half[1], pad + kernel[1] - 1 - half[1]),
                (pad + half[2], pad + kernel[2] - 1 - half[2]), (0, 0)]
        padding = 'VALID'   # change to valid because manually padded
    elif pad:
        pads = [(0, 0), (pad, pad), (pad, pad), (pad, pad), (0, 0)]
    if pads is not None:
        x = tf.pad(x, pads, mode=mode)
    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel), **kwargs)
    out = tf.nn.conv3d(x, W, stride, padding)
    if bias:
        b = tf.get_bias('b', shape=(outdim,), initializer=tf.zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
# endregion
# region normalization
@layer
def bn(x, stddev=0.002, beta=0.0, gamma=1.0, epsilon=1e-5, momentum=0.99, axis=-1, training=None, **kwargs):
    """
    Batch normalization wrapping tf.layers.batch_normalization.

    :param x: input tensor
    :param stddev: stddev of the random-normal gamma initializer
    :param beta: initial offset value
    :param gamma: mean of the gamma initializer
    :param epsilon: variance epsilon
    :param momentum: moving-average momentum
    :param axis: normalization axis (channel axis by default)
    :param training: bool/tensor; if None, inferred from variable-scope
        reuse (False) or the graph's is_training flag
    :return: normalized tensor
    """
    # scale/center kwargs may disable the gamma/beta parameters entirely
    if kwargs.pop('scale', True):
        init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)
    else:
        init_gamma = None
    if kwargs.pop('center', True):
        init_beta = tf.constant_initializer(beta)
    else:
        init_beta = None
    reuse = tf.get_variable_scope().reuse
    # when variables are being reused (inference graph), default to eval mode
    if training is None and (reuse or kwargs.get('reuse', False)):
        training = False
    elif training is None:
        training = x.graph.is_training
    out = tf.layers.batch_normalization(x, axis=axis, momentum=momentum, epsilon=epsilon,
                                        beta_initializer=init_beta,
                                        gamma_initializer=init_gamma,
                                        moving_mean_initializer=tf.zeros_initializer(),
                                        moving_variance_initializer=tf.ones_initializer(),
                                        training=training,
                                        **kwargs
                                        )
    return out
@layer
def renorm(x, axis=-1, momentum=0.99, epsilon=0.001, training=None,
           gamma=1.0, beta=0.0, stddev=0.002,
           renorm_momentum=0.99, renorm_clipping=None,
           **kwargs):
    """
    Batch renormalization.

    https://arxiv.org/abs/1702.03275
    https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization
    :param x:
    :param dict renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar Tensors
      used to clip the renorm correction. The correction (r, d) is used as corrected_value = normalized_value * r + d,
      with r clipped to [rmin, rmax], and d to [-dmax, dmax].
      Missing rmax, rmin, dmax are set to inf, 0, inf, respectively.
    :return:
    """
    init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)
    init_beta = tf.constant_initializer(beta)
    reuse = tf.get_variable_scope().reuse
    # when variables are being reused (inference graph), default to eval mode
    if training is None and (reuse or kwargs.get('reuse', False)):
        training = False
    elif training is None:
        training = x.graph.is_training
    if renorm_clipping is not None:
        # convenience: if only rmax was supplied, derive rmin = 1 / rmax
        # (only when rmax is finite)
        if renorm_clipping.get('rmin', None) is None:
            rmax = renorm_clipping.get('rmax', None)
            if rmax is not None and not np.isinf(rmax):
                rmin = 1 / rmax
                renorm_clipping['rmin'] = rmin
    out = tf.layers.batch_normalization(x, axis=axis, momentum=momentum, epsilon=epsilon,
                                        beta_initializer=init_beta,
                                        gamma_initializer=init_gamma,
                                        training=training,
                                        renorm=True,
                                        renorm_clipping=renorm_clipping,
                                        renorm_momentum=renorm_momentum,
                                        **kwargs
                                        )
    return out
@layer
def inorm(x, beta=0.0, gamma=1.0, stddev=0.002, epsilon=1e-5, axis=None, trainable=True, **kwargs):
    """
    instance normalization normalization for (W,H)
    same output not regard to trainmode
    # https://arxiv.org/pdf/1607.08022.pdf for instance normalization
    # z = gamma * (x-m)/s + beta
    # note gamma, beta
    :param x: [BHWC] is common case
    :param gamma: initial gamma (scale); may also be a tensor when trainable=False
    :param beta: initial beta (offset); may also be a tensor when trainable=False
    :param epsilon: variance epsilon
    :param trainable: when True, create per-channel gamma/beta variables
    :return: normalized tensor
    """
    axes = list(range(1, 1 + x.ndim-2))  # axes = [1,2] for BWHC except batch, channel
    m, v = tf.nn.moments(x, axes=axes, keep_dims=True)
    shapelast = x.dims[-1:]
    if trainable:
        init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)
        init_beta = tf.constant_initializer(beta)
        gamma_t = tf.get_weight(name='gamma', shape=shapelast, initializer=init_gamma)
        beta_t = tf.get_bias(name='beta', shape=shapelast, initializer=init_beta)
    else:
        # use the caller-supplied values directly (cf. cnorm)
        gamma_t = gamma
        beta_t = beta
    out = tf.nn.batch_normalization(x, m, v, offset=beta_t, scale=gamma_t, variance_epsilon=epsilon)
    return out
@layer
def cnorm(x, labels, klass=None, stddev=0.01, beta=0.0, gamma=1.0, epsilon=1e-5):
    """
    conditional instance normalization (by label index)
    for learning embedding value of beta and gamma
    # https://arxiv.org/pdf/1610.07629.pdf for conditional instance normalization
    :param x:
    :param labels: [B,]
    :param klass: size of embedding var (total class count, required)
    :param gamma: initial_gamma
    :param stddev: stddev for gamma random init
    :param beta: initial beta value
    :param epsilon: 1e-5 for var_epsilon
    :return:
    """
    # total klass count needs !!
    assert klass is not None
    init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)
    init_beta = tf.constant_initializer(beta)
    # params
    shape = [1] * x.ndim
    shape[0] = klass
    shape[-1] = x.dims[-1]  # ones but last channel axis
    # [klass, 1, 1, C] for [BHWC] data
    beta_v = tf.get_weight(name='beta', shape=shape, initializer=init_beta)
    gamma_v = tf.get_weight(name='gamma', shape=shape, initializer=init_gamma)
    # conditioned by label: gather the per-class row for each sample,
    # then run plain instance norm with those offsets/scales
    beta_l = tf.nn.embedding_lookup(beta_v, labels)
    gamma_l = tf.nn.embedding_lookup(gamma_v, labels)
    return inorm(x, beta=beta_l, gamma=gamma_l, epsilon=epsilon)
@layer
def lnorm(x, center=True, scale=True, activation_fn=None, reuse=None,
          variables_collections=None, outputs_collections=None,
          trainable=True, begin_norm_axis=1, begin_params_axis=-1,
          scope=None,
          **kwargs):
    """
    Layer normalization (tf.contrib.layers.layer_norm wrapper).

    :param x: input tensor
    :param begin_norm_axis: first axis of the normalized span
    :param begin_params_axis: first axis of the beta/gamma parameters
    :param scope: optional variable_scope for the created variables
    :return: normalized tensor
    """
    # BUGFIX: `scope` was accepted but hard-coded to None in the delegated
    # call, silently ignoring any caller-supplied scope.
    return tf.contrib.layers.layer_norm(x, center=center, scale=scale,
                                        activation_fn=activation_fn, reuse=reuse,
                                        variables_collections=variables_collections,
                                        outputs_collections=outputs_collections,
                                        trainable=trainable,
                                        begin_norm_axis=begin_norm_axis,
                                        begin_params_axis=begin_params_axis,
                                        scope=scope,
                                        **kwargs)
@layer
def gnorm(x, group):
    """
    group normalization
    :param x: [N, ...., C]
    :param int group: G, number of channel groups; must divide C exactly
    :return: normalized tensor with x's original shape
    """
    # reference pseudo-code (from the paper, NCHW layout):
    #   x = reshape(x, [N, G, C//G, H, W])
    #   mean, var = moments(x, [2, 3, 4], keepdims=True)
    #   x = (x - mean) / sqrt(var + eps)
    #   return reshape(x, [N, C, H, W]) * gamma + beta
    shape = list(x.dims)
    if shape[0] is None:
        shape[0] = -1
    ch = shape[-1]
    # split the channel axis into (C // G, G)
    shape[-1] = ch // group
    shape.append(group)
    # todo : fails when the channel count is not divisible by `group`
    assert (ch // group) * group == ch
    x = tf.reshape(x, shape)
    # NOTE(review): lnorm normalizes over all non-batch axes of the reshaped
    # tensor — confirm this matches the intended per-group statistics
    x_n = lnorm(x)
    # restore original shape
    shape = shape[:-1]
    shape[-1] = ch
    x = tf.reshape(x_n, shape)
    return x
# endregion
# region dense and dropout
@layer
def dropout(x, keep_prob=0.5, is_training=None, noise_shape=None, seed=None):
    """
    Dropout that is only active while training.

    :param x: input tensor
    :param keep_prob: keep probability; 1.0 is a no-op
    :param is_training: bool/tensor; defaults to the graph's is_training flag
    :param noise_shape: forwarded to tf.nn.dropout
    :param seed: forwarded to tf.nn.dropout
    :return: x with dropout applied when training, x unchanged otherwise
    """
    if keep_prob == 1.0:
        # nothing would ever be dropped
        return x
    if is_training is None:
        training = x.graph.is_training
    else:
        training = tf.convert_to_tensor(is_training)
    apply_dropout = lambda: tf.nn.dropout(x, keep_prob, noise_shape, seed)
    passthrough = lambda: x
    return tf.cond(training, apply_dropout, passthrough)
@layer
def dense(x, outdim, initializer=tf.glorot_uniform, bias=False, name=None):
    """
    Fully-connected layer.

    :param x: tensor; a 4-d input is flattened to 2-d first
    :param bias: add a zero-initialized bias when truthy
    :param outdim: output size; may be a shape tuple, in which case the
        output is reshaped to [-1, *outdim]
    :param initializer: weight initializer factory
    :param name: optional name for the output op
    :return: output tensor
    """
    if x.ndim == 4:
        x = x.flat2d()
    assert x.ndim == 2
    # a non-int outdim is treated as a target shape
    outshape = not isinstance(outdim, int)
    if outshape:
        dim = [-1] + list(outdim)
        outdim = np.prod(outdim)
    shape = [x.dims[-1], outdim]
    W = tf.get_weight('W', shape=shape, initializer=initializer(shape))
    out = x.dot(W)
    if bias:
        b = tf.get_bias('b', shape=(outdim,), initializer=tf.zeros_initializer())
        out = tf.nn.bias_add(out, b)
    if outshape:
        # restore the requested output shape
        out = out.reshape(dim)
    return tf.identity(out, name=name)
# endregion
@layer
def bias(x, initializer=tf.zeros_initializer, name=None):
    """
    Add a trainable bias over the last (channel) axis.

    :param x: input tensor
    :param initializer: bias initializer factory (called with no args)
    :param name: optional name for the output op
    :return: x plus the bias, broadcast over the channel axis
    """
    outdim = x.dims[-1]
    b = tf.get_bias('b', shape=(outdim,), initializer=initializer())
    return tf.nn.bias_add(x, b, name=name)
# region pooling
def _pool_kernel_stide(dim, kernel, stride):
if isinstance(kernel, int):
kernel = [kernel] * dim
if isinstance(stride, int):
stride = [stride] * dim
assert len(kernel) == dim and len(stride) == dim
return [1] + list(kernel) + [1], [1] + list(stride) + [1]
@layer
def maxpool(x, kernel=2, stride=None, padding='SAME'):
    """
    Max pooling for 2-D or 3-D spatial input.

    :param x: tensor [batch, spatial..., channel]
    :param kernel: int or per-axis pooling window sizes
    :param stride: int or per-axis strides; defaults to `kernel`
    :param padding: 'SAME' or 'VALID'
    :return: pooled tensor
    :raises ValueError: for spatial rank other than 2 or 3
    """
    nd = x.ndim - 2
    stride = kernel if stride is None else stride
    kernel, stride = _pool_kernel_stide(nd, kernel, stride)
    if nd == 2:
        return tf.nn.max_pool(x, kernel, stride, padding)
    elif nd == 3:
        return tf.nn.max_pool3d(x, kernel, stride, padding)
    else:
        raise ValueError('maxpool support {0}? '.format(nd))
@layer
def maxpool_where(x, kernel, stride=None, pads=None, padding='SAME', keep=None):
    """
    Max-pool *x* and also produce the argmax mask for later unpooling.

    :param x: input tensor
    :param kernel: pooling window (also used as stride)
    :param stride: must be None — only kernel == stride is implemented
    :param pads: forwarded to where_pooled
    :param padding: must be 'SAME'
    :param keep: optional list; when given, the mask is appended to it and
                 only the pooled tensor is returned
    :return: (pooled, mask) or pooled alone when `keep` is provided
    """
    # only the kernel == stride, SAME-padding case is implemented
    assert stride is None and padding == 'SAME'
    pooled = maxpool(x, kernel, stride=kernel, padding=padding)
    mask = where_pooled(x, pooled, kernel, pads=pads)
    if keep is not None:
        keep.append(mask)
        return pooled
    return pooled, mask
@layer
def where_pooled(x, pooled, kernel=None, pads=None):
    """
    Build a float mask marking which entries of *x* survived max-pooling.

    The pooled tensor is upsampled back (by repeating each value over its
    window) to x's spatial shape and compared element-wise: positions equal
    to their window's maximum become 1.0, everything else 0.0.

    :param x: 4D input tensor that was pooled
    :param pooled: result of max-pooling x
    :param kernel: pooling window (int or pair); inferred from the spatial
                   size ratio when None
    :param pads: optional padding applied on the spatial axes before cropping
    :return: float mask with x's shape
    """
    # todo : add 3d support
    assert x.ndim == 4
    import math
    if kernel is None:
        # infer the per-axis window from the spatial size ratio.
        # bugfix: the original read the nonexistent attribute `pooled.zip`
        # instead of `pooled.dims`, and zipped over all 4 dims even though
        # only the spatial axes (1, 2) were pooled and repeats() below takes
        # exactly two factors.
        kernel = [math.ceil(float(d) / float(p))
                  for d, p in zip(x.dims[1:3], pooled.dims[1:3])]
        repeat = pooled.repeats(kernel, axis=[1, 2])
    elif isinstance(kernel, (tuple, list)):
        repeat = pooled.repeats(kernel, axis=[1, 2])
    else:
        repeat = pooled.repeats([kernel, kernel], axis=[1, 2])
    if pads is not None:
        repeat = repeat.pad(pads, axis=[1, 2])
    # crop the upsampled tensor back to x's exact shape before comparing
    dim = x.dims
    sameshaped = repeat[:dim[0], :dim[1], :dim[2], :dim[3]]
    mask = tf.equal(x, sameshaped).to_float()
    return mask
@layer
def unpool_where(x, mask, kernel, padding='SAME'):
    """
    Reverse a max-pool: repeat each pooled value over its window, then zero
    out every position not recorded in *mask*.

    :param x: pooled tensor (4D or 5D)
    :param mask: float mask from where_pooled / maxpool_where
    :param kernel: pooling window size (int or per-axis tuple)
    :param padding: only 'SAME' is supported
    :return: unpooled tensor with x's values placed at the masked positions
    :raises ValueError: for unsupported rank
    """
    # really not a option yet
    # assert stride is None
    assert padding == 'SAME'
    rank = x.ndim
    if rank == 4:
        spatial_axes = (1, 2)
    elif rank == 5:
        spatial_axes = (1, 2, 3)
    else:
        raise ValueError('unsupported nd {0}'.format(rank))
    if isinstance(kernel, int):
        kernel = tuple([kernel] * len(spatial_axes))
    upsampled = x.repeats(kernel, axis=spatial_axes)
    return upsampled * mask
@layer
def unpool_zero(x, kernel):
    """
    Upsample by inserting zeros between elements along every spatial axis.

    :param x: tensor [batch, *spatial, channel]
    :param kernel: int or per-spatial-axis list of upsampling factors
    :return: zero-interleaved upsampled tensor
    """
    if not isinstance(kernel, (list, tuple)) and isinstance(kernel, int):
        kernel = [kernel] * (x.ndim - 2)
    out = x
    # bugfix: the spatial axes run 1 .. ndim-2 inclusive, i.e.
    # range(1, x.ndim - 1). The original loop used range(1, x.ndim - 2),
    # which skipped the last spatial axis (e.g. axis 2 of a 4D tensor),
    # so only part of `kernel` was ever applied.
    for axis in range(1, x.ndim - 1):
        out = out.insert_zero(kernel[axis - 1], axis=axis)
    return out
@layer
def unpool_repeat(x, kernel):
    """
    Upsample by repeating each element along every spatial axis.

    :param x: tensor [batch, *spatial, channel]
    :param kernel: int or per-spatial-axis list of upsampling factors
    :return: repeated (nearest-neighbour style) upsampled tensor
    """
    if not isinstance(kernel, (list, tuple)) and isinstance(kernel, int):
        kernel = [kernel] * (x.ndim - 2)
    # bugfix: spatial axes run 1 .. ndim-2 inclusive. The original used
    # range(1, x.ndim - 2), which dropped the last spatial axis and left
    # `kernel` longer than the axis list passed to repeats().
    return x.repeats(kernel, axis=list(range(1, x.ndim - 1)))
@layer
def avgpool(x, kernel, stride=None, padding='SAME'):
    """
    Average pooling for 4D (2 spatial dims) or 5D (3 spatial dims) input.

    :param x: input tensor [batch, *spatial, channel]
    :param kernel: window size (int or per-axis)
    :param stride: stride; defaults to `kernel` when None (consistent with
                   `maxpool`; existing callers passing an explicit stride
                   are unaffected)
    :param padding: 'SAME' or 'VALID'
    :return: pooled tensor
    :raises ValueError: for unsupported spatial rank
    """
    nd = x.ndim - 2
    if stride is None:
        stride = kernel
    kernel, stride = _pool_kernel_stide(nd, kernel, stride)
    if nd == 2:
        return tf.nn.avg_pool(x, kernel, stride, padding)
    elif nd == 3:
        return tf.nn.avg_pool3d(x, kernel, stride, padding)
    else:
        raise ValueError('avgpool support {0}? '.format(nd))
@layer
def gpool(x, keepdims=True):
    """
    Global average pooling over all spatial axes.

    ref: http://arxiv.org/pdf/1312.4400.pdf
    :param x: tensor [batch, *spatial, channel]
    :param keepdims: keep the reduced axes as size-1 dims
    :return: spatially-averaged tensor
    """
    spatial = list(range(1, x.ndim - 1))
    return x.mean(axis=spatial, keepdims=keepdims)
# endregion
# region atrous convolution
# def atrous2d(x, )
def _atrous1d(x, kernel, rate, padding='SAME'):
    """
    1D dilated (atrous) convolution via the time_to_batch / batch_to_time trick.

    cf https://www.tensorflow.org/versions/r0.11/api_docs/python/nn.html#atrous_conv2d
    :param x: [batch, time, channel]
    :param kernel: filter tensor; kernel.dims[0] is read as the filter width
    :param rate: dilation rate ((rate - 1) zeros conceptually inserted
                 between consecutive filter taps)
    :param padding: 'SAME' or 'VALID'
    :return: convolved tensor
    """
    # from ireshape import time_to_batch, batch_to_time
    # atrous_conv1d implementation
    if rate == 1:
        # rate 1 is an ordinary convolution — no interleaving needed
        # same to normal conv1d
        out = tf.nn.conv1d(x, kernel, stride=(1, 1, 1), padding=padding)
        return out
    # compute explicit padding so the stride-1 VALID conv below reproduces
    # the requested padding mode on the dilated filter
    if padding == 'SAME':
        filter_width = kernel.dims[0]
        # temporal dimension of the filter and the upsampled filter in which we
        # introduce (rate - 1) zeros between consecutive filter values.
        filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
        pad = filter_width_up - 1
        # When pad is odd, we pad more to right
        pad_left = pad // 2
        pad_right = pad - pad_left
    elif padding == 'VALID':
        pad_left = 0
        pad_right = 0
    else:
        raise ValueError('Invalid padding')
    in_width = x.dims[1] + pad_left + pad_right
    # more padding so that rate divides the width of the input
    pad_right_extra = (rate - in_width % rate) % rate
    pads = [(0, 0), (pad_left, pad_right + pad_right_extra), (0, 0)]
    # interleave time into the batch axis so a stride-1 VALID conv on the
    # interleaved tensor acts as a rate-dilated conv on the original
    out = x.time_to_batch(rate, pads)
    out = tf.nn.conv1d(out, kernel, stride=(1, 1, 1), padding='VALID')
    # if bias is not None:
    #     bias=bias,
    crops = [(0, 0), (0, pad_right_extra), (0, 0)]
    # temporary test this
    # undo the interleaving and crop off the divisibility padding
    out = out.batch_to_time(rate, crops)
    return out
@layer
def atrous(x, outdim, kernel, rate, pad=0, padding='SAME',
           initializer=tf.he_uniform, bias=None, **kwargs):
    """
    Dilated (atrous) convolution for 1D or 2D spatial input.

    :param x: input tensor [batch, *spatial, channel]
    :param outdim: output channel count
    :param kernel: int or per-axis kernel sizes
    :param rate: dilation rate (shared across axes)
    :param pad: symmetric constant padding added to each spatial axis first
    :param padding: 'SAME' or 'VALID'
    :param initializer: weight initializer factory
    :param bias: add a bias term when truthy
    :return: convolved tensor
    :raises NotImplementedError: for spatial rank other than 1 or 2
    """
    # todo rate per axis?
    assert isinstance(pad, int)
    nd = x.ndim - 2
    if pad:
        pads = [(0, 0)] + [(pad, pad)] * nd + [(0, 0)]
        x = tf.pad(x, pads, mode='CONSTANT')
    kernel = _kernel_shape(nd, kernel, x.dims[-1], outdim)
    W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel), **kwargs)
    if nd == 1:
        out = _atrous1d(x, W, rate, padding=padding)
    elif nd == 2:
        out = tf.nn.atrous_conv2d(x, W, rate, padding)
    else:
        raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))
    # bugfix: use truthiness like every other layer in this module; the
    # original tested `bias is not None`, so an explicit `bias=False`
    # still added a bias term.
    if bias:
        b = tf.get_bias('b', shape=(outdim,), initializer=tf.zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
# endregion
# region deconv
def _deconv_outshape(nd, inshape, outdim, kernel, stride, padding, extra_shape=0):
# conv2d case (filter = kernel)
# output = (input + stride - 1)//stride # SAME ? filter?
# output = (input + stride - filter)//stride # VALID
# 위 식 inverse
# output = (input * stride) - stride + 1 + extra
# todo : through check need ??
# => max일경우 (output - 1) * stride + 1 - stride
# output = (input * stride) - stride + filter + extra # VALID
# 단, 0 <= extra < stride
if isinstance(kernel, int):
kernel = [kernel] * nd
if isinstance(stride, int):
stride = [stride] * nd
if extra_shape is None:
extra_shape = 0
if isinstance(extra_shape, int):
extra_shape = [extra_shape] * nd
outshape = [None] * nd
if padding == 'SAME':
for i in range(0, nd):
outshape[i] = inshape[i+1] * stride[i] + extra_shape[0]
elif padding == 'VALID':
# assert -stride[0] < extra_shape[0] < stride[0]
# assert -stride[1] < extra_shape[1] < stride[1]
for i in range(0, nd):
outshape[i] = (inshape[i+1] * stride[i]) - stride[i] + kernel[i] + extra_shape[i]
else:
raise ValueError('unknown padding option {0}'.format(padding))
return [inshape[0]] + outshape + [outdim]
@layer
def deconv(x, outdim, kernel, stride=1, padding='SAME',
           initializer=tf.he_uniform, bias=False, extra=None, **kwargs):
    """
    Transposed convolution (deconvolution) for 4D or 5D input.

    :param x: input tensor [batch, *spatial, channel]
    :param outdim: output channel count
    :param kernel: int or per-axis kernel sizes
    :param stride: int or per-axis strides
    :param padding: 'SAME' or 'VALID'
    :param initializer: weight initializer factory
    :param bias: add a bias term when True
    :param extra: extra per-axis output size forwarded to _deconv_outshape
    :return: output tensor with static shape restored via set_shape
    :raises NotImplementedError: for spatial rank other than 2 or 3
    """
    nd = x.ndim - 2
    out_shape = _deconv_outshape(nd, x.dims, outdim, kernel, stride, padding, extra)
    # capture the (possibly partially-unknown) static shape before the
    # dynamic batch-size substitution below mutates out_shape
    oshape = tf.TensorShape(out_shape)
    if out_shape[0] is None:
        # batch size unknown at graph-build time: take it from the runtime shape
        out_shape[0] = tf.shape(x)[0]
        out_shape = tf.stack(out_shape)
    # transposed-conv filters carry [*kernel, outdim, indim]
    kernel_shape = _kernel_shape(nd, kernel, outdim, x.dims[-1])  # swap in and out channel
    stride = _stride_shape(nd, stride)  # stride
    W = tf.get_weight('W', shape=kernel_shape, initializer=initializer(kernel_shape))
    if nd == 2:
        out = tf.nn.conv2d_transpose(x, W, out_shape, strides=stride, padding=padding)
    elif nd == 3:
        out = tf.nn.conv3d_transpose(x, W, out_shape, strides=stride, padding=padding)
    else:
        raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))
    if bias:
        b = tf.get_bias('b', shape=(outdim,), initializer=tf.zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    # restore static shape info lost through the dynamic-shape path
    out.set_shape(oshape)
    return out
# endregion
# region depthwise
@layer
def dwconv(x, kernel, multiplier=1, stride=1, pad=0, padding='SAME',
           initializer=tf.he_uniform, bias=False, **kwargs):
    """
    Depthwise 2D convolution: each input channel is convolved with its own
    `multiplier` filters, giving indim * multiplier output channels.

    :param x: 4D input tensor
    :param kernel: int or per-axis kernel sizes
    :param multiplier: channel multiplier per input channel
    :param stride: int or per-axis strides
    :param pad: symmetric constant padding added to the spatial axes first
    :param padding: 'SAME' or 'VALID'
    :param initializer: weight initializer factory
    :param bias: add a bias term when True
    :return: convolved tensor
    """
    if pad:
        x = tf.pad(x, [(0, 0), (pad, pad), (pad, pad), (0, 0)], mode='CONSTANT')
    kshape = _kernel_shape(2, kernel, x.dims[-1], multiplier)
    sshape = _stride_shape(2, stride)
    W = tf.get_weight('W', shape=kshape, initializer=initializer(kshape), **kwargs)
    out = tf.nn.depthwise_conv2d(x, W, sshape, padding)
    if bias:
        nchannel = kshape[2] * multiplier
        b = tf.get_bias('b', shape=(nchannel,), initializer=tf.zeros_initializer(), **kwargs)
        out = tf.nn.bias_add(out, b)
    return out
# endregion
# region subpixel
@layer
def subpixel(x, kernel, factor=2, stride=1, pad=0, padding='SAME',
             initializer=tf.he_uniform, bias=False, **kwargs):
    """
    Sub-pixel convolution: convolve to factor^2 * indim channels, then
    periodic-shuffle channels into space (upsamples H and W by `factor`).

    :param x: 4D input tensor
    :param kernel: int or per-axis kernel sizes
    :param factor: spatial upsampling factor
    :param stride: int or per-axis strides for the convolution
    :param pad: NOTE(review): accepted but unused in this implementation —
                confirm whether spatial padding was intended here
    :param padding: 'SAME' or 'VALID'
    :param initializer: weight initializer factory
    :param bias: add a bias term when True
    :return: upsampled tensor
    """
    from .ireshape import channel_to_space
    assert x.ndim == 4  # implemented for 4D tensor
    indim = x.dims[-1]
    outdim = indim * factor * factor
    kshape = _kernel_shape(2, kernel, indim, outdim)
    strides = _stride_shape(2, stride)
    W = tf.get_weight('W', shape=kshape, initializer=initializer(kshape))
    out = tf.nn.conv2d(x, W, strides, padding=padding)
    if bias:
        b = tf.get_bias('b', shape=(outdim,), initializer=tf.zeros_initializer())
        out = tf.nn.bias_add(out, b)
    # periodic shuffle: channels -> space
    return channel_to_space(out, factor)
# endregion
# region activation
@layer
def leaky(x, slope=0.01, name=None):
    """
    Leaky ReLU: elementwise max(x, slope * x).

    see also pleaky (learned slope)
    :param x: input tensor
    :param slope: negative-region slope, 0.01 by default
    :param name: optional name for the output op
    :return: activated tensor
    """
    scaled = x * slope
    return tf.maximum(x, scaled, name=name)
@layer
def pleaky(x):
    """
    Parametric leaky ReLU: the negative-region slope is a learned scalar,
    initialized to 0.01.

    :param x: input tensor
    :return: activated tensor
    """
    slope = tf.get_bias('alpha', shape=(), initializer=tf.constant_initializer(0.01))
    return tf.maximum(x, x * slope)
# endregion
# region resize images
@layer
def sizeup(x, factor=(2, 2), extras=(0, 0), method=ResizeMethod.NEAREST_NEIGHBOR, align_corners=False):
    """
    Resize H and W up by `factor`, plus `extras` pixels per axis.

    :param x: 4D input tensor
    :param factor: int or (fh, fw) multiplicative factors
    :param extras: int or (eh, ew) additive pixels
    :param method: tf resize method
    :param align_corners: forwarded to resize_images
    :return: resized tensor
    """
    if isinstance(factor, int):
        factor = (factor, factor)
    if isinstance(extras, int):
        extras = (extras, extras)
    h = x.dims[1] * factor[0] + extras[0]
    w = x.dims[2] * factor[1] + extras[1]
    return tf.image.resize_images(x, [h, w], method=method, align_corners=align_corners)
@layer
def sizedown(x, factors=(2, 2), extras=(0, 0), method=ResizeMethod.NEAREST_NEIGHBOR, align_corners=False):
    """
    Resize H and W down by integer division with `factors`, plus `extras`.

    :param x: 4D input tensor
    :param factors: int or (fh, fw) divisors
    :param extras: int or (eh, ew) additive pixels
    :param method: tf resize method
    :param align_corners: forwarded to resize_images
    :return: resized tensor
    """
    if isinstance(factors, int):
        factors = (factors, factors)
    if isinstance(extras, int):
        extras = (extras, extras)
    h = x.dims[1] // factors[0] + extras[0]
    w = x.dims[2] // factors[1] + extras[1]
    return tf.image.resize_images(x, [h, w], method=method, align_corners=align_corners)
# endregion
# region collecting utils
@layer
def keep(t, keepto, collection=None):
    """
    Record tensor *t* and return it unchanged (identity pass-through).

    :param t: tensor
    :param keepto: list that *t* is appended to
    :param collection: optional graph-collection name to also add *t* to
    :return: t, untouched
    """
    keepto.append(t)
    if collection is not None:
        tf.add_to_collection(collection, t)
    return t
@layer
def collect(t, collection='activation'):
    """
    Add *t* to the named graph collection and return it unchanged.

    :param t: tensor
    :param collection: graph-collection name ('activation' by default)
    :return: t, untouched
    """
    tf.add_to_collection(collection, t)
    return t
# endregion
# region util
@layer
def iname(t, name):
    """Return *t* wrapped in tf.identity so the output op carries `name`."""
    return tf.identity(t, name=name)
# endregion
|
flexible
|
{
"blob_id": "940c3b4a2b96907644c0f12deddd8aba4086a0f0",
"index": 5131,
"step-1": "<mask token>\n\n\ndef _kernel_shape(nd, k, indim, outdim):\n if isinstance(k, int):\n k = [k for _ in range(nd)]\n k = list(k)\n assert len(k) == nd\n k.extend([indim, outdim])\n return k\n\n\n<mask token>\n\n\n@layer\ndef conv(x, outdim, kernel=3, stride=1, pad=0, padding='SAME', mode=\n 'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):\n nd = x.ndim\n if nd == 3:\n return conv1d(x, outdim, kernel, stride=stride, pad=pad, padding=\n padding, mode=mode, initializer=initializer, bias=bias, **kwargs)\n elif nd == 4:\n return conv2d(x, outdim, kernel, stride=stride, pad=pad, padding=\n padding, mode=mode, initializer=initializer, bias=bias, **kwargs)\n elif nd == 5:\n return conv3d(x, outdim, kernel, stride=stride, pad=pad, padding=\n padding, mode=mode, initializer=initializer, bias=bias, **kwargs)\n else:\n raise ValueError('conv for {nd}? nd <= 5'.format(nd=nd))\n\n\n@layer\ndef conv1d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode=\n 'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):\n kernel = _kernel_shape(1, kernel, x.dims[-1], outdim)\n pads = None\n if padding == 'SAME' and mode != 'CONSTANT':\n half = (kernel[0] - 1) // 2\n pads = [(0, 0), (pad + half, pad + kernel[0] - 1 - half), (0, 0)]\n padding = 'VALID'\n elif pad:\n pads = [(0, 0), (pad, pad), (0, 0)]\n if pads is not None:\n x = tf.pad(x, pads, mode=mode)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n out = tf.nn.conv1d(x, W, stride, padding)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n@layer\ndef conv2d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode=\n 'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):\n kernel = _kernel_shape(2, kernel, x.dims[-1], outdim)\n stride = _stride_shape(2, stride)\n pads = None\n if padding == 'SAME' and mode != 'CONSTANT':\n half = (kernel[0] - 1) // 2, (kernel[1] - 
1) // 2\n pads = [(0, 0), (pad + half[0], pad + kernel[0] - 1 - half[0]), (\n pad + half[1], pad + kernel[1] - 1 - half[1]), (0, 0)]\n padding = 'VALID'\n elif pad:\n pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]\n if pads is not None:\n x = tf.pad(x, pads, mode=mode)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n out = tf.nn.conv2d(x, W, stride, padding)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n<mask token>\n\n\n@layer\ndef renorm(x, axis=-1, momentum=0.99, epsilon=0.001, training=None, gamma=\n 1.0, beta=0.0, stddev=0.002, renorm_momentum=0.99, renorm_clipping=None,\n **kwargs):\n \"\"\"\n https://arxiv.org/abs/1702.03275\n https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization\n :param x:\n :param dict renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar Tensors\n used to clip the renorm correction. 
The correction (r, d) is used as corrected_value = normalized_value * r + d,\n with r clipped to [rmin, rmax], and d to [-dmax, dmax].\n Missing rmax, rmin, dmax are set to inf, 0, inf, respectively.\n :return:\n \"\"\"\n init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)\n init_beta = tf.constant_initializer(beta)\n reuse = tf.get_variable_scope().reuse\n if training is None and (reuse or kwargs.get('reuse', False)):\n training = False\n elif training is None:\n training = x.graph.is_training\n if renorm_clipping is not None:\n if renorm_clipping.get('rmin', None) is None:\n rmax = renorm_clipping.get('rmax', None)\n if rmax is not None and not np.isinf(rmax):\n rmin = 1 / rmax\n renorm_clipping['rmin'] = rmin\n out = tf.layers.batch_normalization(x, axis=axis, momentum=momentum,\n epsilon=epsilon, beta_initializer=init_beta, gamma_initializer=\n init_gamma, training=training, renorm=True, renorm_clipping=\n renorm_clipping, renorm_momentum=renorm_momentum, **kwargs)\n return out\n\n\n<mask token>\n\n\n@layer\ndef lnorm(x, center=True, scale=True, activation_fn=None, reuse=None,\n variables_collections=None, outputs_collections=None, trainable=True,\n begin_norm_axis=1, begin_params_axis=-1, scope=None, **kwargs):\n \"\"\"\n # layer normalization\n :param x:\n :return:\n \"\"\"\n return tf.contrib.layers.layer_norm(x, center=center, scale=scale,\n activation_fn=activation_fn, reuse=reuse, variables_collections=\n variables_collections, outputs_collections=outputs_collections,\n trainable=trainable, begin_norm_axis=begin_norm_axis,\n begin_params_axis=begin_params_axis, scope=None, **kwargs)\n\n\n<mask token>\n\n\n@layer\ndef dropout(x, keep_prob=0.5, is_training=None, noise_shape=None, seed=None):\n if keep_prob == 1.0:\n return x\n\n def _dropout():\n return tf.nn.dropout(x, keep_prob, noise_shape, seed)\n if is_training is None:\n is_training = x.graph.is_training\n else:\n is_training = tf.convert_to_tensor(is_training)\n return 
tf.cond(is_training, _dropout, lambda : x)\n\n\n@layer\ndef dense(x, outdim, initializer=tf.glorot_uniform, bias=False, name=None):\n \"\"\"\n out = dense( shape=shape, init=None, paramset=None)\n :param x: tensor\n :param bias:\n :param outdim: output_size\n :param initializer:\n :param name:\n :return: layer | output | (output, params)\n \"\"\"\n if x.ndim == 4:\n x = x.flat2d()\n assert x.ndim == 2\n outshape = not isinstance(outdim, int)\n if outshape:\n dim = [-1] + list(outdim)\n outdim = np.prod(outdim)\n shape = [x.dims[-1], outdim]\n W = tf.get_weight('W', shape=shape, initializer=initializer(shape))\n out = x.dot(W)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer())\n out = tf.nn.bias_add(out, b)\n if outshape:\n out = out.reshape(dim)\n return tf.identity(out, name=name)\n\n\n@layer\ndef bias(x, initializer=tf.zeros_initializer, name=None):\n outdim = x.dims[-1]\n b = tf.get_bias('b', shape=(outdim,), initializer=initializer())\n return tf.nn.bias_add(x, b, name=name)\n\n\ndef _pool_kernel_stide(dim, kernel, stride):\n if isinstance(kernel, int):\n kernel = [kernel] * dim\n if isinstance(stride, int):\n stride = [stride] * dim\n assert len(kernel) == dim and len(stride) == dim\n return [1] + list(kernel) + [1], [1] + list(stride) + [1]\n\n\n@layer\ndef maxpool(x, kernel=2, stride=None, padding='SAME'):\n nd = x.ndim - 2\n stride = kernel if stride is None else stride\n kernel, stride = _pool_kernel_stide(nd, kernel, stride)\n if nd == 2:\n return tf.nn.max_pool(x, kernel, stride, padding)\n elif nd == 3:\n return tf.nn.max_pool3d(x, kernel, stride, padding)\n else:\n raise ValueError('maxpool support {0}? 
'.format(nd))\n\n\n@layer\ndef maxpool_where(x, kernel, stride=None, pads=None, padding='SAME', keep=None\n ):\n assert stride is None and padding == 'SAME'\n stride = kernel\n pooled = maxpool(x, kernel, stride=stride, padding=padding)\n mask = where_pooled(x, pooled, kernel, pads=pads)\n if keep is None:\n return pooled, mask\n else:\n keep.append(mask)\n return pooled\n\n\n@layer\ndef where_pooled(x, pooled, kernel=None, pads=None):\n \"\"\"\n return mask\n :param x:\n :param pooled:\n :param kernel:\n :param pads:\n :return:\n \"\"\"\n assert x.ndim == 4\n import math\n if kernel is None:\n kernel = [math.ceil(float(d) / float(p)) for d, p in zip(x.dims,\n pooled.zip)]\n repeat = pooled.repeats(kernel, axis=[1, 2])\n elif isinstance(kernel, (tuple, list)):\n repeat = pooled.repeats(kernel, axis=[1, 2])\n else:\n repeat = pooled.repeats([kernel, kernel], axis=[1, 2])\n if pads is not None:\n repeat = repeat.pad(pads, axis=[1, 2])\n dim = x.dims\n sameshaped = repeat[:dim[0], :dim[1], :dim[2], :dim[3]]\n mask = tf.equal(x, sameshaped).to_float()\n return mask\n\n\n<mask token>\n\n\n@layer\ndef unpool_repeat(x, kernel):\n \"\"\" upsample by repeating\"\"\"\n if not isinstance(kernel, (list, tuple)) and isinstance(kernel, int):\n kernel = [kernel] * (x.ndim - 2)\n return x.repeats(kernel, axis=list(range(1, x.ndim - 2)))\n\n\n@layer\ndef avgpool(x, kernel, stride, padding='SAME'):\n nd = x.ndim - 2\n kernel, stride = _pool_kernel_stide(nd, kernel, stride)\n if nd == 2:\n return tf.nn.avg_pool(x, kernel, stride, padding)\n elif nd == 3:\n return tf.nn.avg_pool3d(x, kernel, stride, padding)\n else:\n raise ValueError('avgpool support {0}? 
'.format(nd))\n\n\n@layer\ndef gpool(x, keepdims=True):\n \"\"\"\n global_avgpool\n :param x:\n :param keepdims:\n :return:\n \"\"\"\n axis = list(range(1, x.ndim - 1))\n return x.mean(axis=axis, keepdims=keepdims)\n\n\ndef _atrous1d(x, kernel, rate, padding='SAME'):\n \"\"\"\n cf https://www.tensorflow.org/versions/r0.11/api_docs/python/nn.html#atrous_conv2d\n :param x: [batch, time, channel]\n :param kernel: [1, 1, inchannel, outchannel]\n :param rate: dialtion rate\n :param padding: 'same' or 'valid'\n :param bias:\n :return:\n \"\"\"\n if rate == 1:\n out = tf.nn.conv1d(x, kernel, stride=(1, 1, 1), padding=padding)\n return out\n if padding == 'SAME':\n filter_width = kernel.dims[0]\n filter_width_up = filter_width + (filter_width - 1) * (rate - 1)\n pad = filter_width_up - 1\n pad_left = pad // 2\n pad_right = pad - pad_left\n elif padding == 'VALID':\n pad_left = 0\n pad_right = 0\n else:\n raise ValueError('Invalid padding')\n in_width = x.dims[1] + pad_left + pad_right\n pad_right_extra = (rate - in_width % rate) % rate\n pads = [(0, 0), (pad_left, pad_right + pad_right_extra), (0, 0)]\n out = x.time_to_batch(rate, pads)\n out = tf.nn.conv1d(out, kernel, stride=(1, 1, 1), padding='VALID')\n crops = [(0, 0), (0, pad_right_extra), (0, 0)]\n out = out.batch_to_time(rate, crops)\n return out\n\n\n@layer\ndef atrous(x, outdim, kernel, rate, pad=0, padding='SAME', initializer=tf.\n he_uniform, bias=None, **kwargs):\n assert isinstance(pad, int)\n nd = x.ndim - 2\n if pad:\n pads = [(0, 0)] + [(pad, pad)] * nd + [(0, 0)]\n x = tf.pad(x, pads, mode='CONSTANT')\n kernel = _kernel_shape(nd, kernel, x.dims[-1], outdim)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n if nd == 1:\n out = _atrous1d(x, W, rate, padding=padding)\n elif nd == 2:\n out = tf.nn.atrous_conv2d(x, W, rate, padding)\n else:\n raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))\n if bias is not None:\n b = tf.get_bias('b', shape=(outdim,), 
initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n<mask token>\n\n\n@layer\ndef deconv(x, outdim, kernel, stride=1, padding='SAME', initializer=tf.\n he_uniform, bias=False, extra=None, **kwargs):\n nd = x.ndim - 2\n out_shape = _deconv_outshape(nd, x.dims, outdim, kernel, stride,\n padding, extra)\n oshape = tf.TensorShape(out_shape)\n if out_shape[0] is None:\n out_shape[0] = tf.shape(x)[0]\n out_shape = tf.stack(out_shape)\n kernel_shape = _kernel_shape(nd, kernel, outdim, x.dims[-1])\n stride = _stride_shape(nd, stride)\n W = tf.get_weight('W', shape=kernel_shape, initializer=initializer(\n kernel_shape))\n if nd == 2:\n out = tf.nn.conv2d_transpose(x, W, out_shape, strides=stride,\n padding=padding)\n elif nd == 3:\n out = tf.nn.conv3d_transpose(x, W, out_shape, strides=stride,\n padding=padding)\n else:\n raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n out.set_shape(oshape)\n return out\n\n\n@layer\ndef dwconv(x, kernel, multiplier=1, stride=1, pad=0, padding='SAME',\n initializer=tf.he_uniform, bias=False, **kwargs):\n if pad:\n pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]\n x = tf.pad(x, pads, mode='CONSTANT')\n kernel = _kernel_shape(2, kernel, x.dims[-1], multiplier)\n stride = _stride_shape(2, stride)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n out = tf.nn.depthwise_conv2d(x, W, stride, padding)\n if bias:\n outdim = kernel[2] * multiplier\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n<mask token>\n\n\n@layer\ndef keep(t, keepto, collection=None):\n \"\"\"\n append to list and return t as is\n :param t: tensor\n :param keepto: list\n :return:\n \"\"\"\n if collection is not None:\n tf.add_to_collection(collection, t)\n 
keepto.append(t)\n return t\n\n\n<mask token>\n\n\n@layer\ndef iname(t, name):\n return tf.identity(t, name=name)\n",
"step-2": "<mask token>\n\n\ndef _kernel_shape(nd, k, indim, outdim):\n if isinstance(k, int):\n k = [k for _ in range(nd)]\n k = list(k)\n assert len(k) == nd\n k.extend([indim, outdim])\n return k\n\n\ndef _stride_shape(nd, s):\n \"\"\"\n\n :param nd:\n :param s: int | list | tuple\n :return:\n \"\"\"\n if isinstance(s, int):\n s = [s for _ in range(nd)]\n s = list(s)\n assert len(s) == nd\n s = [1] + s + [1]\n return s\n\n\n@layer\ndef conv(x, outdim, kernel=3, stride=1, pad=0, padding='SAME', mode=\n 'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):\n nd = x.ndim\n if nd == 3:\n return conv1d(x, outdim, kernel, stride=stride, pad=pad, padding=\n padding, mode=mode, initializer=initializer, bias=bias, **kwargs)\n elif nd == 4:\n return conv2d(x, outdim, kernel, stride=stride, pad=pad, padding=\n padding, mode=mode, initializer=initializer, bias=bias, **kwargs)\n elif nd == 5:\n return conv3d(x, outdim, kernel, stride=stride, pad=pad, padding=\n padding, mode=mode, initializer=initializer, bias=bias, **kwargs)\n else:\n raise ValueError('conv for {nd}? 
nd <= 5'.format(nd=nd))\n\n\n@layer\ndef conv1d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode=\n 'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):\n kernel = _kernel_shape(1, kernel, x.dims[-1], outdim)\n pads = None\n if padding == 'SAME' and mode != 'CONSTANT':\n half = (kernel[0] - 1) // 2\n pads = [(0, 0), (pad + half, pad + kernel[0] - 1 - half), (0, 0)]\n padding = 'VALID'\n elif pad:\n pads = [(0, 0), (pad, pad), (0, 0)]\n if pads is not None:\n x = tf.pad(x, pads, mode=mode)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n out = tf.nn.conv1d(x, W, stride, padding)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n@layer\ndef conv2d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode=\n 'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):\n kernel = _kernel_shape(2, kernel, x.dims[-1], outdim)\n stride = _stride_shape(2, stride)\n pads = None\n if padding == 'SAME' and mode != 'CONSTANT':\n half = (kernel[0] - 1) // 2, (kernel[1] - 1) // 2\n pads = [(0, 0), (pad + half[0], pad + kernel[0] - 1 - half[0]), (\n pad + half[1], pad + kernel[1] - 1 - half[1]), (0, 0)]\n padding = 'VALID'\n elif pad:\n pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]\n if pads is not None:\n x = tf.pad(x, pads, mode=mode)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n out = tf.nn.conv2d(x, W, stride, padding)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n<mask token>\n\n\n@layer\ndef renorm(x, axis=-1, momentum=0.99, epsilon=0.001, training=None, gamma=\n 1.0, beta=0.0, stddev=0.002, renorm_momentum=0.99, renorm_clipping=None,\n **kwargs):\n \"\"\"\n https://arxiv.org/abs/1702.03275\n https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization\n :param x:\n 
:param dict renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar Tensors\n used to clip the renorm correction. The correction (r, d) is used as corrected_value = normalized_value * r + d,\n with r clipped to [rmin, rmax], and d to [-dmax, dmax].\n Missing rmax, rmin, dmax are set to inf, 0, inf, respectively.\n :return:\n \"\"\"\n init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)\n init_beta = tf.constant_initializer(beta)\n reuse = tf.get_variable_scope().reuse\n if training is None and (reuse or kwargs.get('reuse', False)):\n training = False\n elif training is None:\n training = x.graph.is_training\n if renorm_clipping is not None:\n if renorm_clipping.get('rmin', None) is None:\n rmax = renorm_clipping.get('rmax', None)\n if rmax is not None and not np.isinf(rmax):\n rmin = 1 / rmax\n renorm_clipping['rmin'] = rmin\n out = tf.layers.batch_normalization(x, axis=axis, momentum=momentum,\n epsilon=epsilon, beta_initializer=init_beta, gamma_initializer=\n init_gamma, training=training, renorm=True, renorm_clipping=\n renorm_clipping, renorm_momentum=renorm_momentum, **kwargs)\n return out\n\n\n<mask token>\n\n\n@layer\ndef cnorm(x, labels, klass=None, stddev=0.01, beta=0.0, gamma=1.0, epsilon=\n 1e-05):\n \"\"\"\n conditional instance normalization (by label index)\n for learning embedding value of beta and gamma\n # https://arxiv.org/pdf/1610.07629.pdf for conditional instance normalization\n :param x:\n :param labels: [B,]\n :param klass: size of embedding var\n :param gamma: initial_gamma\n :param stddev: stddev for gamma random init\n :param beta: initial beta value\n :param epsilon: 1e-5 for var_epsilon\n :return:\n \"\"\"\n assert klass is not None\n init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)\n init_beta = tf.constant_initializer(beta)\n shape = [1] * x.ndim\n shape[0] = klass\n shape[-1] = x.dims[-1]\n beta_v = tf.get_weight(name='beta', shape=shape, initializer=init_beta)\n gamma_v = 
tf.get_weight(name='gamma', shape=shape, initializer=init_gamma)\n beta_l = tf.nn.embedding_lookup(beta_v, labels)\n gamma_l = tf.nn.embedding_lookup(gamma_v, labels)\n return inorm(x, beta=beta_l, gamma=gamma_l, epsilon=epsilon)\n\n\n@layer\ndef lnorm(x, center=True, scale=True, activation_fn=None, reuse=None,\n variables_collections=None, outputs_collections=None, trainable=True,\n begin_norm_axis=1, begin_params_axis=-1, scope=None, **kwargs):\n \"\"\"\n # layer normalization\n :param x:\n :return:\n \"\"\"\n return tf.contrib.layers.layer_norm(x, center=center, scale=scale,\n activation_fn=activation_fn, reuse=reuse, variables_collections=\n variables_collections, outputs_collections=outputs_collections,\n trainable=trainable, begin_norm_axis=begin_norm_axis,\n begin_params_axis=begin_params_axis, scope=None, **kwargs)\n\n\n@layer\ndef gnorm(x, group):\n \"\"\"\n group normalization\n\n :param x: [N, ...., C]\n :param int group: G,\n :return:\n \"\"\"\n shape = list(x.dims)\n if shape[0] is None:\n shape[0] = -1\n ch = shape[-1]\n shape[-1] = ch // group\n shape.append(group)\n assert ch // group * group == ch\n x = tf.reshape(x, shape)\n x_n = lnorm(x)\n shape = shape[:-1]\n shape[-1] = ch\n x = tf.reshape(x_n, shape)\n return x\n\n\n@layer\ndef dropout(x, keep_prob=0.5, is_training=None, noise_shape=None, seed=None):\n if keep_prob == 1.0:\n return x\n\n def _dropout():\n return tf.nn.dropout(x, keep_prob, noise_shape, seed)\n if is_training is None:\n is_training = x.graph.is_training\n else:\n is_training = tf.convert_to_tensor(is_training)\n return tf.cond(is_training, _dropout, lambda : x)\n\n\n@layer\ndef dense(x, outdim, initializer=tf.glorot_uniform, bias=False, name=None):\n \"\"\"\n out = dense( shape=shape, init=None, paramset=None)\n :param x: tensor\n :param bias:\n :param outdim: output_size\n :param initializer:\n :param name:\n :return: layer | output | (output, params)\n \"\"\"\n if x.ndim == 4:\n x = x.flat2d()\n assert x.ndim == 2\n outshape 
= not isinstance(outdim, int)\n if outshape:\n dim = [-1] + list(outdim)\n outdim = np.prod(outdim)\n shape = [x.dims[-1], outdim]\n W = tf.get_weight('W', shape=shape, initializer=initializer(shape))\n out = x.dot(W)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer())\n out = tf.nn.bias_add(out, b)\n if outshape:\n out = out.reshape(dim)\n return tf.identity(out, name=name)\n\n\n@layer\ndef bias(x, initializer=tf.zeros_initializer, name=None):\n outdim = x.dims[-1]\n b = tf.get_bias('b', shape=(outdim,), initializer=initializer())\n return tf.nn.bias_add(x, b, name=name)\n\n\ndef _pool_kernel_stide(dim, kernel, stride):\n if isinstance(kernel, int):\n kernel = [kernel] * dim\n if isinstance(stride, int):\n stride = [stride] * dim\n assert len(kernel) == dim and len(stride) == dim\n return [1] + list(kernel) + [1], [1] + list(stride) + [1]\n\n\n@layer\ndef maxpool(x, kernel=2, stride=None, padding='SAME'):\n nd = x.ndim - 2\n stride = kernel if stride is None else stride\n kernel, stride = _pool_kernel_stide(nd, kernel, stride)\n if nd == 2:\n return tf.nn.max_pool(x, kernel, stride, padding)\n elif nd == 3:\n return tf.nn.max_pool3d(x, kernel, stride, padding)\n else:\n raise ValueError('maxpool support {0}? 
'.format(nd))\n\n\n@layer\ndef maxpool_where(x, kernel, stride=None, pads=None, padding='SAME', keep=None\n ):\n assert stride is None and padding == 'SAME'\n stride = kernel\n pooled = maxpool(x, kernel, stride=stride, padding=padding)\n mask = where_pooled(x, pooled, kernel, pads=pads)\n if keep is None:\n return pooled, mask\n else:\n keep.append(mask)\n return pooled\n\n\n@layer\ndef where_pooled(x, pooled, kernel=None, pads=None):\n \"\"\"\n return mask\n :param x:\n :param pooled:\n :param kernel:\n :param pads:\n :return:\n \"\"\"\n assert x.ndim == 4\n import math\n if kernel is None:\n kernel = [math.ceil(float(d) / float(p)) for d, p in zip(x.dims,\n pooled.zip)]\n repeat = pooled.repeats(kernel, axis=[1, 2])\n elif isinstance(kernel, (tuple, list)):\n repeat = pooled.repeats(kernel, axis=[1, 2])\n else:\n repeat = pooled.repeats([kernel, kernel], axis=[1, 2])\n if pads is not None:\n repeat = repeat.pad(pads, axis=[1, 2])\n dim = x.dims\n sameshaped = repeat[:dim[0], :dim[1], :dim[2], :dim[3]]\n mask = tf.equal(x, sameshaped).to_float()\n return mask\n\n\n<mask token>\n\n\n@layer\ndef unpool_repeat(x, kernel):\n \"\"\" upsample by repeating\"\"\"\n if not isinstance(kernel, (list, tuple)) and isinstance(kernel, int):\n kernel = [kernel] * (x.ndim - 2)\n return x.repeats(kernel, axis=list(range(1, x.ndim - 2)))\n\n\n@layer\ndef avgpool(x, kernel, stride, padding='SAME'):\n nd = x.ndim - 2\n kernel, stride = _pool_kernel_stide(nd, kernel, stride)\n if nd == 2:\n return tf.nn.avg_pool(x, kernel, stride, padding)\n elif nd == 3:\n return tf.nn.avg_pool3d(x, kernel, stride, padding)\n else:\n raise ValueError('avgpool support {0}? 
'.format(nd))\n\n\n@layer\ndef gpool(x, keepdims=True):\n \"\"\"\n global_avgpool\n :param x:\n :param keepdims:\n :return:\n \"\"\"\n axis = list(range(1, x.ndim - 1))\n return x.mean(axis=axis, keepdims=keepdims)\n\n\ndef _atrous1d(x, kernel, rate, padding='SAME'):\n \"\"\"\n cf https://www.tensorflow.org/versions/r0.11/api_docs/python/nn.html#atrous_conv2d\n :param x: [batch, time, channel]\n :param kernel: [1, 1, inchannel, outchannel]\n :param rate: dialtion rate\n :param padding: 'same' or 'valid'\n :param bias:\n :return:\n \"\"\"\n if rate == 1:\n out = tf.nn.conv1d(x, kernel, stride=(1, 1, 1), padding=padding)\n return out\n if padding == 'SAME':\n filter_width = kernel.dims[0]\n filter_width_up = filter_width + (filter_width - 1) * (rate - 1)\n pad = filter_width_up - 1\n pad_left = pad // 2\n pad_right = pad - pad_left\n elif padding == 'VALID':\n pad_left = 0\n pad_right = 0\n else:\n raise ValueError('Invalid padding')\n in_width = x.dims[1] + pad_left + pad_right\n pad_right_extra = (rate - in_width % rate) % rate\n pads = [(0, 0), (pad_left, pad_right + pad_right_extra), (0, 0)]\n out = x.time_to_batch(rate, pads)\n out = tf.nn.conv1d(out, kernel, stride=(1, 1, 1), padding='VALID')\n crops = [(0, 0), (0, pad_right_extra), (0, 0)]\n out = out.batch_to_time(rate, crops)\n return out\n\n\n@layer\ndef atrous(x, outdim, kernel, rate, pad=0, padding='SAME', initializer=tf.\n he_uniform, bias=None, **kwargs):\n assert isinstance(pad, int)\n nd = x.ndim - 2\n if pad:\n pads = [(0, 0)] + [(pad, pad)] * nd + [(0, 0)]\n x = tf.pad(x, pads, mode='CONSTANT')\n kernel = _kernel_shape(nd, kernel, x.dims[-1], outdim)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n if nd == 1:\n out = _atrous1d(x, W, rate, padding=padding)\n elif nd == 2:\n out = tf.nn.atrous_conv2d(x, W, rate, padding)\n else:\n raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))\n if bias is not None:\n b = tf.get_bias('b', shape=(outdim,), 
initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n<mask token>\n\n\n@layer\ndef deconv(x, outdim, kernel, stride=1, padding='SAME', initializer=tf.\n he_uniform, bias=False, extra=None, **kwargs):\n nd = x.ndim - 2\n out_shape = _deconv_outshape(nd, x.dims, outdim, kernel, stride,\n padding, extra)\n oshape = tf.TensorShape(out_shape)\n if out_shape[0] is None:\n out_shape[0] = tf.shape(x)[0]\n out_shape = tf.stack(out_shape)\n kernel_shape = _kernel_shape(nd, kernel, outdim, x.dims[-1])\n stride = _stride_shape(nd, stride)\n W = tf.get_weight('W', shape=kernel_shape, initializer=initializer(\n kernel_shape))\n if nd == 2:\n out = tf.nn.conv2d_transpose(x, W, out_shape, strides=stride,\n padding=padding)\n elif nd == 3:\n out = tf.nn.conv3d_transpose(x, W, out_shape, strides=stride,\n padding=padding)\n else:\n raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n out.set_shape(oshape)\n return out\n\n\n@layer\ndef dwconv(x, kernel, multiplier=1, stride=1, pad=0, padding='SAME',\n initializer=tf.he_uniform, bias=False, **kwargs):\n if pad:\n pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]\n x = tf.pad(x, pads, mode='CONSTANT')\n kernel = _kernel_shape(2, kernel, x.dims[-1], multiplier)\n stride = _stride_shape(2, stride)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n out = tf.nn.depthwise_conv2d(x, W, stride, padding)\n if bias:\n outdim = kernel[2] * multiplier\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n<mask token>\n\n\n@layer\ndef sizedown(x, factors=(2, 2), extras=(0, 0), method=ResizeMethod.\n NEAREST_NEIGHBOR, align_corners=False):\n inshape = x.dims\n if isinstance(factors, int):\n factors = factors, factors\n if isinstance(extras, 
int):\n extras = extras, extras\n hw = [inshape[1] // factors[0] + extras[0], inshape[2] // factors[1] +\n extras[1]]\n return tf.image.resize_images(x, hw, method=method, align_corners=\n align_corners)\n\n\n@layer\ndef keep(t, keepto, collection=None):\n \"\"\"\n append to list and return t as is\n :param t: tensor\n :param keepto: list\n :return:\n \"\"\"\n if collection is not None:\n tf.add_to_collection(collection, t)\n keepto.append(t)\n return t\n\n\n<mask token>\n\n\n@layer\ndef iname(t, name):\n return tf.identity(t, name=name)\n",
"step-3": "<mask token>\n\n\ndef _kernel_shape(nd, k, indim, outdim):\n if isinstance(k, int):\n k = [k for _ in range(nd)]\n k = list(k)\n assert len(k) == nd\n k.extend([indim, outdim])\n return k\n\n\ndef _stride_shape(nd, s):\n \"\"\"\n\n :param nd:\n :param s: int | list | tuple\n :return:\n \"\"\"\n if isinstance(s, int):\n s = [s for _ in range(nd)]\n s = list(s)\n assert len(s) == nd\n s = [1] + s + [1]\n return s\n\n\n@layer\ndef conv(x, outdim, kernel=3, stride=1, pad=0, padding='SAME', mode=\n 'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):\n nd = x.ndim\n if nd == 3:\n return conv1d(x, outdim, kernel, stride=stride, pad=pad, padding=\n padding, mode=mode, initializer=initializer, bias=bias, **kwargs)\n elif nd == 4:\n return conv2d(x, outdim, kernel, stride=stride, pad=pad, padding=\n padding, mode=mode, initializer=initializer, bias=bias, **kwargs)\n elif nd == 5:\n return conv3d(x, outdim, kernel, stride=stride, pad=pad, padding=\n padding, mode=mode, initializer=initializer, bias=bias, **kwargs)\n else:\n raise ValueError('conv for {nd}? 
nd <= 5'.format(nd=nd))\n\n\n@layer\ndef conv1d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode=\n 'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):\n kernel = _kernel_shape(1, kernel, x.dims[-1], outdim)\n pads = None\n if padding == 'SAME' and mode != 'CONSTANT':\n half = (kernel[0] - 1) // 2\n pads = [(0, 0), (pad + half, pad + kernel[0] - 1 - half), (0, 0)]\n padding = 'VALID'\n elif pad:\n pads = [(0, 0), (pad, pad), (0, 0)]\n if pads is not None:\n x = tf.pad(x, pads, mode=mode)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n out = tf.nn.conv1d(x, W, stride, padding)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n@layer\ndef conv2d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode=\n 'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):\n kernel = _kernel_shape(2, kernel, x.dims[-1], outdim)\n stride = _stride_shape(2, stride)\n pads = None\n if padding == 'SAME' and mode != 'CONSTANT':\n half = (kernel[0] - 1) // 2, (kernel[1] - 1) // 2\n pads = [(0, 0), (pad + half[0], pad + kernel[0] - 1 - half[0]), (\n pad + half[1], pad + kernel[1] - 1 - half[1]), (0, 0)]\n padding = 'VALID'\n elif pad:\n pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]\n if pads is not None:\n x = tf.pad(x, pads, mode=mode)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n out = tf.nn.conv2d(x, W, stride, padding)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n<mask token>\n\n\n@layer\ndef renorm(x, axis=-1, momentum=0.99, epsilon=0.001, training=None, gamma=\n 1.0, beta=0.0, stddev=0.002, renorm_momentum=0.99, renorm_clipping=None,\n **kwargs):\n \"\"\"\n https://arxiv.org/abs/1702.03275\n https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization\n :param x:\n 
:param dict renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar Tensors\n used to clip the renorm correction. The correction (r, d) is used as corrected_value = normalized_value * r + d,\n with r clipped to [rmin, rmax], and d to [-dmax, dmax].\n Missing rmax, rmin, dmax are set to inf, 0, inf, respectively.\n :return:\n \"\"\"\n init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)\n init_beta = tf.constant_initializer(beta)\n reuse = tf.get_variable_scope().reuse\n if training is None and (reuse or kwargs.get('reuse', False)):\n training = False\n elif training is None:\n training = x.graph.is_training\n if renorm_clipping is not None:\n if renorm_clipping.get('rmin', None) is None:\n rmax = renorm_clipping.get('rmax', None)\n if rmax is not None and not np.isinf(rmax):\n rmin = 1 / rmax\n renorm_clipping['rmin'] = rmin\n out = tf.layers.batch_normalization(x, axis=axis, momentum=momentum,\n epsilon=epsilon, beta_initializer=init_beta, gamma_initializer=\n init_gamma, training=training, renorm=True, renorm_clipping=\n renorm_clipping, renorm_momentum=renorm_momentum, **kwargs)\n return out\n\n\n@layer\ndef inorm(x, beta=0.0, gamma=1.0, stddev=0.002, epsilon=1e-05, axis=None,\n trainable=True, **kwargs):\n \"\"\"\n instance normalization normalization for (W,H)\n same output not regard to trainmode\n # https://arxiv.org/pdf/1607.08022.pdf for instance normalization\n # z = gamma * (x-m)/s + beta\n # note gamma, beta\n :param x: [BHWC] is common case\n :param gamma:\n :param beta:\n :param epsilon:\n :return:\n \"\"\"\n axes = list(range(1, 1 + x.ndim - 2))\n m, v = tf.nn.moments(x, axes=axes, keep_dims=True)\n shapelast = x.dims[-1:]\n if trainable:\n init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)\n init_beta = tf.constant_initializer(beta)\n gamma_t = tf.get_weight(name='gamma', shape=shapelast, initializer=\n init_gamma)\n beta_t = tf.get_bias(name='beta', shape=shapelast, initializer=\n 
init_beta)\n else:\n gamma_t = gamma\n beta_t = beta\n out = tf.nn.batch_normalization(x, m, v, offset=beta_t, scale=gamma_t,\n variance_epsilon=epsilon)\n return out\n\n\n@layer\ndef cnorm(x, labels, klass=None, stddev=0.01, beta=0.0, gamma=1.0, epsilon=\n 1e-05):\n \"\"\"\n conditional instance normalization (by label index)\n for learning embedding value of beta and gamma\n # https://arxiv.org/pdf/1610.07629.pdf for conditional instance normalization\n :param x:\n :param labels: [B,]\n :param klass: size of embedding var\n :param gamma: initial_gamma\n :param stddev: stddev for gamma random init\n :param beta: initial beta value\n :param epsilon: 1e-5 for var_epsilon\n :return:\n \"\"\"\n assert klass is not None\n init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)\n init_beta = tf.constant_initializer(beta)\n shape = [1] * x.ndim\n shape[0] = klass\n shape[-1] = x.dims[-1]\n beta_v = tf.get_weight(name='beta', shape=shape, initializer=init_beta)\n gamma_v = tf.get_weight(name='gamma', shape=shape, initializer=init_gamma)\n beta_l = tf.nn.embedding_lookup(beta_v, labels)\n gamma_l = tf.nn.embedding_lookup(gamma_v, labels)\n return inorm(x, beta=beta_l, gamma=gamma_l, epsilon=epsilon)\n\n\n@layer\ndef lnorm(x, center=True, scale=True, activation_fn=None, reuse=None,\n variables_collections=None, outputs_collections=None, trainable=True,\n begin_norm_axis=1, begin_params_axis=-1, scope=None, **kwargs):\n \"\"\"\n # layer normalization\n :param x:\n :return:\n \"\"\"\n return tf.contrib.layers.layer_norm(x, center=center, scale=scale,\n activation_fn=activation_fn, reuse=reuse, variables_collections=\n variables_collections, outputs_collections=outputs_collections,\n trainable=trainable, begin_norm_axis=begin_norm_axis,\n begin_params_axis=begin_params_axis, scope=None, **kwargs)\n\n\n@layer\ndef gnorm(x, group):\n \"\"\"\n group normalization\n\n :param x: [N, ...., C]\n :param int group: G,\n :return:\n \"\"\"\n shape = list(x.dims)\n if 
shape[0] is None:\n shape[0] = -1\n ch = shape[-1]\n shape[-1] = ch // group\n shape.append(group)\n assert ch // group * group == ch\n x = tf.reshape(x, shape)\n x_n = lnorm(x)\n shape = shape[:-1]\n shape[-1] = ch\n x = tf.reshape(x_n, shape)\n return x\n\n\n@layer\ndef dropout(x, keep_prob=0.5, is_training=None, noise_shape=None, seed=None):\n if keep_prob == 1.0:\n return x\n\n def _dropout():\n return tf.nn.dropout(x, keep_prob, noise_shape, seed)\n if is_training is None:\n is_training = x.graph.is_training\n else:\n is_training = tf.convert_to_tensor(is_training)\n return tf.cond(is_training, _dropout, lambda : x)\n\n\n@layer\ndef dense(x, outdim, initializer=tf.glorot_uniform, bias=False, name=None):\n \"\"\"\n out = dense( shape=shape, init=None, paramset=None)\n :param x: tensor\n :param bias:\n :param outdim: output_size\n :param initializer:\n :param name:\n :return: layer | output | (output, params)\n \"\"\"\n if x.ndim == 4:\n x = x.flat2d()\n assert x.ndim == 2\n outshape = not isinstance(outdim, int)\n if outshape:\n dim = [-1] + list(outdim)\n outdim = np.prod(outdim)\n shape = [x.dims[-1], outdim]\n W = tf.get_weight('W', shape=shape, initializer=initializer(shape))\n out = x.dot(W)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer())\n out = tf.nn.bias_add(out, b)\n if outshape:\n out = out.reshape(dim)\n return tf.identity(out, name=name)\n\n\n@layer\ndef bias(x, initializer=tf.zeros_initializer, name=None):\n outdim = x.dims[-1]\n b = tf.get_bias('b', shape=(outdim,), initializer=initializer())\n return tf.nn.bias_add(x, b, name=name)\n\n\ndef _pool_kernel_stide(dim, kernel, stride):\n if isinstance(kernel, int):\n kernel = [kernel] * dim\n if isinstance(stride, int):\n stride = [stride] * dim\n assert len(kernel) == dim and len(stride) == dim\n return [1] + list(kernel) + [1], [1] + list(stride) + [1]\n\n\n@layer\ndef maxpool(x, kernel=2, stride=None, padding='SAME'):\n nd = x.ndim - 2\n stride = kernel if 
stride is None else stride\n kernel, stride = _pool_kernel_stide(nd, kernel, stride)\n if nd == 2:\n return tf.nn.max_pool(x, kernel, stride, padding)\n elif nd == 3:\n return tf.nn.max_pool3d(x, kernel, stride, padding)\n else:\n raise ValueError('maxpool support {0}? '.format(nd))\n\n\n@layer\ndef maxpool_where(x, kernel, stride=None, pads=None, padding='SAME', keep=None\n ):\n assert stride is None and padding == 'SAME'\n stride = kernel\n pooled = maxpool(x, kernel, stride=stride, padding=padding)\n mask = where_pooled(x, pooled, kernel, pads=pads)\n if keep is None:\n return pooled, mask\n else:\n keep.append(mask)\n return pooled\n\n\n@layer\ndef where_pooled(x, pooled, kernel=None, pads=None):\n \"\"\"\n return mask\n :param x:\n :param pooled:\n :param kernel:\n :param pads:\n :return:\n \"\"\"\n assert x.ndim == 4\n import math\n if kernel is None:\n kernel = [math.ceil(float(d) / float(p)) for d, p in zip(x.dims,\n pooled.zip)]\n repeat = pooled.repeats(kernel, axis=[1, 2])\n elif isinstance(kernel, (tuple, list)):\n repeat = pooled.repeats(kernel, axis=[1, 2])\n else:\n repeat = pooled.repeats([kernel, kernel], axis=[1, 2])\n if pads is not None:\n repeat = repeat.pad(pads, axis=[1, 2])\n dim = x.dims\n sameshaped = repeat[:dim[0], :dim[1], :dim[2], :dim[3]]\n mask = tf.equal(x, sameshaped).to_float()\n return mask\n\n\n<mask token>\n\n\n@layer\ndef unpool_repeat(x, kernel):\n \"\"\" upsample by repeating\"\"\"\n if not isinstance(kernel, (list, tuple)) and isinstance(kernel, int):\n kernel = [kernel] * (x.ndim - 2)\n return x.repeats(kernel, axis=list(range(1, x.ndim - 2)))\n\n\n@layer\ndef avgpool(x, kernel, stride, padding='SAME'):\n nd = x.ndim - 2\n kernel, stride = _pool_kernel_stide(nd, kernel, stride)\n if nd == 2:\n return tf.nn.avg_pool(x, kernel, stride, padding)\n elif nd == 3:\n return tf.nn.avg_pool3d(x, kernel, stride, padding)\n else:\n raise ValueError('avgpool support {0}? 
'.format(nd))\n\n\n@layer\ndef gpool(x, keepdims=True):\n \"\"\"\n global_avgpool\n :param x:\n :param keepdims:\n :return:\n \"\"\"\n axis = list(range(1, x.ndim - 1))\n return x.mean(axis=axis, keepdims=keepdims)\n\n\ndef _atrous1d(x, kernel, rate, padding='SAME'):\n \"\"\"\n cf https://www.tensorflow.org/versions/r0.11/api_docs/python/nn.html#atrous_conv2d\n :param x: [batch, time, channel]\n :param kernel: [1, 1, inchannel, outchannel]\n :param rate: dialtion rate\n :param padding: 'same' or 'valid'\n :param bias:\n :return:\n \"\"\"\n if rate == 1:\n out = tf.nn.conv1d(x, kernel, stride=(1, 1, 1), padding=padding)\n return out\n if padding == 'SAME':\n filter_width = kernel.dims[0]\n filter_width_up = filter_width + (filter_width - 1) * (rate - 1)\n pad = filter_width_up - 1\n pad_left = pad // 2\n pad_right = pad - pad_left\n elif padding == 'VALID':\n pad_left = 0\n pad_right = 0\n else:\n raise ValueError('Invalid padding')\n in_width = x.dims[1] + pad_left + pad_right\n pad_right_extra = (rate - in_width % rate) % rate\n pads = [(0, 0), (pad_left, pad_right + pad_right_extra), (0, 0)]\n out = x.time_to_batch(rate, pads)\n out = tf.nn.conv1d(out, kernel, stride=(1, 1, 1), padding='VALID')\n crops = [(0, 0), (0, pad_right_extra), (0, 0)]\n out = out.batch_to_time(rate, crops)\n return out\n\n\n@layer\ndef atrous(x, outdim, kernel, rate, pad=0, padding='SAME', initializer=tf.\n he_uniform, bias=None, **kwargs):\n assert isinstance(pad, int)\n nd = x.ndim - 2\n if pad:\n pads = [(0, 0)] + [(pad, pad)] * nd + [(0, 0)]\n x = tf.pad(x, pads, mode='CONSTANT')\n kernel = _kernel_shape(nd, kernel, x.dims[-1], outdim)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n if nd == 1:\n out = _atrous1d(x, W, rate, padding=padding)\n elif nd == 2:\n out = tf.nn.atrous_conv2d(x, W, rate, padding)\n else:\n raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))\n if bias is not None:\n b = tf.get_bias('b', shape=(outdim,), 
initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n<mask token>\n\n\n@layer\ndef deconv(x, outdim, kernel, stride=1, padding='SAME', initializer=tf.\n he_uniform, bias=False, extra=None, **kwargs):\n nd = x.ndim - 2\n out_shape = _deconv_outshape(nd, x.dims, outdim, kernel, stride,\n padding, extra)\n oshape = tf.TensorShape(out_shape)\n if out_shape[0] is None:\n out_shape[0] = tf.shape(x)[0]\n out_shape = tf.stack(out_shape)\n kernel_shape = _kernel_shape(nd, kernel, outdim, x.dims[-1])\n stride = _stride_shape(nd, stride)\n W = tf.get_weight('W', shape=kernel_shape, initializer=initializer(\n kernel_shape))\n if nd == 2:\n out = tf.nn.conv2d_transpose(x, W, out_shape, strides=stride,\n padding=padding)\n elif nd == 3:\n out = tf.nn.conv3d_transpose(x, W, out_shape, strides=stride,\n padding=padding)\n else:\n raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n out.set_shape(oshape)\n return out\n\n\n@layer\ndef dwconv(x, kernel, multiplier=1, stride=1, pad=0, padding='SAME',\n initializer=tf.he_uniform, bias=False, **kwargs):\n if pad:\n pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]\n x = tf.pad(x, pads, mode='CONSTANT')\n kernel = _kernel_shape(2, kernel, x.dims[-1], multiplier)\n stride = _stride_shape(2, stride)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n out = tf.nn.depthwise_conv2d(x, W, stride, padding)\n if bias:\n outdim = kernel[2] * multiplier\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n@layer\ndef subpixel(x, kernel, factor=2, stride=1, pad=0, padding='SAME',\n initializer=tf.he_uniform, bias=False, **kwargs):\n from .ireshape import channel_to_space\n assert x.ndim == 4\n indim = x.dims[-1]\n outdim = indim * factor * 
factor\n kernel = _kernel_shape(2, kernel, indim, outdim)\n stride = _stride_shape(2, stride)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel))\n out = tf.nn.conv2d(x, W, stride, padding=padding)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer())\n out = tf.nn.bias_add(out, b)\n out = channel_to_space(out, factor)\n return out\n\n\n@layer\ndef leaky(x, slope=0.01, name=None):\n \"\"\"\n leaky_relu\n see also pleaky\n :param x:\n :param slope: 0.01 default\n :return:\n \"\"\"\n return tf.maximum(x, x * slope, name=name)\n\n\n<mask token>\n\n\n@layer\ndef sizedown(x, factors=(2, 2), extras=(0, 0), method=ResizeMethod.\n NEAREST_NEIGHBOR, align_corners=False):\n inshape = x.dims\n if isinstance(factors, int):\n factors = factors, factors\n if isinstance(extras, int):\n extras = extras, extras\n hw = [inshape[1] // factors[0] + extras[0], inshape[2] // factors[1] +\n extras[1]]\n return tf.image.resize_images(x, hw, method=method, align_corners=\n align_corners)\n\n\n@layer\ndef keep(t, keepto, collection=None):\n \"\"\"\n append to list and return t as is\n :param t: tensor\n :param keepto: list\n :return:\n \"\"\"\n if collection is not None:\n tf.add_to_collection(collection, t)\n keepto.append(t)\n return t\n\n\n<mask token>\n\n\n@layer\ndef iname(t, name):\n return tf.identity(t, name=name)\n",
"step-4": "<mask token>\n\n\ndef _kernel_shape(nd, k, indim, outdim):\n if isinstance(k, int):\n k = [k for _ in range(nd)]\n k = list(k)\n assert len(k) == nd\n k.extend([indim, outdim])\n return k\n\n\ndef _stride_shape(nd, s):\n \"\"\"\n\n :param nd:\n :param s: int | list | tuple\n :return:\n \"\"\"\n if isinstance(s, int):\n s = [s for _ in range(nd)]\n s = list(s)\n assert len(s) == nd\n s = [1] + s + [1]\n return s\n\n\n@layer\ndef conv(x, outdim, kernel=3, stride=1, pad=0, padding='SAME', mode=\n 'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):\n nd = x.ndim\n if nd == 3:\n return conv1d(x, outdim, kernel, stride=stride, pad=pad, padding=\n padding, mode=mode, initializer=initializer, bias=bias, **kwargs)\n elif nd == 4:\n return conv2d(x, outdim, kernel, stride=stride, pad=pad, padding=\n padding, mode=mode, initializer=initializer, bias=bias, **kwargs)\n elif nd == 5:\n return conv3d(x, outdim, kernel, stride=stride, pad=pad, padding=\n padding, mode=mode, initializer=initializer, bias=bias, **kwargs)\n else:\n raise ValueError('conv for {nd}? 
nd <= 5'.format(nd=nd))\n\n\n@layer\ndef conv1d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode=\n 'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):\n kernel = _kernel_shape(1, kernel, x.dims[-1], outdim)\n pads = None\n if padding == 'SAME' and mode != 'CONSTANT':\n half = (kernel[0] - 1) // 2\n pads = [(0, 0), (pad + half, pad + kernel[0] - 1 - half), (0, 0)]\n padding = 'VALID'\n elif pad:\n pads = [(0, 0), (pad, pad), (0, 0)]\n if pads is not None:\n x = tf.pad(x, pads, mode=mode)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n out = tf.nn.conv1d(x, W, stride, padding)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n@layer\ndef conv2d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode=\n 'CONSTANT', initializer=tf.he_uniform, bias=False, **kwargs):\n kernel = _kernel_shape(2, kernel, x.dims[-1], outdim)\n stride = _stride_shape(2, stride)\n pads = None\n if padding == 'SAME' and mode != 'CONSTANT':\n half = (kernel[0] - 1) // 2, (kernel[1] - 1) // 2\n pads = [(0, 0), (pad + half[0], pad + kernel[0] - 1 - half[0]), (\n pad + half[1], pad + kernel[1] - 1 - half[1]), (0, 0)]\n padding = 'VALID'\n elif pad:\n pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]\n if pads is not None:\n x = tf.pad(x, pads, mode=mode)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n out = tf.nn.conv2d(x, W, stride, padding)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n<mask token>\n\n\n@layer\ndef bn(x, stddev=0.002, beta=0.0, gamma=1.0, epsilon=1e-05, momentum=0.99,\n axis=-1, training=None, **kwargs):\n if kwargs.pop('scale', True):\n init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)\n else:\n init_gamma = None\n if kwargs.pop('center', True):\n init_beta = 
tf.constant_initializer(beta)\n else:\n init_beta = None\n reuse = tf.get_variable_scope().reuse\n if training is None and (reuse or kwargs.get('reuse', False)):\n training = False\n elif training is None:\n training = x.graph.is_training\n out = tf.layers.batch_normalization(x, axis=axis, momentum=momentum,\n epsilon=epsilon, beta_initializer=init_beta, gamma_initializer=\n init_gamma, moving_mean_initializer=tf.zeros_initializer(),\n moving_variance_initializer=tf.ones_initializer(), training=\n training, **kwargs)\n return out\n\n\n@layer\ndef renorm(x, axis=-1, momentum=0.99, epsilon=0.001, training=None, gamma=\n 1.0, beta=0.0, stddev=0.002, renorm_momentum=0.99, renorm_clipping=None,\n **kwargs):\n \"\"\"\n https://arxiv.org/abs/1702.03275\n https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization\n :param x:\n :param dict renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar Tensors\n used to clip the renorm correction. The correction (r, d) is used as corrected_value = normalized_value * r + d,\n with r clipped to [rmin, rmax], and d to [-dmax, dmax].\n Missing rmax, rmin, dmax are set to inf, 0, inf, respectively.\n :return:\n \"\"\"\n init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)\n init_beta = tf.constant_initializer(beta)\n reuse = tf.get_variable_scope().reuse\n if training is None and (reuse or kwargs.get('reuse', False)):\n training = False\n elif training is None:\n training = x.graph.is_training\n if renorm_clipping is not None:\n if renorm_clipping.get('rmin', None) is None:\n rmax = renorm_clipping.get('rmax', None)\n if rmax is not None and not np.isinf(rmax):\n rmin = 1 / rmax\n renorm_clipping['rmin'] = rmin\n out = tf.layers.batch_normalization(x, axis=axis, momentum=momentum,\n epsilon=epsilon, beta_initializer=init_beta, gamma_initializer=\n init_gamma, training=training, renorm=True, renorm_clipping=\n renorm_clipping, renorm_momentum=renorm_momentum, **kwargs)\n return 
out\n\n\n@layer\ndef inorm(x, beta=0.0, gamma=1.0, stddev=0.002, epsilon=1e-05, axis=None,\n trainable=True, **kwargs):\n \"\"\"\n instance normalization normalization for (W,H)\n same output not regard to trainmode\n # https://arxiv.org/pdf/1607.08022.pdf for instance normalization\n # z = gamma * (x-m)/s + beta\n # note gamma, beta\n :param x: [BHWC] is common case\n :param gamma:\n :param beta:\n :param epsilon:\n :return:\n \"\"\"\n axes = list(range(1, 1 + x.ndim - 2))\n m, v = tf.nn.moments(x, axes=axes, keep_dims=True)\n shapelast = x.dims[-1:]\n if trainable:\n init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)\n init_beta = tf.constant_initializer(beta)\n gamma_t = tf.get_weight(name='gamma', shape=shapelast, initializer=\n init_gamma)\n beta_t = tf.get_bias(name='beta', shape=shapelast, initializer=\n init_beta)\n else:\n gamma_t = gamma\n beta_t = beta\n out = tf.nn.batch_normalization(x, m, v, offset=beta_t, scale=gamma_t,\n variance_epsilon=epsilon)\n return out\n\n\n@layer\ndef cnorm(x, labels, klass=None, stddev=0.01, beta=0.0, gamma=1.0, epsilon=\n 1e-05):\n \"\"\"\n conditional instance normalization (by label index)\n for learning embedding value of beta and gamma\n # https://arxiv.org/pdf/1610.07629.pdf for conditional instance normalization\n :param x:\n :param labels: [B,]\n :param klass: size of embedding var\n :param gamma: initial_gamma\n :param stddev: stddev for gamma random init\n :param beta: initial beta value\n :param epsilon: 1e-5 for var_epsilon\n :return:\n \"\"\"\n assert klass is not None\n init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)\n init_beta = tf.constant_initializer(beta)\n shape = [1] * x.ndim\n shape[0] = klass\n shape[-1] = x.dims[-1]\n beta_v = tf.get_weight(name='beta', shape=shape, initializer=init_beta)\n gamma_v = tf.get_weight(name='gamma', shape=shape, initializer=init_gamma)\n beta_l = tf.nn.embedding_lookup(beta_v, labels)\n gamma_l = tf.nn.embedding_lookup(gamma_v, 
labels)\n return inorm(x, beta=beta_l, gamma=gamma_l, epsilon=epsilon)\n\n\n@layer\ndef lnorm(x, center=True, scale=True, activation_fn=None, reuse=None,\n variables_collections=None, outputs_collections=None, trainable=True,\n begin_norm_axis=1, begin_params_axis=-1, scope=None, **kwargs):\n \"\"\"\n # layer normalization\n :param x:\n :return:\n \"\"\"\n return tf.contrib.layers.layer_norm(x, center=center, scale=scale,\n activation_fn=activation_fn, reuse=reuse, variables_collections=\n variables_collections, outputs_collections=outputs_collections,\n trainable=trainable, begin_norm_axis=begin_norm_axis,\n begin_params_axis=begin_params_axis, scope=None, **kwargs)\n\n\n@layer\ndef gnorm(x, group):\n \"\"\"\n group normalization\n\n :param x: [N, ...., C]\n :param int group: G,\n :return:\n \"\"\"\n shape = list(x.dims)\n if shape[0] is None:\n shape[0] = -1\n ch = shape[-1]\n shape[-1] = ch // group\n shape.append(group)\n assert ch // group * group == ch\n x = tf.reshape(x, shape)\n x_n = lnorm(x)\n shape = shape[:-1]\n shape[-1] = ch\n x = tf.reshape(x_n, shape)\n return x\n\n\n@layer\ndef dropout(x, keep_prob=0.5, is_training=None, noise_shape=None, seed=None):\n if keep_prob == 1.0:\n return x\n\n def _dropout():\n return tf.nn.dropout(x, keep_prob, noise_shape, seed)\n if is_training is None:\n is_training = x.graph.is_training\n else:\n is_training = tf.convert_to_tensor(is_training)\n return tf.cond(is_training, _dropout, lambda : x)\n\n\n@layer\ndef dense(x, outdim, initializer=tf.glorot_uniform, bias=False, name=None):\n \"\"\"\n out = dense( shape=shape, init=None, paramset=None)\n :param x: tensor\n :param bias:\n :param outdim: output_size\n :param initializer:\n :param name:\n :return: layer | output | (output, params)\n \"\"\"\n if x.ndim == 4:\n x = x.flat2d()\n assert x.ndim == 2\n outshape = not isinstance(outdim, int)\n if outshape:\n dim = [-1] + list(outdim)\n outdim = np.prod(outdim)\n shape = [x.dims[-1], outdim]\n W = tf.get_weight('W', 
shape=shape, initializer=initializer(shape))\n out = x.dot(W)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer())\n out = tf.nn.bias_add(out, b)\n if outshape:\n out = out.reshape(dim)\n return tf.identity(out, name=name)\n\n\n@layer\ndef bias(x, initializer=tf.zeros_initializer, name=None):\n outdim = x.dims[-1]\n b = tf.get_bias('b', shape=(outdim,), initializer=initializer())\n return tf.nn.bias_add(x, b, name=name)\n\n\ndef _pool_kernel_stide(dim, kernel, stride):\n if isinstance(kernel, int):\n kernel = [kernel] * dim\n if isinstance(stride, int):\n stride = [stride] * dim\n assert len(kernel) == dim and len(stride) == dim\n return [1] + list(kernel) + [1], [1] + list(stride) + [1]\n\n\n@layer\ndef maxpool(x, kernel=2, stride=None, padding='SAME'):\n nd = x.ndim - 2\n stride = kernel if stride is None else stride\n kernel, stride = _pool_kernel_stide(nd, kernel, stride)\n if nd == 2:\n return tf.nn.max_pool(x, kernel, stride, padding)\n elif nd == 3:\n return tf.nn.max_pool3d(x, kernel, stride, padding)\n else:\n raise ValueError('maxpool support {0}? 
'.format(nd))\n\n\n@layer\ndef maxpool_where(x, kernel, stride=None, pads=None, padding='SAME', keep=None\n ):\n assert stride is None and padding == 'SAME'\n stride = kernel\n pooled = maxpool(x, kernel, stride=stride, padding=padding)\n mask = where_pooled(x, pooled, kernel, pads=pads)\n if keep is None:\n return pooled, mask\n else:\n keep.append(mask)\n return pooled\n\n\n@layer\ndef where_pooled(x, pooled, kernel=None, pads=None):\n \"\"\"\n return mask\n :param x:\n :param pooled:\n :param kernel:\n :param pads:\n :return:\n \"\"\"\n assert x.ndim == 4\n import math\n if kernel is None:\n kernel = [math.ceil(float(d) / float(p)) for d, p in zip(x.dims,\n pooled.zip)]\n repeat = pooled.repeats(kernel, axis=[1, 2])\n elif isinstance(kernel, (tuple, list)):\n repeat = pooled.repeats(kernel, axis=[1, 2])\n else:\n repeat = pooled.repeats([kernel, kernel], axis=[1, 2])\n if pads is not None:\n repeat = repeat.pad(pads, axis=[1, 2])\n dim = x.dims\n sameshaped = repeat[:dim[0], :dim[1], :dim[2], :dim[3]]\n mask = tf.equal(x, sameshaped).to_float()\n return mask\n\n\n<mask token>\n\n\n@layer\ndef unpool_repeat(x, kernel):\n \"\"\" upsample by repeating\"\"\"\n if not isinstance(kernel, (list, tuple)) and isinstance(kernel, int):\n kernel = [kernel] * (x.ndim - 2)\n return x.repeats(kernel, axis=list(range(1, x.ndim - 2)))\n\n\n@layer\ndef avgpool(x, kernel, stride, padding='SAME'):\n nd = x.ndim - 2\n kernel, stride = _pool_kernel_stide(nd, kernel, stride)\n if nd == 2:\n return tf.nn.avg_pool(x, kernel, stride, padding)\n elif nd == 3:\n return tf.nn.avg_pool3d(x, kernel, stride, padding)\n else:\n raise ValueError('avgpool support {0}? 
'.format(nd))\n\n\n@layer\ndef gpool(x, keepdims=True):\n \"\"\"\n global_avgpool\n :param x:\n :param keepdims:\n :return:\n \"\"\"\n axis = list(range(1, x.ndim - 1))\n return x.mean(axis=axis, keepdims=keepdims)\n\n\ndef _atrous1d(x, kernel, rate, padding='SAME'):\n \"\"\"\n cf https://www.tensorflow.org/versions/r0.11/api_docs/python/nn.html#atrous_conv2d\n :param x: [batch, time, channel]\n :param kernel: [1, 1, inchannel, outchannel]\n :param rate: dialtion rate\n :param padding: 'same' or 'valid'\n :param bias:\n :return:\n \"\"\"\n if rate == 1:\n out = tf.nn.conv1d(x, kernel, stride=(1, 1, 1), padding=padding)\n return out\n if padding == 'SAME':\n filter_width = kernel.dims[0]\n filter_width_up = filter_width + (filter_width - 1) * (rate - 1)\n pad = filter_width_up - 1\n pad_left = pad // 2\n pad_right = pad - pad_left\n elif padding == 'VALID':\n pad_left = 0\n pad_right = 0\n else:\n raise ValueError('Invalid padding')\n in_width = x.dims[1] + pad_left + pad_right\n pad_right_extra = (rate - in_width % rate) % rate\n pads = [(0, 0), (pad_left, pad_right + pad_right_extra), (0, 0)]\n out = x.time_to_batch(rate, pads)\n out = tf.nn.conv1d(out, kernel, stride=(1, 1, 1), padding='VALID')\n crops = [(0, 0), (0, pad_right_extra), (0, 0)]\n out = out.batch_to_time(rate, crops)\n return out\n\n\n@layer\ndef atrous(x, outdim, kernel, rate, pad=0, padding='SAME', initializer=tf.\n he_uniform, bias=None, **kwargs):\n assert isinstance(pad, int)\n nd = x.ndim - 2\n if pad:\n pads = [(0, 0)] + [(pad, pad)] * nd + [(0, 0)]\n x = tf.pad(x, pads, mode='CONSTANT')\n kernel = _kernel_shape(nd, kernel, x.dims[-1], outdim)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n if nd == 1:\n out = _atrous1d(x, W, rate, padding=padding)\n elif nd == 2:\n out = tf.nn.atrous_conv2d(x, W, rate, padding)\n else:\n raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))\n if bias is not None:\n b = tf.get_bias('b', shape=(outdim,), 
initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n<mask token>\n\n\n@layer\ndef deconv(x, outdim, kernel, stride=1, padding='SAME', initializer=tf.\n he_uniform, bias=False, extra=None, **kwargs):\n nd = x.ndim - 2\n out_shape = _deconv_outshape(nd, x.dims, outdim, kernel, stride,\n padding, extra)\n oshape = tf.TensorShape(out_shape)\n if out_shape[0] is None:\n out_shape[0] = tf.shape(x)[0]\n out_shape = tf.stack(out_shape)\n kernel_shape = _kernel_shape(nd, kernel, outdim, x.dims[-1])\n stride = _stride_shape(nd, stride)\n W = tf.get_weight('W', shape=kernel_shape, initializer=initializer(\n kernel_shape))\n if nd == 2:\n out = tf.nn.conv2d_transpose(x, W, out_shape, strides=stride,\n padding=padding)\n elif nd == 3:\n out = tf.nn.conv3d_transpose(x, W, out_shape, strides=stride,\n padding=padding)\n else:\n raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n out.set_shape(oshape)\n return out\n\n\n@layer\ndef dwconv(x, kernel, multiplier=1, stride=1, pad=0, padding='SAME',\n initializer=tf.he_uniform, bias=False, **kwargs):\n if pad:\n pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]\n x = tf.pad(x, pads, mode='CONSTANT')\n kernel = _kernel_shape(2, kernel, x.dims[-1], multiplier)\n stride = _stride_shape(2, stride)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel),\n **kwargs)\n out = tf.nn.depthwise_conv2d(x, W, stride, padding)\n if bias:\n outdim = kernel[2] * multiplier\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n return out\n\n\n@layer\ndef subpixel(x, kernel, factor=2, stride=1, pad=0, padding='SAME',\n initializer=tf.he_uniform, bias=False, **kwargs):\n from .ireshape import channel_to_space\n assert x.ndim == 4\n indim = x.dims[-1]\n outdim = indim * factor * 
factor\n kernel = _kernel_shape(2, kernel, indim, outdim)\n stride = _stride_shape(2, stride)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel))\n out = tf.nn.conv2d(x, W, stride, padding=padding)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.\n zeros_initializer())\n out = tf.nn.bias_add(out, b)\n out = channel_to_space(out, factor)\n return out\n\n\n@layer\ndef leaky(x, slope=0.01, name=None):\n \"\"\"\n leaky_relu\n see also pleaky\n :param x:\n :param slope: 0.01 default\n :return:\n \"\"\"\n return tf.maximum(x, x * slope, name=name)\n\n\n@layer\ndef pleaky(x):\n \"\"\"\n parametric leakyrelu\n :param x:\n :return:\n \"\"\"\n alpha = tf.get_bias('alpha', shape=(), initializer=tf.\n constant_initializer(0.01))\n return tf.maximum(x, x * alpha)\n\n\n<mask token>\n\n\n@layer\ndef sizedown(x, factors=(2, 2), extras=(0, 0), method=ResizeMethod.\n NEAREST_NEIGHBOR, align_corners=False):\n inshape = x.dims\n if isinstance(factors, int):\n factors = factors, factors\n if isinstance(extras, int):\n extras = extras, extras\n hw = [inshape[1] // factors[0] + extras[0], inshape[2] // factors[1] +\n extras[1]]\n return tf.image.resize_images(x, hw, method=method, align_corners=\n align_corners)\n\n\n@layer\ndef keep(t, keepto, collection=None):\n \"\"\"\n append to list and return t as is\n :param t: tensor\n :param keepto: list\n :return:\n \"\"\"\n if collection is not None:\n tf.add_to_collection(collection, t)\n keepto.append(t)\n return t\n\n\n@layer\ndef collect(t, collection='activation'):\n \"\"\"\n append to list and return t as is\n :param t: tensor\n :param collection:\n :return:\n \"\"\"\n tf.add_to_collection(collection, t)\n return t\n\n\n@layer\ndef iname(t, name):\n return tf.identity(t, name=name)\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom tensorflow.python.ops.image_ops_impl import ResizeMethod\n\nimport sflow.core as tf\nfrom sflow.core import layer\nimport numpy as np\n\n# region arg helper\n\n\ndef _kernel_shape(nd, k, indim, outdim):\n if isinstance(k, int):\n k = [k for _ in range(nd)]\n k = list(k)\n assert len(k) == nd\n k.extend([indim, outdim])\n return k\n\n\ndef _stride_shape(nd, s):\n \"\"\"\n\n :param nd:\n :param s: int | list | tuple\n :return:\n \"\"\"\n if isinstance(s, int):\n s = [s for _ in range(nd)]\n s = list(s)\n assert len(s) == nd\n s = [1] + s + [1]\n return s\n\n# endregion\n\n# region conv\n\n\n# @layer\n# @patchmethod(tf.Tensor, tf.Variable)\n\n@layer\ndef conv(x, outdim, kernel=3, stride=1, pad=0, padding='SAME', mode='CONSTANT',\n initializer=tf.he_uniform, bias=False, **kwargs):\n nd = x.ndim\n if nd == 3:\n return conv1d(x, outdim, kernel, stride=stride, pad=pad, padding=padding, mode=mode,\n initializer=initializer, bias=bias, **kwargs)\n elif nd == 4:\n return conv2d(x, outdim, kernel, stride=stride, pad=pad, padding=padding, mode=mode,\n initializer=initializer, bias=bias, **kwargs)\n\n elif nd == 5:\n return conv3d(x, outdim, kernel, stride=stride, pad=pad, padding=padding, mode=mode,\n initializer=initializer, bias=bias, **kwargs)\n else:\n raise ValueError('conv for {nd}? 
nd <= 5'.format(nd=nd))\n\n\n@layer\ndef conv1d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode='CONSTANT',\n initializer=tf.he_uniform, bias=False, **kwargs):\n\n kernel = _kernel_shape(1, kernel, x.dims[-1], outdim)\n\n pads = None\n if padding == 'SAME' and mode != 'CONSTANT':\n # pad manually\n half = (kernel[0] - 1) // 2\n pads = [(0, 0), (pad + half, pad + kernel[0] - 1 - half), (0, 0)]\n padding = 'VALID' # change to valid because manually padded\n elif pad:\n pads = [(0, 0), (pad, pad), (0, 0)]\n if pads is not None:\n x = tf.pad(x, pads, mode=mode)\n\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel), **kwargs)\n out = tf.nn.conv1d(x, W, stride, padding)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n\n return out\n\n\n@layer\ndef conv2d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode='CONSTANT',\n initializer=tf.he_uniform, bias=False, **kwargs):\n\n kernel = _kernel_shape(2, kernel, x.dims[-1], outdim)\n stride = _stride_shape(2, stride)\n\n pads = None\n if padding == 'SAME' and mode != 'CONSTANT':\n # pad manually\n half = ((kernel[0] - 1) // 2, (kernel[1] - 1) // 2)\n pads = [(0, 0),\n (pad + half[0], pad + kernel[0] - 1 - half[0]),\n (pad + half[1], pad + kernel[1] - 1 - half[1]), (0, 0)]\n padding = 'VALID' # change to valid because manually padded\n elif pad:\n pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]\n if pads is not None:\n x = tf.pad(x, pads, mode=mode)\n\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel), **kwargs)\n out = tf.nn.conv2d(x, W, stride, padding)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n\n return out\n\n\n@layer\ndef conv3d(x, outdim, kernel, stride=1, pad=0, padding='SAME', mode='CONSTANT',\n initializer=tf.he_uniform, bias=False, **kwargs):\n\n kernel = _kernel_shape(3, kernel, x.dims[-1], outdim)\n 
stride = _stride_shape(3, stride) # stride 5-dim\n\n pads = None\n if padding == 'SAME' and mode != 'CONSTANT':\n # pad manually\n half = ((kernel[0] - 1) // 2, (kernel[1] - 1) // 2, (kernel[2] - 1) // 2)\n pads = [(0, 0),\n (pad + half[0], pad + kernel[0] - 1 - half[0]),\n (pad + half[1], pad + kernel[1] - 1 - half[1]),\n (pad + half[2], pad + kernel[2] - 1 - half[2]), (0, 0)]\n padding = 'VALID' # change to valid because manually padded\n elif pad:\n pads = [(0, 0), (pad, pad), (pad, pad), (pad, pad), (0, 0)]\n if pads is not None:\n x = tf.pad(x, pads, mode=mode)\n\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel), **kwargs)\n out = tf.nn.conv3d(x, W, stride, padding)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n\n return out\n\n\n# endregion\n\n# region normalization\n\n\n@layer\ndef bn(x, stddev=0.002, beta=0.0, gamma=1.0, epsilon=1e-5, momentum=0.99, axis=-1, training=None, **kwargs):\n if kwargs.pop('scale', True):\n init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)\n else:\n init_gamma = None\n if kwargs.pop('center', True):\n init_beta = tf.constant_initializer(beta)\n else:\n init_beta = None\n\n reuse = tf.get_variable_scope().reuse\n if training is None and (reuse or kwargs.get('reuse', False)):\n training = False\n elif training is None:\n training = x.graph.is_training\n # reuse = reuse is None or reuse is True\n out = tf.layers.batch_normalization(x, axis=axis, momentum=momentum, epsilon=epsilon,\n beta_initializer=init_beta,\n gamma_initializer=init_gamma,\n moving_mean_initializer=tf.zeros_initializer(),\n moving_variance_initializer=tf.ones_initializer(),\n training=training,\n **kwargs\n )\n return out\n\n\n@layer\ndef renorm(x, axis=-1, momentum=0.99, epsilon=0.001, training=None,\n gamma=1.0, beta=0.0, stddev=0.002,\n renorm_momentum=0.99, renorm_clipping=None,\n **kwargs):\n \"\"\"\n https://arxiv.org/abs/1702.03275\n 
https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization\n :param x:\n :param dict renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar Tensors\n used to clip the renorm correction. The correction (r, d) is used as corrected_value = normalized_value * r + d,\n with r clipped to [rmin, rmax], and d to [-dmax, dmax].\n Missing rmax, rmin, dmax are set to inf, 0, inf, respectively.\n :return:\n \"\"\"\n init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)\n init_beta = tf.constant_initializer(beta)\n\n reuse = tf.get_variable_scope().reuse\n if training is None and (reuse or kwargs.get('reuse', False)):\n training = False\n elif training is None:\n training = x.graph.is_training\n\n if renorm_clipping is not None:\n if renorm_clipping.get('rmin', None) is None:\n rmax = renorm_clipping.get('rmax', None)\n if rmax is not None and not np.isinf(rmax):\n rmin = 1 / rmax\n renorm_clipping['rmin'] = rmin\n\n out = tf.layers.batch_normalization(x, axis=axis, momentum=momentum, epsilon=epsilon,\n beta_initializer=init_beta,\n gamma_initializer=init_gamma,\n training=training,\n renorm=True,\n renorm_clipping=renorm_clipping,\n renorm_momentum=renorm_momentum,\n **kwargs\n )\n return out\n\n\n@layer\ndef inorm(x, beta=0.0, gamma=1.0, stddev=0.002, epsilon=1e-5, axis=None, trainable=True, **kwargs):\n \"\"\"\n instance normalization normalization for (W,H)\n same output not regard to trainmode\n # https://arxiv.org/pdf/1607.08022.pdf for instance normalization\n # z = gamma * (x-m)/s + beta\n # note gamma, beta\n :param x: [BHWC] is common case\n :param gamma:\n :param beta:\n :param epsilon:\n :return:\n \"\"\"\n axes = list(range(1, 1 + x.ndim-2)) # axes = [1,2] for BWHC except batch, channel\n m, v = tf.nn.moments(x, axes=axes, keep_dims=True)\n\n shapelast = x.dims[-1:]\n if trainable:\n init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)\n init_beta = tf.constant_initializer(beta)\n gamma_t = 
tf.get_weight(name='gamma', shape=shapelast, initializer=init_gamma)\n beta_t = tf.get_bias(name='beta', shape=shapelast, initializer=init_beta)\n else:\n gamma_t = gamma\n beta_t = beta\n\n # out = (x - m) / tf.sqrt(v + epsilon)\n # out = tf.nn.batch_normalization(x, m, v, beta, gamma, epsilon)\n out = tf.nn.batch_normalization(x, m, v, offset=beta_t, scale=gamma_t, variance_epsilon=epsilon)\n\n return out\n\n\n@layer\ndef cnorm(x, labels, klass=None, stddev=0.01, beta=0.0, gamma=1.0, epsilon=1e-5):\n \"\"\"\n conditional instance normalization (by label index)\n for learning embedding value of beta and gamma\n # https://arxiv.org/pdf/1610.07629.pdf for conditional instance normalization\n :param x:\n :param labels: [B,]\n :param klass: size of embedding var\n :param gamma: initial_gamma\n :param stddev: stddev for gamma random init\n :param beta: initial beta value\n :param epsilon: 1e-5 for var_epsilon\n :return:\n \"\"\"\n # total klass count needs !!\n assert klass is not None\n\n init_gamma = tf.random_normal_initializer(mean=gamma, stddev=stddev)\n init_beta = tf.constant_initializer(beta)\n # params\n shape = [1] * x.ndim\n shape[0] = klass\n shape[-1] = x.dims[-1] # ones but last channel axis\n\n # [klass, 1, 1, C] for [BHWC] data\n beta_v = tf.get_weight(name='beta', shape=shape, initializer=init_beta)\n gamma_v = tf.get_weight(name='gamma', shape=shape, initializer=init_gamma)\n # conditioned by label\n # gather\n beta_l = tf.nn.embedding_lookup(beta_v, labels)\n gamma_l = tf.nn.embedding_lookup(gamma_v, labels)\n\n return inorm(x, beta=beta_l, gamma=gamma_l, epsilon=epsilon)\n\n\n@layer\ndef lnorm(x, center=True, scale=True, activation_fn=None, reuse=None,\n variables_collections=None, outputs_collections=None,\n trainable=True, begin_norm_axis=1, begin_params_axis=-1,\n scope=None,\n **kwargs):\n \"\"\"\n # layer normalization\n :param x:\n :return:\n \"\"\"\n return tf.contrib.layers.layer_norm(x, center=center, scale=scale,\n 
activation_fn=activation_fn, reuse=reuse,\n variables_collections=variables_collections,\n outputs_collections=outputs_collections,\n trainable=trainable,\n begin_norm_axis=begin_norm_axis,\n begin_params_axis=begin_params_axis,\n scope=None,\n **kwargs)\n\n@layer\ndef gnorm(x, group):\n \"\"\"\n group normalization\n\n :param x: [N, ...., C]\n :param int group: G,\n :return:\n \"\"\"\n # def GroupNorm(x, gamma, beta, G, eps=1e−5):\n # # x: input features with shape [N,C,H,W]\n # # gamma, beta: scale and offset, with shape [1,C,1,1]\n # # G: number of groups for GN\n # N, C, H, W = x.shape\n # x = tf.reshape(x, [N, G, C // G, H, W])\n # mean, var = tf.nn.moments(x, [2, 3, 4], keep dims=True)\n # x = (x − mean) / tf.sqrt(var + eps)\n # x = tf.reshape(x, [N, C, H, W])\n # return x ∗ gamma + beta\n shape = list(x.dims)\n if shape[0] is None:\n shape[0] = -1\n ch = shape[-1]\n shape[-1] = ch // group\n shape.append(group)\n\n # todo : 나누어 안떨어진다면! ch // group 안떨어진다..\n assert (ch // group) * group == ch\n\n x = tf.reshape(x, shape)\n x_n = lnorm(x)\n\n # restore original shape\n shape = shape[:-1]\n shape[-1] = ch\n x = tf.reshape(x_n, shape)\n\n return x\n\n\n\n\n# endregion\n\n\n# region dense and dropout\n\n\n@layer\ndef dropout(x, keep_prob=0.5, is_training=None, noise_shape=None, seed=None):\n\n if keep_prob == 1.0:\n return x\n\n def _dropout():\n return tf.nn.dropout(x, keep_prob, noise_shape, seed)\n if is_training is None:\n is_training = x.graph.is_training\n else:\n is_training = tf.convert_to_tensor(is_training)\n return tf.cond(is_training, _dropout, lambda: x)\n\n\n@layer\ndef dense(x, outdim, initializer=tf.glorot_uniform, bias=False, name=None):\n \"\"\"\n out = dense( shape=shape, init=None, paramset=None)\n :param x: tensor\n :param bias:\n :param outdim: output_size\n :param initializer:\n :param name:\n :return: layer | output | (output, params)\n \"\"\"\n if x.ndim == 4:\n x = x.flat2d()\n\n assert x.ndim == 2\n\n outshape = not isinstance(outdim, 
int)\n if outshape:\n dim = [-1] + list(outdim)\n outdim = np.prod(outdim)\n\n shape = [x.dims[-1], outdim]\n W = tf.get_weight('W', shape=shape, initializer=initializer(shape))\n # W = tf.get_weight('W', initializer=initializer(shape))\n out = x.dot(W)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.zeros_initializer())\n out = tf.nn.bias_add(out, b)\n\n if outshape:\n # make reshape\n out = out.reshape(dim)\n\n return tf.identity(out, name=name)\n\n# endregion\n\n\n@layer\ndef bias(x, initializer=tf.zeros_initializer, name=None):\n outdim = x.dims[-1]\n b = tf.get_bias('b', shape=(outdim,), initializer=initializer())\n return tf.nn.bias_add(x, b, name=name)\n\n\n# region pooling\n\n\ndef _pool_kernel_stide(dim, kernel, stride):\n if isinstance(kernel, int):\n kernel = [kernel] * dim\n if isinstance(stride, int):\n stride = [stride] * dim\n assert len(kernel) == dim and len(stride) == dim\n\n return [1] + list(kernel) + [1], [1] + list(stride) + [1]\n\n\n@layer\ndef maxpool(x, kernel=2, stride=None, padding='SAME'):\n nd = x.ndim - 2\n stride = kernel if stride is None else stride\n kernel, stride = _pool_kernel_stide(nd, kernel, stride)\n if nd == 2:\n return tf.nn.max_pool(x, kernel, stride, padding)\n elif nd == 3:\n return tf.nn.max_pool3d(x, kernel, stride, padding)\n else:\n raise ValueError('maxpool support {0}? 
'.format(nd))\n\n\n@layer\ndef maxpool_where(x, kernel, stride=None, pads=None, padding='SAME', keep=None):\n\n # assume kernel == stride\n assert stride is None and padding == 'SAME'\n stride = kernel\n pooled = maxpool(x, kernel, stride=stride, padding=padding)\n mask = where_pooled(x, pooled, kernel, pads=pads)\n if keep is None:\n return pooled, mask\n else:\n keep.append(mask)\n return pooled\n\n\n@layer\ndef where_pooled(x, pooled, kernel=None, pads=None):\n \"\"\"\n return mask\n :param x:\n :param pooled:\n :param kernel:\n :param pads:\n :return:\n \"\"\"\n # todo : add 3d support\n assert x.ndim == 4\n import math\n if kernel is None:\n kernel = [math.ceil(float(d) / float(p)) for d, p in zip(x.dims, pooled.zip)]\n repeat = pooled.repeats(kernel, axis=[1, 2])\n elif isinstance(kernel, (tuple, list)):\n repeat = pooled.repeats(kernel, axis=[1, 2])\n else:\n repeat = pooled.repeats([kernel, kernel], axis=[1, 2])\n\n if pads is not None:\n repeat = repeat.pad(pads, axis=[1, 2])\n # crop need\n dim = x.dims\n sameshaped = repeat[:dim[0], :dim[1], :dim[2], :dim[3]]\n mask = tf.equal(x, sameshaped).to_float()\n\n return mask\n\n\n@layer\ndef unpool_where(x, mask, kernel, padding='SAME'):\n \"\"\"\n unpool with maxpool mask\n :param x:\n :param mask:\n :param kernel:\n :param padding:\n :return:\n \"\"\"\n\n # really not a option yet\n # assert stride is None\n assert padding == 'SAME'\n\n nd = x.ndim\n if nd == 4:\n if isinstance(kernel, int):\n kernel = (kernel, kernel)\n unpooled = x.repeats(kernel, axis=(1, 2))\n elif nd == 5:\n if isinstance(kernel, int):\n kernel = (kernel, kernel, kernel)\n unpooled = x.repeats(kernel, axis=(1, 2, 3))\n else:\n raise ValueError('unsupported nd {0}'.format(nd))\n\n return unpooled * mask\n\n\n@layer\ndef unpool_zero(x, kernel):\n \"\"\" upsample by inserting zeros.. 
\"\"\"\n if not isinstance(kernel, (list, tuple)) and isinstance(kernel, int):\n kernel = [kernel] * (x.ndim - 2)\n\n out = x\n for axis in range(1, x.ndim-2):\n out = out.insert_zero(kernel[axis-1], axis=axis)\n\n return out\n\n\n@layer\ndef unpool_repeat(x, kernel):\n \"\"\" upsample by repeating\"\"\"\n if not isinstance(kernel, (list, tuple)) and isinstance(kernel, int):\n kernel = [kernel] * (x.ndim - 2)\n\n return x.repeats(kernel, axis=list(range(1, x.ndim-2)))\n\n\n@layer\ndef avgpool(x, kernel, stride, padding='SAME'):\n nd = x.ndim - 2\n kernel, stride = _pool_kernel_stide(nd, kernel, stride)\n if nd == 2:\n return tf.nn.avg_pool(x, kernel, stride, padding)\n elif nd == 3:\n return tf.nn.avg_pool3d(x, kernel, stride, padding)\n else:\n raise ValueError('avgpool support {0}? '.format(nd))\n\n\n@layer\ndef gpool(x, keepdims=True):\n \"\"\"\n global_avgpool\n :param x:\n :param keepdims:\n :return:\n \"\"\"\n # http://arxiv.org/pdf/1312.4400.pdf\n axis = list(range(1, x.ndim-1))\n return x.mean(axis=axis, keepdims=keepdims)\n\n\n# endregion\n\n# region atrous convolution\n\n# def atrous2d(x, )\n\ndef _atrous1d(x, kernel, rate, padding='SAME'):\n \"\"\"\n cf https://www.tensorflow.org/versions/r0.11/api_docs/python/nn.html#atrous_conv2d\n :param x: [batch, time, channel]\n :param kernel: [1, 1, inchannel, outchannel]\n :param rate: dialtion rate\n :param padding: 'same' or 'valid'\n :param bias:\n :return:\n \"\"\"\n # from ireshape import time_to_batch, batch_to_time\n\n # atrous_conv1d implementation\n if rate == 1:\n # same to normal conv1d\n out = tf.nn.conv1d(x, kernel, stride=(1, 1, 1), padding=padding)\n return out\n\n # if 'same'\n if padding == 'SAME':\n filter_width = kernel.dims[0]\n # temporal dimension of the filter and the upsampled filter in which we\n # introduce (rate - 1) zeros between consecutive filter values.\n filter_width_up = filter_width + (filter_width - 1) * (rate - 1)\n pad = filter_width_up - 1\n\n # When pad is odd, we pad more 
to right\n pad_left = pad // 2\n pad_right = pad - pad_left\n elif padding == 'VALID':\n pad_left = 0\n pad_right = 0\n else:\n raise ValueError('Invalid padding')\n\n in_width = x.dims[1] + pad_left + pad_right\n # more padding so that rate divides the width of the input\n pad_right_extra = (rate - in_width % rate) % rate\n pads = [(0, 0), (pad_left, pad_right + pad_right_extra), (0, 0)]\n\n out = x.time_to_batch(rate, pads)\n\n out = tf.nn.conv1d(out, kernel, stride=(1, 1, 1), padding='VALID')\n # if bias is not None:\n # bias=bias,\n\n crops = [(0, 0), (0, pad_right_extra), (0, 0)]\n\n # temporary test this\n out = out.batch_to_time(rate, crops)\n\n return out\n\n\n@layer\ndef atrous(x, outdim, kernel, rate, pad=0, padding='SAME',\n initializer=tf.he_uniform, bias=None, **kwargs):\n # todo rate per axis?\n\n assert isinstance(pad, int)\n nd = x.ndim - 2\n if pad:\n pads = [(0, 0)] + [(pad, pad)] * nd + [(0, 0)]\n x = tf.pad(x, pads, mode='CONSTANT')\n\n kernel = _kernel_shape(nd, kernel, x.dims[-1], outdim)\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel), **kwargs)\n\n if nd == 1:\n out = _atrous1d(x, W, rate, padding=padding)\n elif nd == 2:\n out = tf.nn.atrous_conv2d(x, W, rate, padding)\n else:\n raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))\n\n if bias is not None:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n\n return out\n\n\n# endregion\n\n# region deconv\n\n\ndef _deconv_outshape(nd, inshape, outdim, kernel, stride, padding, extra_shape=0):\n # conv2d case (filter = kernel)\n # output = (input + stride - 1)//stride # SAME ? 
filter?\n # output = (input + stride - filter)//stride # VALID\n # 위 식 inverse\n # output = (input * stride) - stride + 1 + extra\n # todo : through check need ??\n # => max일경우 (output - 1) * stride + 1 - stride\n # output = (input * stride) - stride + filter + extra # VALID\n # 단, 0 <= extra < stride\n if isinstance(kernel, int):\n kernel = [kernel] * nd\n if isinstance(stride, int):\n stride = [stride] * nd\n if extra_shape is None:\n extra_shape = 0\n if isinstance(extra_shape, int):\n extra_shape = [extra_shape] * nd\n\n outshape = [None] * nd\n if padding == 'SAME':\n for i in range(0, nd):\n outshape[i] = inshape[i+1] * stride[i] + extra_shape[0]\n elif padding == 'VALID':\n # assert -stride[0] < extra_shape[0] < stride[0]\n # assert -stride[1] < extra_shape[1] < stride[1]\n for i in range(0, nd):\n outshape[i] = (inshape[i+1] * stride[i]) - stride[i] + kernel[i] + extra_shape[i]\n else:\n raise ValueError('unknown padding option {0}'.format(padding))\n\n return [inshape[0]] + outshape + [outdim]\n\n\n@layer\ndef deconv(x, outdim, kernel, stride=1, padding='SAME',\n initializer=tf.he_uniform, bias=False, extra=None, **kwargs):\n nd = x.ndim - 2\n out_shape = _deconv_outshape(nd, x.dims, outdim, kernel, stride, padding, extra)\n oshape = tf.TensorShape(out_shape)\n if out_shape[0] is None:\n out_shape[0] = tf.shape(x)[0]\n out_shape = tf.stack(out_shape)\n\n kernel_shape = _kernel_shape(nd, kernel, outdim, x.dims[-1]) # swap in and out channel\n stride = _stride_shape(nd, stride) # stride\n\n W = tf.get_weight('W', shape=kernel_shape, initializer=initializer(kernel_shape))\n\n if nd == 2:\n out = tf.nn.conv2d_transpose(x, W, out_shape, strides=stride, padding=padding)\n elif nd == 3:\n out = tf.nn.conv3d_transpose(x, W, out_shape, strides=stride, padding=padding)\n else:\n raise NotImplementedError('not implementd for ndim [{0}]'.format(nd))\n\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.zeros_initializer(), **kwargs)\n out = 
tf.nn.bias_add(out, b)\n\n out.set_shape(oshape)\n\n return out\n\n\n# endregion\n\n# region depthwise\n\n@layer\ndef dwconv(x, kernel, multiplier=1, stride=1, pad=0, padding='SAME',\n initializer=tf.he_uniform, bias=False, **kwargs):\n\n if pad:\n pads = [(0, 0), (pad, pad), (pad, pad), (0, 0)]\n x = tf.pad(x, pads, mode='CONSTANT')\n\n kernel = _kernel_shape(2, kernel, x.dims[-1], multiplier)\n stride = _stride_shape(2, stride)\n\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel), **kwargs)\n out = tf.nn.depthwise_conv2d(x, W, stride, padding)\n if bias:\n outdim = kernel[2] * multiplier\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.zeros_initializer(), **kwargs)\n out = tf.nn.bias_add(out, b)\n\n return out\n\n# endregion\n\n# region subpixel\n\n\n@layer\ndef subpixel(x, kernel, factor=2, stride=1, pad=0, padding='SAME',\n initializer=tf.he_uniform, bias=False, **kwargs):\n from .ireshape import channel_to_space\n\n assert x.ndim == 4 # implemented for 4D tensor\n\n indim = x.dims[-1]\n outdim = indim * factor * factor\n\n kernel = _kernel_shape(2, kernel, indim, outdim)\n stride = _stride_shape(2, stride)\n\n W = tf.get_weight('W', shape=kernel, initializer=initializer(kernel))\n out = tf.nn.conv2d(x, W, stride, padding=padding)\n if bias:\n b = tf.get_bias('b', shape=(outdim,), initializer=tf.zeros_initializer())\n out = tf.nn.bias_add(out, b)\n\n # periodic shuffle\n out = channel_to_space(out, factor)\n\n return out\n\n# endregion\n\n\n# region activation\n\n\n@layer\ndef leaky(x, slope=0.01, name=None):\n \"\"\"\n leaky_relu\n see also pleaky\n :param x:\n :param slope: 0.01 default\n :return:\n \"\"\"\n return tf.maximum(x, x*slope, name=name)\n\n\n@layer\ndef pleaky(x):\n \"\"\"\n parametric leakyrelu\n :param x:\n :return:\n \"\"\"\n alpha = tf.get_bias('alpha', shape=(), initializer=tf.constant_initializer(0.01))\n return tf.maximum(x, x * alpha)\n\n# endregion\n\n# region resize images\n\n\n@layer\ndef sizeup(x, factor=(2, 
2), extras=(0, 0), method=ResizeMethod.NEAREST_NEIGHBOR, align_corners=False):\n inshape = x.dims\n if isinstance(factor, int):\n factor = (factor, factor)\n if isinstance(extras, int):\n extras = (extras, extras)\n\n hw = [inshape[1] * factor[0] + extras[0], inshape[2] * factor[1] + extras[1]]\n return tf.image.resize_images(x, hw, method=method, align_corners=align_corners)\n\n\n@layer\ndef sizedown(x, factors=(2, 2), extras=(0, 0), method=ResizeMethod.NEAREST_NEIGHBOR, align_corners=False):\n inshape = x.dims\n if isinstance(factors, int):\n factors = (factors, factors)\n if isinstance(extras, int):\n extras = (extras, extras)\n\n hw = [inshape[1] // factors[0] + extras[0], inshape[2] // factors[1] + extras[1]]\n\n return tf.image.resize_images(x, hw, method=method, align_corners=align_corners)\n\n\n# endregion\n\n# region collecting utils\n\n@layer\ndef keep(t, keepto, collection=None):\n \"\"\"\n append to list and return t as is\n :param t: tensor\n :param keepto: list\n :return:\n \"\"\"\n if collection is not None:\n tf.add_to_collection(collection, t)\n keepto.append(t)\n return t\n\n\n@layer\ndef collect(t, collection='activation'):\n \"\"\"\n append to list and return t as is\n :param t: tensor\n :param collection:\n :return:\n \"\"\"\n tf.add_to_collection(collection, t)\n return t\n\n# endregion\n\n# region util\n\n@layer\ndef iname(t, name):\n return tf.identity(t, name=name)\n\n# endregion\n",
"step-ids": [
22,
26,
29,
32,
39
]
}
|
[
22,
26,
29,
32,
39
] |
#!/usr/bin/env python3
import matplotlib
from matplotlib.colors import to_hex
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import itertools as it
from pathlib import Path
import subprocess
from tqdm import tqdm
from koala import plotting as pl
from koala import phase_diagrams as pd
from koala import pointsets, voronization, flux_finder, graph_color
from koala import example_graphs as eg
import functools
def multi_set_symmetric_difference(sets):
    """Return the elements that appear in an odd number of the given sets.

    Folds symmetric difference (XOR) over *sets*, so an element survives
    exactly when it occurs in an odd number of the inputs.

    :param sets: iterable of iterables of hashable elements.
    :return: list of the surviving elements (arbitrary order).
    """
    # Seed the fold with an empty set so an empty *sets* yields [] instead of
    # reduce() raising TypeError on an empty sequence.
    return list(functools.reduce(lambda a, b: a ^ b, (set(s) for s in sets), set()))
def flood_iteration_plaquettes(l, plaquettes):
    """Grow a set of plaquettes by one flood-fill step.

    :param l: lattice whose ``plaquettes[p].adjacent_plaquettes`` lists the
        neighbours of plaquette ``p``.
    :param plaquettes: iterable of plaquette indices.
    :return: set containing the original plaquettes plus every plaquette
        adjacent to one of them.
    """
    grown = set(plaquettes)
    for p in plaquettes:
        grown.update(l.plaquettes[p].adjacent_plaquettes)
    return grown
def flood_iteration_vertices(l, vertices):
    """Grow a set of vertices by one flood-fill step.

    Adds every endpoint of every edge incident to the given vertices, which
    re-includes the vertices themselves.

    :param l: lattice exposing ``vertices.adjacent_edges`` (edge indices per
        vertex) and ``edges.indices`` (endpoint pairs per edge).
    :param vertices: iterable of vertex indices.
    :return: set of the originals plus all edge-connected neighbours.
    """
    grown = set(vertices)
    for v in set(vertices):
        incident_edges = l.vertices.adjacent_edges[v]
        for endpoints in l.edges.indices[incident_edges]:
            grown.update(endpoints)
    return grown
# imports just for this plot
# Figure geometry: single-column width in inches (journal-style layout).
column_width = 3.375
w = 3.375
black_line_widths = 1.5
# Render text with LaTeX / Computer Modern so figure labels match the paper body.
matplotlib.rcParams.update({'font.size': 13, 'text.usetex': True, 'font.family': 'serif', 'font.serif': ['Computer Modern']})
matplotlib.rcParams.update({"axes.linewidth": black_line_widths})
# Three evenly spaced colours sampled from the inferno colormap, as hex strings.
line_colors = [to_hex(a) for a in cm.inferno([0.25, 0.5, 0.75])]
# Fixed seed so the generated amorphous lattice is reproducible run to run.
rng = np.random.default_rng(seed = 10)
l, coloring, ujk = eg.make_amorphous(8, rng = rng)
# l, coloring, ujk = eg.make_honeycomb(8)
# Flood-fill seeds: one starting plaquette index and one starting vertex index.
plaquettes = [40,]
vertices = [78,]
subprocess.run(["mkdir", "-p", "./animation"])
# Render one frame per flood-fill iteration: the left panel shows the vertex
# flood, the right panel the plaquette flood realised as a flux sector.
for n in tqdm(range(15)):
    fig, axes = plt.subplots(nrows=1, ncols=2)
    fig.set_size_inches(2 * w, 2/2 * w)
    for a in axes: a.set(xticks = [], yticks = [])
    # pl.plot_vertex_indices(l, ax = ax)
    # pl.plot_edge_indices(l, ax = ax)
    # pl.plot_plaquette_indices(l, ax = ax)
    # Frame 0 shows just the seeds; each later frame grows both floods by one step.
    if n > 0:
        vertices = flood_iteration_vertices(l, vertices)
        plaquettes = flood_iteration_plaquettes(l, plaquettes)
    # --- left panel: vertex flood ---
    ax = axes[0]
    # Edges incident to an odd number of flooded vertices, i.e. the boundary
    # edges of the flooded region.
    multi_edges = multi_set_symmetric_difference([l.vertices.adjacent_edges[v] for v in vertices])
    if multi_edges: pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], subset = multi_edges)
    pl.plot_edges(l, ax = ax, color = 'k', subset = multi_edges)
    pl.plot_vertices(l, ax = ax, subset = list(vertices), s = 5)
    # Faint background copy of the full lattice and its dual for context.
    pl.plot_edges(l, ax = ax, alpha = 0.1)
    pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], alpha = 0.1)
    ax.set(xticks = [], yticks = [])
    # --- right panel: plaquette flood as a flux sector ---
    ax = axes[1]
    plaquette_boolean = np.array([i in plaquettes for i in range(l.n_plaquettes)])
    # Flooded plaquettes get flux -1, all others +1; then solve for a bond
    # configuration ujk realising that flux sector and read the fluxes back.
    fluxes = 1 - 2*plaquette_boolean
    ujk = flux_finder.find_flux_sector(l, fluxes, ujk)
    fluxes = flux_finder.fluxes_from_bonds(l, ujk)
    pl.plot_edges(l, ax = ax, alpha = 0.1)
    pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], alpha = 0.1)
    # Highlight the flipped bonds and shade the plaquettes carrying flux.
    pl.plot_edges(l, ax = ax, subset = (ujk == -1))
    if len(plaquettes) > 1: pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], subset = (ujk == -1), )
    pl.plot_plaquettes(l, subset = fluxes == -1, ax = ax, color_scheme = ["orange", "white"], alpha = 0.5);
    ax.set(xticks = [], yticks = [])
    fig.tight_layout()
    # Iteration 3 doubles as the static figure saved for the paper.
    if n == 3:
        fig.savefig(f'./{Path.cwd().name}.svg', transparent = True)
        fig.savefig(f'./{Path.cwd().name}.pdf')
    fig.savefig(f"animation/iteration_{n:03}.svg")
    plt.close(fig)
# Assemble the frames into a GIF named after the working directory.
# NOTE(review): "animation/*.svg" is passed unexpanded (no shell); this relies
# on ImageMagick expanding the wildcard itself — confirm the installed magick
# supports that on the target platform.
subprocess.run(["magick", "animation/*.svg", f'./{Path.cwd().name}.gif'])
# Rewrite the GIF in place, inserting a 100-tick (1 s) inter-frame delay.
subprocess.run(["convert", "-delay", "100", f'./{Path.cwd().name}.gif', f'./{Path.cwd().name}.gif'])
subprocess.run(["rm", "-r", "./animation"])
|
normal
|
{
"blob_id": "d429f03c0f0c241166d6c0a5a45dc1101bcaec16",
"index": 5878,
"step-1": "<mask token>\n\n\ndef multi_set_symmetric_difference(sets):\n return list(functools.reduce(lambda a, b: a ^ b, [set(s) for s in sets]))\n\n\ndef flood_iteration_plaquettes(l, plaquettes):\n return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].\n adjacent_plaquettes for p in plaquettes))\n\n\ndef flood_iteration_vertices(l, vertices):\n return set(vertices) | set(it.chain.from_iterable(i for v in set(\n vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef multi_set_symmetric_difference(sets):\n return list(functools.reduce(lambda a, b: a ^ b, [set(s) for s in sets]))\n\n\ndef flood_iteration_plaquettes(l, plaquettes):\n return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].\n adjacent_plaquettes for p in plaquettes))\n\n\ndef flood_iteration_vertices(l, vertices):\n return set(vertices) | set(it.chain.from_iterable(i for v in set(\n vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))\n\n\n<mask token>\nmatplotlib.rcParams.update({'font.size': 13, 'text.usetex': True,\n 'font.family': 'serif', 'font.serif': ['Computer Modern']})\nmatplotlib.rcParams.update({'axes.linewidth': black_line_widths})\n<mask token>\nsubprocess.run(['mkdir', '-p', './animation'])\nfor n in tqdm(range(15)):\n fig, axes = plt.subplots(nrows=1, ncols=2)\n fig.set_size_inches(2 * w, 2 / 2 * w)\n for a in axes:\n a.set(xticks=[], yticks=[])\n if n > 0:\n vertices = flood_iteration_vertices(l, vertices)\n plaquettes = flood_iteration_plaquettes(l, plaquettes)\n ax = axes[0]\n multi_edges = multi_set_symmetric_difference([l.vertices.adjacent_edges\n [v] for v in vertices])\n if multi_edges:\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=multi_edges\n )\n pl.plot_edges(l, ax=ax, color='k', subset=multi_edges)\n pl.plot_vertices(l, ax=ax, subset=list(vertices), s=5)\n pl.plot_edges(l, ax=ax, alpha=0.1)\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)\n ax.set(xticks=[], yticks=[])\n ax = axes[1]\n plaquette_boolean = np.array([(i in plaquettes) for i in range(l.\n n_plaquettes)])\n fluxes = 1 - 2 * plaquette_boolean\n ujk = flux_finder.find_flux_sector(l, fluxes, ujk)\n fluxes = flux_finder.fluxes_from_bonds(l, ujk)\n pl.plot_edges(l, ax=ax, alpha=0.1)\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)\n pl.plot_edges(l, ax=ax, subset=ujk == -1)\n if len(plaquettes) > 1:\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=ujk == 
-1)\n pl.plot_plaquettes(l, subset=fluxes == -1, ax=ax, color_scheme=[\n 'orange', 'white'], alpha=0.5)\n ax.set(xticks=[], yticks=[])\n fig.tight_layout()\n if n == 3:\n fig.savefig(f'./{Path.cwd().name}.svg', transparent=True)\n fig.savefig(f'./{Path.cwd().name}.pdf')\n fig.savefig(f'animation/iteration_{n:03}.svg')\n plt.close(fig)\nsubprocess.run(['magick', 'animation/*.svg', f'./{Path.cwd().name}.gif'])\nsubprocess.run(['convert', '-delay', '100', f'./{Path.cwd().name}.gif',\n f'./{Path.cwd().name}.gif'])\nsubprocess.run(['rm', '-r', './animation'])\n",
"step-3": "<mask token>\n\n\ndef multi_set_symmetric_difference(sets):\n return list(functools.reduce(lambda a, b: a ^ b, [set(s) for s in sets]))\n\n\ndef flood_iteration_plaquettes(l, plaquettes):\n return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].\n adjacent_plaquettes for p in plaquettes))\n\n\ndef flood_iteration_vertices(l, vertices):\n return set(vertices) | set(it.chain.from_iterable(i for v in set(\n vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))\n\n\ncolumn_width = 3.375\nw = 3.375\nblack_line_widths = 1.5\nmatplotlib.rcParams.update({'font.size': 13, 'text.usetex': True,\n 'font.family': 'serif', 'font.serif': ['Computer Modern']})\nmatplotlib.rcParams.update({'axes.linewidth': black_line_widths})\nline_colors = [to_hex(a) for a in cm.inferno([0.25, 0.5, 0.75])]\nrng = np.random.default_rng(seed=10)\nl, coloring, ujk = eg.make_amorphous(8, rng=rng)\nplaquettes = [40]\nvertices = [78]\nsubprocess.run(['mkdir', '-p', './animation'])\nfor n in tqdm(range(15)):\n fig, axes = plt.subplots(nrows=1, ncols=2)\n fig.set_size_inches(2 * w, 2 / 2 * w)\n for a in axes:\n a.set(xticks=[], yticks=[])\n if n > 0:\n vertices = flood_iteration_vertices(l, vertices)\n plaquettes = flood_iteration_plaquettes(l, plaquettes)\n ax = axes[0]\n multi_edges = multi_set_symmetric_difference([l.vertices.adjacent_edges\n [v] for v in vertices])\n if multi_edges:\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=multi_edges\n )\n pl.plot_edges(l, ax=ax, color='k', subset=multi_edges)\n pl.plot_vertices(l, ax=ax, subset=list(vertices), s=5)\n pl.plot_edges(l, ax=ax, alpha=0.1)\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)\n ax.set(xticks=[], yticks=[])\n ax = axes[1]\n plaquette_boolean = np.array([(i in plaquettes) for i in range(l.\n n_plaquettes)])\n fluxes = 1 - 2 * plaquette_boolean\n ujk = flux_finder.find_flux_sector(l, fluxes, ujk)\n fluxes = flux_finder.fluxes_from_bonds(l, ujk)\n pl.plot_edges(l, ax=ax, 
alpha=0.1)\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)\n pl.plot_edges(l, ax=ax, subset=ujk == -1)\n if len(plaquettes) > 1:\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=ujk == -1)\n pl.plot_plaquettes(l, subset=fluxes == -1, ax=ax, color_scheme=[\n 'orange', 'white'], alpha=0.5)\n ax.set(xticks=[], yticks=[])\n fig.tight_layout()\n if n == 3:\n fig.savefig(f'./{Path.cwd().name}.svg', transparent=True)\n fig.savefig(f'./{Path.cwd().name}.pdf')\n fig.savefig(f'animation/iteration_{n:03}.svg')\n plt.close(fig)\nsubprocess.run(['magick', 'animation/*.svg', f'./{Path.cwd().name}.gif'])\nsubprocess.run(['convert', '-delay', '100', f'./{Path.cwd().name}.gif',\n f'./{Path.cwd().name}.gif'])\nsubprocess.run(['rm', '-r', './animation'])\n",
"step-4": "import matplotlib\nfrom matplotlib.colors import to_hex\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport itertools as it\nfrom pathlib import Path\nimport subprocess\nfrom tqdm import tqdm\nfrom koala import plotting as pl\nfrom koala import phase_diagrams as pd\nfrom koala import pointsets, voronization, flux_finder, graph_color\nfrom koala import example_graphs as eg\nimport functools\n\n\ndef multi_set_symmetric_difference(sets):\n return list(functools.reduce(lambda a, b: a ^ b, [set(s) for s in sets]))\n\n\ndef flood_iteration_plaquettes(l, plaquettes):\n return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].\n adjacent_plaquettes for p in plaquettes))\n\n\ndef flood_iteration_vertices(l, vertices):\n return set(vertices) | set(it.chain.from_iterable(i for v in set(\n vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))\n\n\ncolumn_width = 3.375\nw = 3.375\nblack_line_widths = 1.5\nmatplotlib.rcParams.update({'font.size': 13, 'text.usetex': True,\n 'font.family': 'serif', 'font.serif': ['Computer Modern']})\nmatplotlib.rcParams.update({'axes.linewidth': black_line_widths})\nline_colors = [to_hex(a) for a in cm.inferno([0.25, 0.5, 0.75])]\nrng = np.random.default_rng(seed=10)\nl, coloring, ujk = eg.make_amorphous(8, rng=rng)\nplaquettes = [40]\nvertices = [78]\nsubprocess.run(['mkdir', '-p', './animation'])\nfor n in tqdm(range(15)):\n fig, axes = plt.subplots(nrows=1, ncols=2)\n fig.set_size_inches(2 * w, 2 / 2 * w)\n for a in axes:\n a.set(xticks=[], yticks=[])\n if n > 0:\n vertices = flood_iteration_vertices(l, vertices)\n plaquettes = flood_iteration_plaquettes(l, plaquettes)\n ax = axes[0]\n multi_edges = multi_set_symmetric_difference([l.vertices.adjacent_edges\n [v] for v in vertices])\n if multi_edges:\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=multi_edges\n )\n pl.plot_edges(l, ax=ax, color='k', subset=multi_edges)\n pl.plot_vertices(l, ax=ax, 
subset=list(vertices), s=5)\n pl.plot_edges(l, ax=ax, alpha=0.1)\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)\n ax.set(xticks=[], yticks=[])\n ax = axes[1]\n plaquette_boolean = np.array([(i in plaquettes) for i in range(l.\n n_plaquettes)])\n fluxes = 1 - 2 * plaquette_boolean\n ujk = flux_finder.find_flux_sector(l, fluxes, ujk)\n fluxes = flux_finder.fluxes_from_bonds(l, ujk)\n pl.plot_edges(l, ax=ax, alpha=0.1)\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], alpha=0.1)\n pl.plot_edges(l, ax=ax, subset=ujk == -1)\n if len(plaquettes) > 1:\n pl.plot_dual(l, ax=ax, color_scheme=line_colors[1:], subset=ujk == -1)\n pl.plot_plaquettes(l, subset=fluxes == -1, ax=ax, color_scheme=[\n 'orange', 'white'], alpha=0.5)\n ax.set(xticks=[], yticks=[])\n fig.tight_layout()\n if n == 3:\n fig.savefig(f'./{Path.cwd().name}.svg', transparent=True)\n fig.savefig(f'./{Path.cwd().name}.pdf')\n fig.savefig(f'animation/iteration_{n:03}.svg')\n plt.close(fig)\nsubprocess.run(['magick', 'animation/*.svg', f'./{Path.cwd().name}.gif'])\nsubprocess.run(['convert', '-delay', '100', f'./{Path.cwd().name}.gif',\n f'./{Path.cwd().name}.gif'])\nsubprocess.run(['rm', '-r', './animation'])\n",
"step-5": "#!/usr/bin/env python3\n\nimport matplotlib\nfrom matplotlib.colors import to_hex\nfrom matplotlib import cm\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport itertools as it\nfrom pathlib import Path\nimport subprocess\nfrom tqdm import tqdm\n\nfrom koala import plotting as pl\nfrom koala import phase_diagrams as pd\nfrom koala import pointsets, voronization, flux_finder, graph_color\nfrom koala import example_graphs as eg\n\nimport functools\n\ndef multi_set_symmetric_difference(sets):\n return list(functools.reduce(lambda a,b: a^b, [set(s) for s in sets]))\n\ndef flood_iteration_plaquettes(l, plaquettes):\n return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].adjacent_plaquettes for p in plaquettes))\n\ndef flood_iteration_vertices(l, vertices):\n return set(vertices) | set(it.chain.from_iterable(i for v in set(vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))\n\n\n# imports just for this plot\n\ncolumn_width = 3.375\nw = 3.375\nblack_line_widths = 1.5\n\nmatplotlib.rcParams.update({'font.size': 13, 'text.usetex': True, 'font.family': 'serif', 'font.serif': ['Computer Modern']})\nmatplotlib.rcParams.update({\"axes.linewidth\": black_line_widths})\n\nline_colors = [to_hex(a) for a in cm.inferno([0.25, 0.5, 0.75])]\n\nrng = np.random.default_rng(seed = 10)\nl, coloring, ujk = eg.make_amorphous(8, rng = rng)\n# l, coloring, ujk = eg.make_honeycomb(8)\n\nplaquettes = [40,]\nvertices = [78,]\n\nsubprocess.run([\"mkdir\", \"-p\", \"./animation\"])\n\nfor n in tqdm(range(15)):\n fig, axes = plt.subplots(nrows=1, ncols=2)\n fig.set_size_inches(2 * w, 2/2 * w)\n for a in axes: a.set(xticks = [], yticks = [])\n\n # pl.plot_vertex_indices(l, ax = ax)\n # pl.plot_edge_indices(l, ax = ax)\n # pl.plot_plaquette_indices(l, ax = ax)\n \n if n > 0:\n vertices = flood_iteration_vertices(l, vertices)\n plaquettes = flood_iteration_plaquettes(l, plaquettes)\n \n ax = axes[0]\n \n multi_edges = 
multi_set_symmetric_difference([l.vertices.adjacent_edges[v] for v in vertices])\n \n if multi_edges: pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], subset = multi_edges)\n pl.plot_edges(l, ax = ax, color = 'k', subset = multi_edges)\n pl.plot_vertices(l, ax = ax, subset = list(vertices), s = 5)\n\n pl.plot_edges(l, ax = ax, alpha = 0.1)\n pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], alpha = 0.1)\n\n ax.set(xticks = [], yticks = [])\n \n ax = axes[1]\n\n plaquette_boolean = np.array([i in plaquettes for i in range(l.n_plaquettes)])\n\n fluxes = 1 - 2*plaquette_boolean\n ujk = flux_finder.find_flux_sector(l, fluxes, ujk)\n fluxes = flux_finder.fluxes_from_bonds(l, ujk)\n\n pl.plot_edges(l, ax = ax, alpha = 0.1)\n pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], alpha = 0.1)\n \n pl.plot_edges(l, ax = ax, subset = (ujk == -1))\n if len(plaquettes) > 1: pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], subset = (ujk == -1), )\n pl.plot_plaquettes(l, subset = fluxes == -1, ax = ax, color_scheme = [\"orange\", \"white\"], alpha = 0.5);\n ax.set(xticks = [], yticks = [])\n \n fig.tight_layout()\n if n == 3: \n fig.savefig(f'./{Path.cwd().name}.svg', transparent = True)\n fig.savefig(f'./{Path.cwd().name}.pdf')\n fig.savefig(f\"animation/iteration_{n:03}.svg\")\n plt.close(fig)\n\nsubprocess.run([\"magick\", \"animation/*.svg\", f'./{Path.cwd().name}.gif'])\nsubprocess.run([\"convert\", \"-delay\", \"100\", f'./{Path.cwd().name}.gif', f'./{Path.cwd().name}.gif'])\nsubprocess.run([\"rm\", \"-r\", \"./animation\"])",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import numpy as np
import scipy
class node(object):
    """A single decision-tree node.

    Attributes:
        j: index of the feature this node splits on.
        children: child nodes, populated during tree construction.
        c: predicted class/constant for leaf nodes (set later).
        vals: split values associated with the children (set later).
    """

    def __init__(self, feature_idx):
        super(node, self).__init__()
        # The split feature is the only value known at construction time;
        # everything else is filled in while the tree is being built.
        self.j = feature_idx
        self.children = None
        self.c = None
        self.vals = None
class decisionTree(object):
    """Skeleton of a decision-tree classifier.

    fit/predict are placeholders awaiting an implementation.
    """

    def __init__(self, arg):
        super(decisionTree, self).__init__()
        self.arg = arg

    def fit(self, X, y):
        # Training is not implemented yet.
        pass

    def predict(self, X):
        # Prediction is not implemented yet.
        pass
def information_gain(x, y):
    """Information gain IG(x; y) = H(x) - H(x | y), in nats.

    Fix: the original had an empty body (a syntax error); this implements the
    standard entropy-based definition, self-contained so it does not depend on
    the other stubs in this module.

    :param x: array-like of target labels.
    :param y: array-like of feature values partitioning x (same length as x).
    :return: non-negative float, the reduction in entropy of x given y.
    """
    x = np.asarray(x)
    y = np.asarray(y)

    def _entropy(a):
        # Shannon entropy (nats) of the empirical distribution of a.
        _, counts = np.unique(a, return_counts=True)
        p = counts / float(a.shape[0])
        return float(-(p * np.log(p)).sum())

    n = float(y.shape[0])
    # H(x | y) = sum over values v of p(y=v) * H(x | y=v)
    h_cond = sum((np.count_nonzero(y == v) / n) * _entropy(x[y == v])
                 for v in np.unique(y))
    return _entropy(x) - h_cond
def entropy(x):
    """Shannon entropy (nats) of the empirical distribution of *x*.

    Fix: ``prob`` has no ``return_counts`` parameter (the original call raised
    TypeError); the probabilities-only form is requested via
    ``return_labels=False``.
    """
    px = prob(x, return_labels=False)
    return scipy.stats.entropy(px)
def prob(x, return_labels=True):
    """Empirical probability of each distinct value in *x*.

    :param x: numpy array of observations.
    :param return_labels: when true, also return the sorted unique values.
    :return: (probabilities, labels) tuple, or just probabilities.
    """
    labels, counts = np.unique(x, return_counts=True)
    px = counts / x.shape[0]
    if return_labels:
        return px, labels
    return px
def conditional_prob(x, y):
    """
    Calculate the conditional probability p(x | y).

    Not implemented yet; this placeholder returns None.
    """
    return None
def conditional_entropy(x, y, px=None):
    """
    Conditional entropy H(x | y), optionally reusing precomputed p(x).

    Not implemented yet; this placeholder returns None.
    """
    return None
|
normal
|
{
"blob_id": "c05994471d6608b5e48b71d253304a43100d583f",
"index": 7296,
"step-1": "import numpy as np\nimport scipy\n\n\nclass node(object):\n\t\"\"\"docstring for node\"\"\"\n\tdef __init__(self, feature_idx):\n\t\tsuper(node, self).__init__()\n\t\tself.children = None\n\t\tself.j = feature_idx\n\t\tself.c = None\n\t\tself.vals = None\n\nclass decisionTree(object):\n\t\"\"\"docstring for decisionTree\"\"\"\n\tdef __init__(self, arg):\n\t\tsuper(decisionTree, self).__init__()\n\t\tself.arg = arg\n\n\tdef fit(self, X, y):\n\t\tpass\n\n\tdef predict(self, X):\n\t\tpass\n\t\t\n\ndef information_gain(x, y):\n\t\n\n\ndef entropy(x):\n\tpx = prob(x, return_counts=False)\n\treturn scipy.stats.entropy(px)\n\ndef prob(x, return_labels=True):\n\tx_unique, counts = np.unique(x, return_counts=True)\n\tpx = counts / x.shape[0]\n\tif return_labels: return px, x_unique\n\treturn px\n\ndef conditional_prob(x, y):\n\t\"\"\"\n\tcalculate p(x|y)\n\t\"\"\"\n\t\n\ndef conditional_entropy(x, y, px=None):\n\tpass",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Compute 11 + 12 + 13 + ... + m for a user-supplied m.
m = int(input('请输入一个数:'))
# Start from m itself and add 11 .. m-1, giving the full 11..m total
# (for m < 11 this degenerates to just m, matching the original loop).
S = m + sum(range(11, m))
print('sum =', S)
|
normal
|
{
"blob_id": "49ffa225d433ef2263159ba2145da5ba2a95d1f2",
"index": 4664,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor x in range(11, m):\n S = S + x\nprint('sum =', S)\n",
"step-3": "m = int(input('请输入一个数:'))\nS = m\nfor x in range(11, m):\n S = S + x\nprint('sum =', S)\n",
"step-4": "#求11+12+13+。。。+m\nm = int(input('请输入一个数:'))\nS = m\nfor x in range(11,m):\n S = S+x\nprint('sum =',S)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = patterns('', url('^douban/books$', views.BookList.as_view()))
<|reserved_special_token_1|>
from django.conf.urls import patterns, url
import views
urlpatterns = patterns('', url('^douban/books$', views.BookList.as_view()))
<|reserved_special_token_1|>
#coding: utf-8
from django.conf.urls import patterns, url
import views
urlpatterns = patterns('',
url(r'^douban/books$', views.BookList.as_view()),
)
|
flexible
|
{
"blob_id": "93418e554893db4eb888396e8d6f60a8364d9ee3",
"index": 8560,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = patterns('', url('^douban/books$', views.BookList.as_view()))\n",
"step-3": "from django.conf.urls import patterns, url\nimport views\nurlpatterns = patterns('', url('^douban/books$', views.BookList.as_view()))\n",
"step-4": "#coding: utf-8\n\nfrom django.conf.urls import patterns, url\n\nimport views\n\nurlpatterns = patterns('',\n url(r'^douban/books$', views.BookList.as_view()),\n)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
Created on Tue May 15 11:34:22 2018
@author: Diogo Leite
"""
from SQL_obj_new.Dataset_config_dataset_new_sql import _DS_config_DS_SQL
class Dataset_conf_ds(object):
    """
    Maps the DATASET_CONF_DS table: an N-to-N link recording, for each
    dataset, the configuration applied to it and the value used.

    By convention, all FKs come last in the parameter declarations.
    """

    def __init__(self, id_ds_conf_ds=-1, value_configuration=-1,
                 FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1):
        """
        Constructor; every parameter defaults to -1 (unknown).

        :param id_ds_conf_ds: id of the dataset-configuration link row
        :param value_configuration: value of the configuration (e.g. bin count)
        :param FK_id_configuration_DCT_DCD: FK into DATASET_CONFIGURATIONS_TYPES
        :param FK_id_dataset_DS_DCD: FK into DATASETS

        :type id_ds_conf_ds: int
        :type value_configuration: int
        :type FK_id_configuration_DCT_DCD: int
        :type FK_id_dataset_DS_DCD: int
        """
        self.id_ds_conf_ds = id_ds_conf_ds
        self.value_configuration = value_configuration
        self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD
        self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD

    @staticmethod
    def get_all_datasets_conf_ds():
        """
        Return every dataset/configuration link stored in the database.

        Fix: declared @staticmethod — the original method took no ``self``,
        so calling it on an instance raised a TypeError.

        :return: list of Dataset_conf_ds
        :rtype: list(Dataset_conf_ds)
        """
        sqlObj = _DS_config_DS_SQL()
        return [Dataset_conf_ds(row[0], row[1], row[2], row[3])
                for row in sqlObj.select_all_DDI_DB()]

    def create_ds_config_ds(self):
        """
        Insert this link row (configuration value + configuration FK +
        dataset FK) if it does not already exist, store the resulting id on
        the instance, and return it.

        :return: id of the Dataset_conf_ds row
        :rtype: int
        """
        sqlObj = _DS_config_DS_SQL()
        self.id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(
            self.value_configuration, self.FK_id_configuration_DCT_DCD,
            self.FK_id_dataset_DS_DCD)
        return self.id_ds_conf_ds
|
normal
|
{
"blob_id": "76d2c3f74e8fae160396b4015ccec478dba97b87",
"index": 7422,
"step-1": "<mask token>\n\n\nclass Dataset_conf_ds(object):\n <mask token>\n\n def __init__(self, id_ds_conf_ds=-1, value_configuration=-1,\n FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1):\n \"\"\"\n Constructor of the DDI_interactionDB object. All the parameters have a default value\n\n :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown\n :param value_configuration: value of the bins - -1 if unknown\n :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown\n :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)\n\n :type id_ds_conf_ds: int - not required\n :type value_configuration: int - not required\n :type FK_id_configuration_DCT_DCD: text (date format) - required \n :type FK_id_dataset_DS_DCD: int - required \n \"\"\"\n self.id_ds_conf_ds = id_ds_conf_ds\n self.value_configuration = value_configuration\n self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD\n self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Dataset_conf_ds(object):\n <mask token>\n\n def __init__(self, id_ds_conf_ds=-1, value_configuration=-1,\n FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1):\n \"\"\"\n Constructor of the DDI_interactionDB object. All the parameters have a default value\n\n :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown\n :param value_configuration: value of the bins - -1 if unknown\n :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown\n :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)\n\n :type id_ds_conf_ds: int - not required\n :type value_configuration: int - not required\n :type FK_id_configuration_DCT_DCD: text (date format) - required \n :type FK_id_dataset_DS_DCD: int - required \n \"\"\"\n self.id_ds_conf_ds = id_ds_conf_ds\n self.value_configuration = value_configuration\n self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD\n self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD\n <mask token>\n\n def create_ds_config_ds(self):\n \"\"\"\n Insert a dataset configuration of Dataset in the database return it id\n The ds_conf_ds contain:\n - value of the creation\n - FK of the configuration\n - FK of the dataset\n\n :return: id Dataset_conf_ds\n :rtype int\n \"\"\"\n sqlObj = _DS_config_DS_SQL()\n value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(\n self.value_configuration, self.FK_id_configuration_DCT_DCD,\n self.FK_id_dataset_DS_DCD)\n self.id_ds_conf_ds = value_id_ds_conf_ds\n return value_id_ds_conf_ds\n",
"step-3": "<mask token>\n\n\nclass Dataset_conf_ds(object):\n \"\"\"\n This class treat the datasets configuration connection tables object has it exists in DATASET_CONF_DS table database\n\n NOTE: It consistes on a conection class (N to N) to know for each dataset with a given configuration\n\n By default, all FK are in the lasts positions in the parameters declaration\n \"\"\"\n\n def __init__(self, id_ds_conf_ds=-1, value_configuration=-1,\n FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1):\n \"\"\"\n Constructor of the DDI_interactionDB object. All the parameters have a default value\n\n :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown\n :param value_configuration: value of the bins - -1 if unknown\n :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown\n :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)\n\n :type id_ds_conf_ds: int - not required\n :type value_configuration: int - not required\n :type FK_id_configuration_DCT_DCD: text (date format) - required \n :type FK_id_dataset_DS_DCD: int - required \n \"\"\"\n self.id_ds_conf_ds = id_ds_conf_ds\n self.value_configuration = value_configuration\n self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD\n self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD\n\n def get_all_datasets_conf_ds():\n \"\"\"\n return an array with all the configurations of datasets in the database\n\n :return: array of datasets configurations\n :rtype: array(DDI_interaction_DB)\n \"\"\"\n listOfDatasetDSConfig = []\n sqlObj = _DS_config_DS_SQL()\n results = sqlObj.select_all_DDI_DB()\n for element in results:\n listOfDatasetDSConfig.append(Dataset_conf_ds(element[0],\n element[1], element[2], element[3]))\n return listOfDatasetDSConfig\n\n def create_ds_config_ds(self):\n \"\"\"\n Insert a dataset configuration of Dataset in the database return it id\n The ds_conf_ds contain:\n - value of the creation\n - FK of the 
configuration\n - FK of the dataset\n\n :return: id Dataset_conf_ds\n :rtype int\n \"\"\"\n sqlObj = _DS_config_DS_SQL()\n value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(\n self.value_configuration, self.FK_id_configuration_DCT_DCD,\n self.FK_id_dataset_DS_DCD)\n self.id_ds_conf_ds = value_id_ds_conf_ds\n return value_id_ds_conf_ds\n",
"step-4": "<mask token>\nfrom SQL_obj_new.Dataset_config_dataset_new_sql import _DS_config_DS_SQL\n\n\nclass Dataset_conf_ds(object):\n \"\"\"\n This class treat the datasets configuration connection tables object has it exists in DATASET_CONF_DS table database\n\n NOTE: It consistes on a conection class (N to N) to know for each dataset with a given configuration\n\n By default, all FK are in the lasts positions in the parameters declaration\n \"\"\"\n\n def __init__(self, id_ds_conf_ds=-1, value_configuration=-1,\n FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1):\n \"\"\"\n Constructor of the DDI_interactionDB object. All the parameters have a default value\n\n :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown\n :param value_configuration: value of the bins - -1 if unknown\n :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown\n :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)\n\n :type id_ds_conf_ds: int - not required\n :type value_configuration: int - not required\n :type FK_id_configuration_DCT_DCD: text (date format) - required \n :type FK_id_dataset_DS_DCD: int - required \n \"\"\"\n self.id_ds_conf_ds = id_ds_conf_ds\n self.value_configuration = value_configuration\n self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD\n self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD\n\n def get_all_datasets_conf_ds():\n \"\"\"\n return an array with all the configurations of datasets in the database\n\n :return: array of datasets configurations\n :rtype: array(DDI_interaction_DB)\n \"\"\"\n listOfDatasetDSConfig = []\n sqlObj = _DS_config_DS_SQL()\n results = sqlObj.select_all_DDI_DB()\n for element in results:\n listOfDatasetDSConfig.append(Dataset_conf_ds(element[0],\n element[1], element[2], element[3]))\n return listOfDatasetDSConfig\n\n def create_ds_config_ds(self):\n \"\"\"\n Insert a dataset configuration of Dataset in the database return it 
id\n The ds_conf_ds contain:\n - value of the creation\n - FK of the configuration\n - FK of the dataset\n\n :return: id Dataset_conf_ds\n :rtype int\n \"\"\"\n sqlObj = _DS_config_DS_SQL()\n value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(\n self.value_configuration, self.FK_id_configuration_DCT_DCD,\n self.FK_id_dataset_DS_DCD)\n self.id_ds_conf_ds = value_id_ds_conf_ds\n return value_id_ds_conf_ds\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mai 15 11:34:22 2018\n\n@author: Diogo Leite\n\"\"\"\n\nfrom SQL_obj_new.Dataset_config_dataset_new_sql import _DS_config_DS_SQL\n\nclass Dataset_conf_ds(object):\n \"\"\"\n This class treat the datasets configuration connection tables object has it exists in DATASET_CONF_DS table database\n\n NOTE: It consistes on a conection class (N to N) to know for each dataset with a given configuration\n\n By default, all FK are in the lasts positions in the parameters declaration\n \"\"\" \n\n def __init__(self, id_ds_conf_ds = -1, value_configuration = -1, FK_id_configuration_DCT_DCD = -1, FK_id_dataset_DS_DCD = -1):\n \"\"\"\n Constructor of the DDI_interactionDB object. All the parameters have a default value\n\n :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown\n :param value_configuration: value of the bins - -1 if unknown\n :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown\n :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)\n\n :type id_ds_conf_ds: int - not required\n :type value_configuration: int - not required\n :type FK_id_configuration_DCT_DCD: text (date format) - required \n :type FK_id_dataset_DS_DCD: int - required \n \"\"\"\n\n self.id_ds_conf_ds = id_ds_conf_ds\n self.value_configuration = value_configuration\n self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD\n self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD\n\n def get_all_datasets_conf_ds():\n \"\"\"\n return an array with all the configurations of datasets in the database\n\n :return: array of datasets configurations\n :rtype: array(DDI_interaction_DB)\n \"\"\"\n listOfDatasetDSConfig = []\n sqlObj = _DS_config_DS_SQL()\n results = sqlObj.select_all_DDI_DB()\n for element in results:\n listOfDatasetDSConfig.append(Dataset_conf_ds(element[0], element[1], element[2], element[3]))\n return listOfDatasetDSConfig\n\n def 
create_ds_config_ds(self):\n \"\"\"\n Insert a dataset configuration of Dataset in the database return it id\n The ds_conf_ds contain:\n - value of the creation\n - FK of the configuration\n - FK of the dataset\n\n :return: id Dataset_conf_ds\n :rtype int\n \"\"\"\n\n\n sqlObj = _DS_config_DS_SQL()\n value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(self.value_configuration, self.FK_id_configuration_DCT_DCD, self.FK_id_dataset_DS_DCD)\n \n self.id_ds_conf_ds = value_id_ds_conf_ds\n return value_id_ds_conf_ds",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
#LIBRERIAS
import cv2
import numpy as np
#FUNCION: recibe una imagen y te devuelve las coordenadas de las caras
def face_detector(img, face_cascade, eye_cascade, face_f):
    """Detect a face near the previously known face region.

    Expands the previous face box ``face_f`` (x, y, w, h) by 10% on each side,
    runs the Haar face cascade inside that window, and accepts the first face
    of plausible size (100-125 px) that also contains at least two detected
    eyes.

    Fixes vs. the original:
    - expansion coordinates are clamped to the image, since negative slice
      indices wrap around in numpy and produced a wrong/empty ROI;
    - ``int(round(...))`` so slice indices are ints (py2 round() is a float);
    - isinstance() instead of ``type(...) ==``; eyes counted with len().

    :param img: BGR frame (numpy array).
    :param face_cascade: cv2.CascadeClassifier for faces.
    :param eye_cascade: cv2.CascadeClassifier for eyes.
    :param face_f: (x, y, w, h) of the previous detection (the whole frame
        on the first call).
    :return: the accepted (x, y, w, h) array, or None when nothing matches.
        NOTE(review): coordinates are relative to the expanded ROI, not to
        img — confirm callers expect that.
    """
    xf, yf, wf, hf = face_f[0], face_f[1], face_f[2], face_f[3]
    xi, yi = 0, 0
    wi = img.shape[1]
    hi = img.shape[0]
    c = float(0.1)  # widen the previous face box by 10% per side
    print("face_f: ", xf, xf + wf, yf, yf + hf)

    if xf != xi or yf != yi or wf != wi or hf != hi:
        # face_f is smaller than the frame: widen the search window, clamping
        # at 0 so negative slice starts cannot wrap around the image.
        y1 = max(0, int(round(yf - c * hf)))
        y2 = int(round(yf + hf + c * hf))
        x1 = max(0, int(round(xf - c * wf)))
        x2 = int(round(xf + wf + c * wf))
        roi_f = img[y1:y2, x1:x2]
        print("Face apertura: ", x1, x2, y1, y2)
        cv2.imshow('Face apertura', roi_f)
    else:
        # face_f already covers the frame; no expansion needed.
        roi_f = img[yf:yf + hf, xf:xf + wf]

    # Grayscale gives the cascades a cleaner signal.
    gray_img = cv2.cvtColor(roi_f, cv2.COLOR_BGR2GRAY)
    cv2.imshow("gray_img", gray_img)

    faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.04,
                                          minNeighbors=5)
    print("Faces: ", faces)

    if isinstance(faces, np.ndarray):
        for flag, (x, y, w, h) in enumerate(faces):
            # Only accept faces of roughly the expected on-screen size.
            if 100 <= w <= 125 and 100 <= h <= 125:
                print("Face: ", x, y, w, h)
                roi_gray = gray_img[y:y + h, x:x + w]
                cv2.imshow("roi_gray", roi_gray)
                eyes = eye_cascade.detectMultiScale(roi_gray)
                # Require >= 2 "eyes" (an open mouth sometimes shows as a
                # third) to confirm this really is a face.
                if len(eyes) >= 2:
                    print("faces[flag]", faces[flag])
                    return faces[flag]
|
normal
|
{
"blob_id": "1df3a5dc8ed767e20d34c2836eed79872a21a016",
"index": 9948,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef face_detector(img, face_cascade, eye_cascade, face_f):\n xf = face_f[0]\n yf = face_f[1]\n wf = face_f[2]\n hf = face_f[3]\n xi = 0\n yi = 0\n wi = img.shape[1]\n hi = img.shape[0]\n c = float(0.1)\n print('face_f: ', xf, xf + wf, yf, yf + hf)\n if xf != xi or yf != yi or wf != wi or hf != hi:\n y1 = yf - round(c * hf)\n y2 = yf + hf + round(c * hf)\n x1 = xf - round(c * wf)\n x2 = xf + wf + round(c * wf)\n roi_f = img[y1:y2, x1:x2]\n print('Face apertura: ', x1, x2, y1, y2)\n cv2.imshow('Face apertura', roi_f)\n else:\n roi_f = img[face_f[1]:face_f[1] + face_f[3], face_f[0]:face_f[0] +\n face_f[2]]\n gray_img = cv2.cvtColor(roi_f, cv2.COLOR_BGR2GRAY)\n cv2.imshow('gray_img', gray_img)\n faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.04,\n minNeighbors=5)\n print('Faces: ', faces)\n if type(faces) == np.ndarray:\n flag = -1\n for x, y, w, h in faces:\n flag = flag + 1\n if w >= 100 and w <= 125 and h >= 100 and h <= 125:\n print('Entro en el if de tamaño')\n print('Face: ', x, y, w, h)\n roi_gray = gray_img[y:y + h, x:x + w]\n cv2.imshow('roi_gray', roi_gray)\n eyes = eye_cascade.detectMultiScale(roi_gray)\n c_eyes = 0\n for ex, ey, ew, eh in eyes:\n c_eyes = c_eyes + 1\n if c_eyes >= 2:\n print('faces[flag]', faces[flag])\n return faces[flag]\n",
"step-3": "import cv2\nimport numpy as np\n\n\ndef face_detector(img, face_cascade, eye_cascade, face_f):\n xf = face_f[0]\n yf = face_f[1]\n wf = face_f[2]\n hf = face_f[3]\n xi = 0\n yi = 0\n wi = img.shape[1]\n hi = img.shape[0]\n c = float(0.1)\n print('face_f: ', xf, xf + wf, yf, yf + hf)\n if xf != xi or yf != yi or wf != wi or hf != hi:\n y1 = yf - round(c * hf)\n y2 = yf + hf + round(c * hf)\n x1 = xf - round(c * wf)\n x2 = xf + wf + round(c * wf)\n roi_f = img[y1:y2, x1:x2]\n print('Face apertura: ', x1, x2, y1, y2)\n cv2.imshow('Face apertura', roi_f)\n else:\n roi_f = img[face_f[1]:face_f[1] + face_f[3], face_f[0]:face_f[0] +\n face_f[2]]\n gray_img = cv2.cvtColor(roi_f, cv2.COLOR_BGR2GRAY)\n cv2.imshow('gray_img', gray_img)\n faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.04,\n minNeighbors=5)\n print('Faces: ', faces)\n if type(faces) == np.ndarray:\n flag = -1\n for x, y, w, h in faces:\n flag = flag + 1\n if w >= 100 and w <= 125 and h >= 100 and h <= 125:\n print('Entro en el if de tamaño')\n print('Face: ', x, y, w, h)\n roi_gray = gray_img[y:y + h, x:x + w]\n cv2.imshow('roi_gray', roi_gray)\n eyes = eye_cascade.detectMultiScale(roi_gray)\n c_eyes = 0\n for ex, ey, ew, eh in eyes:\n c_eyes = c_eyes + 1\n if c_eyes >= 2:\n print('faces[flag]', faces[flag])\n return faces[flag]\n",
"step-4": "#LIBRERIAS\nimport cv2\nimport numpy as np\n\n#FUNCION: recibe una imagen y te devuelve las coordenadas de las caras\ndef face_detector(img, face_cascade, eye_cascade, face_f): \n\n #variables face_f\n xf = face_f[0]\n yf = face_f[1]\n wf = face_f[2]\n hf = face_f[3]\n \n #variables img\n xi = 0\n yi = 0\n wi = img.shape[1]\n hi = img.shape[0]\n\n #apertura de face_f con relacion a la img\n c = float(0.1) #esto es un 10 %\n \n print(\"face_f: \", xf, xf + wf, yf, yf + hf)\n #roi_i = img[yf: yf + hf, xf: xf + wf]\n #cv2.imshow(\"roi_i\", roi_i)\n\n if xf != xi or yf != yi or wf != wi or hf != hi: #(tendre que ver si AND o OR)\n #face_f no es igual a img, hace falta la apertura\n \n y1 = yf - round(c * hf)\n y2 = yf + hf + round(c * hf)\n x1 = xf - round(c * wf)\n x2 = xf + wf + round(c * wf)\n\n roi_f = img[y1: y2, x1: x2]\n \n print(\"Face apertura: \", x1, x2, y1, y2)\n cv2.imshow('Face apertura',roi_f)\n\n else:\n\n #face_f es igual a img, no hace falta la apertura\n \n roi_f = img[face_f[1] : face_f[1] + face_f[3], face_f[0] : face_f[0] + face_f[2]]\n\n #cv2.imshow('roi_f',roi_f)\n\n\n\n #paso el roi_f a gris para un mejor tratamiento\n gray_img = cv2.cvtColor(roi_f,cv2.COLOR_BGR2GRAY)\n cv2.imshow(\"gray_img\",gray_img)\n \n #aplicar el clasificador de caras sobre la imagen y guardo el resultado en faces: seran la x, y, height y width\n faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.04, minNeighbors=5)\n print(\"Faces: \", faces)\n\n if type(faces) == np.ndarray:\n\n flag = -1\n\n for x,y,w,h in faces:\n\n flag = flag + 1\n\n #print(\"Face: \", x,y,w,h)\n \n if w >= 100 and w <= 125 and h >= 100 and h <= 125:\n print(\"Entro en el if de tamaño\")\n #Region Of Interest\n print(\"Face: \", x,y,w,h)\n roi_gray = gray_img[y:y+h, x:x+w]\n \n cv2.imshow(\"roi_gray\", roi_gray)\n\n #aplico el clasificador de ojos sobre la imagen de interes que se supone que es una cara y guardo el resultado en eyes\n eyes = 
eye_cascade.detectMultiScale(roi_gray)\n \n c_eyes = 0\n\n for ex,ey,ew,eh in eyes:\n \n c_eyes = c_eyes + 1\n\n if c_eyes >= 2: #si hay mínimo dos ojos (a veces la boca abierta la detecta como un tercer ojo), es una cara\n print(\"faces[flag]\", faces[flag])\n return faces[flag]\n \n \n \n \n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
def get(url):
    """Fetch *url* with an HTTP GET and return the response body as text."""
    response = requests.get(url)
    return response.text
|
normal
|
{
"blob_id": "671ecf23df1da659d186014afa738d0608ad404d",
"index": 9251,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get(url):\n return requests.get(url).text\n",
"step-3": "import requests\n\n\ndef get(url):\n return requests.get(url).text\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/python2.7
import sys
import datetime
import psycopg2
import json
import collections
from pprint import pprint
from pyral import Rally, rallyWorkset
import copy
import os
import argparse
from ConfigParser import SafeConfigParser
import traceback
# NOTE: `global` is a no-op at module scope (each function below declares it
# again for itself); kept here as documentation of the module-wide globals.
global rally
global server_name
"""
WARNING: This was hacked together and set up to do some quick and
dirty work. Please make no judgements on the quality of the
code.
"""
## TODO: Convert all the lookups to use the cached information
## DONE: Adjust the command line so the environment can be passed
## DONE: Import the data again, this time using an order field, then order the query
##
## Since we are creating all the objects, perhaps if we log them it will be faster. The information will be cached and we won't need to make additional queries
## This will save on server performance and make the script faster
# Module-level caches mapping names -> Rally refs/oids so repeated lookups
# avoid extra server round-trips (see the TODO above about using them more).
story_project_ref = {}
story_ref = {}
testcase_ref = {}
defect_project_ref = {}
defect_ref = {}
portfolio_item_ref = {}
workspace_name = ""
user_names = {}  # user email -> user ref (populated by getUserRef)
project_names = {}  # project name -> project ref (populated by getProjectRef)
debug = 1  # non-zero enables the verbose diagnostic printing throughout
# get the first instance of a user
## Get's a user ref for Rally
## First time, it will query the system and add it to the dictionary
## Subsequent calls will have cached user information, speeding up the system
def getUserRef(user_name):
    """Return the Rally ref for *user_name*, caching lookups in user_names.

    Non-production servers carry the environment name in the user's email
    domain, so the address is rewritten before the lookup.
    """
    global rally
    global server_name
    global debug
    if server_name in ("integrations", "partners"):
        user_name = user_name.replace("@acme.com",
                                      "@" + server_name + ".acme.com")
    if debug:
        print(user_names.items())
    if user_name not in user_names:
        if debug:
            print("Adding name %s " % user_name)
        # First sighting: query Rally once and remember the ref.
        user_names[user_name] = rally.getUserInfo(username=user_name).pop(0).ref
    elif debug:
        print("Found %s" % user_name)
    return user_names[user_name]
## Get's a project ref for Rally
## First time, it will query the system and add it to the dictionary
## Subsequent calls will have cached information, speeding up the system
def getProjectRef(project_name):
    """Return the Rally ref for *project_name*, caching lookups.

    Exits the process with status 1 if the project cannot be resolved.
    Fix: ``except Exception, details`` (Python-2-only syntax) replaced with
    the ``as`` form, valid on both Python 2.7 and 3.
    """
    global rally
    global project_names
    if debug:
        print("Items:\n")
        print(project_names.items())
    # Cache project refs so we do not query the server for every call.
    if project_name in project_names:
        if debug:
            print("Found %s" % project_name)
        value = project_names[project_name]
    else:
        if debug:
            print("Adding name %s " % project_name)
        try:
            value = rally.getProject(project_name).ref
            project_names[project_name] = value
        except Exception as details:
            sys.stderr.write("ERROR: %s \n" % details)
            sys.exit(1)
    return value
def getIterationRef(piName):
    """Return the OID of the Iteration named *piName*, or None if not found.

    Note: despite the name this returns an OID (pe.oid), not a ref URL.
    Fix: py2-only print statements replaced with the parenthesized form
    already used by getUserRef/getProjectRef (valid on Python 2 and 3).
    """
    global rally
    if debug:
        print("Getting Iteration")
    collection = rally.get('Iteration')
    assert collection.__class__.__name__ == 'RallyRESTResponse'
    if not collection.errors:
        for pe in collection:
            name = '%s' % pe.Name
            if debug:
                print(pe.Name)
            if name == piName:
                # First match wins.
                return pe.oid
def getPortfolioItemInitiativeRef(piName):
    """Return the OID of the PortfolioItem/Initiative named *piName*, or None.

    Fix: py2-only print statement replaced with the parenthesized form.
    """
    global rally
    if debug:
        print("Getting Initiative Ref")
    collection = rally.getCollection("https://us1.rallydev.com/slm/webservice/v2.0/portfolioitem/initiative?")
    assert collection.__class__.__name__ == 'RallyRESTResponse'
    if not collection.errors:
        for pe in collection:
            name = '%s' % pe.Name
            if name == piName:
                # First match wins; note this is an OID, not a ref URL.
                return pe.oid
def getPortfolioItemThemeRef(piName):
    """Return the OID of the PortfolioItem/Theme named *piName*, or None.

    Fix: py2-only print statement replaced with the parenthesized form.
    """
    global rally
    if debug:
        print("Getting Theme Ref")
    collection = rally.getCollection("https://us1.rallydev.com/slm/webservice/v2.0/portfolioitem/theme?")
    assert collection.__class__.__name__ == 'RallyRESTResponse'
    if not collection.errors:
        for pe in collection:
            name = '%s' % pe.Name
            if name == piName:
                # First match wins; note this is an OID, not a ref URL.
                return pe.oid
def getPortfolioItemFeatureRef(piName):
    """Return the OID of the PortfolioItem/Feature named *piName*, or None.

    Fix: py2-only print statements replaced with the parenthesized form.
    """
    global rally
    if debug:
        print("Getting Feature Ref")
    collection = rally.getCollection("https://us1.rallydev.com/slm/webservice/v2.0/portfolioitem/feature?")
    assert collection.__class__.__name__ == 'RallyRESTResponse'
    if not collection.errors:
        for pe in collection:
            name = '%s' % pe.Name
            if debug:
                print(pe.Name)
            if name == piName:
                if debug:
                    print("Feature Found")
                # First match wins; note this is an OID, not a ref URL.
                return pe.oid
def getPreliminaryEstimateRef(object_value):
    """Return the ref URL of the PreliminaryEstimate named *object_value*, or None.

    Fix: py2-only print statement replaced with the parenthesized form;
    typo in the debug message ("Estiamte") corrected.
    """
    global rally
    if debug:
        print("Getting Prelim Estimate")
    collection = rally.getCollection("https://us1.rallydev.com/slm/webservice/v2.0/preliminaryestimate?")
    assert collection.__class__.__name__ == 'RallyRESTResponse'
    if not collection.errors:
        for pe in collection:
            name = '%s' % pe.Name
            if name == object_value:
                # First match wins.
                return pe.ref
def getReleaseRef(object_value):
    """Return the ref URL of the first Release named *object_value*, or None.

    Fix: py2-only print statement replaced with the parenthesized form.
    """
    global rally
    if debug:
        print("Getting Release Data")
    collection = rally.get('Release')
    assert collection.__class__.__name__ == 'RallyRESTResponse'
    if not collection.errors:
        for pe in collection:
            name = '%s' % pe.Name
            if name == object_value:
                # First match wins (release names may repeat across projects).
                return pe.ref
def getUserStoryRef(object_value):
    """Return the ref of the UserStory named *object_value* (or None).

    Queries with projectScopeDown so stories in child projects are found too.
    """
    global rally
    if debug:
        sys.stdout.write("Getting User Story Data\n")
    #print "Scope is : " + rally.getProject().Name
    query_args = {"projectScopeDown" : "True", "Project" : "Online Store"}
    response = rally.get('UserStory', kwargs=query_args)
    #pprint(response)
    assert response.__class__.__name__ == 'RallyRESTResponse'
    if response.errors:
        return None
    for story in response:
        if debug:
            sys.stdout.write("%s %s\n" % (story.Name, story.Project))
        if '%s' % story.Name == object_value:
            return story.ref
    return None
def getTestFolderRef(object_value):
    """Return the ref of the TestFolder named *object_value* (or None)."""
    global rally
    debug = 0  # local switch; shadows the module-level debug flag
    if debug:
        sys.stdout.write("Getting TestFolder Data\n")
    response = rally.getCollection('https://us1.rallydev.com/slm/webservice/v2.0/testfolder?')
    # NOTE: this pprint is unconditional in the original and is kept as-is.
    pprint(response)
    assert response.__class__.__name__ == 'RallyRESTResponse'
    if response.errors:
        return None
    for folder in response:
        if debug:
            sys.stdout.write("peName is %s\n" % folder.Name)
        if '%s' % folder.Name == object_value:
            return folder.ref
    return None
def getTestCaseRef(object_value):
    """Return the ref of the TestCase named *object_value* (or None)."""
    global rally
    debug = 0  # local switch; shadows the module-level debug flag
    if debug:
        sys.stdout.write("Getting TestCase Data\n")
    response = rally.getCollection('https://us1.rallydev.com/slm/webservice/v2.0/testcase?')
    #pprint(response)
    assert response.__class__.__name__ == 'RallyRESTResponse'
    if response.errors:
        return None
    for case in response:
        if debug:
            sys.stdout.write("%s\n" % case.Name)
        if '%s' % case.Name == object_value:
            return case.ref
    return None
def myconverter(o):
    """json.dumps ``default`` hook: serialize datetimes as ISO-8601 strings.

    Returns e.g. '2019-05-01T12:30:00' for a datetime.  For any other type it
    raises TypeError (the json convention for an unserializable value).

    Bug fix: the original's mixed tab/space indentation left ``return out``
    outside the ``if``, so non-datetime input raised an UnboundLocalError on
    ``out`` instead of a meaningful error.
    """
    if isinstance(o, datetime.datetime):
        # str(datetime) is 'YYYY-MM-DD HH:MM:SS[.ffffff]'; swapping the space
        # for 'T' yields ISO-8601 (same result as o.isoformat()).
        return o.__str__().replace(" ", "T")
    raise TypeError("Object of type %s is not JSON serializable" % type(o).__name__)
def db(database_name='rally_data'):
    """Open a psycopg2 connection to *database_name* on localhost.

    Bug fix: the ``database_name`` argument used to be ignored -- the dbname
    was hard-coded to 'rally_data'.  The default keeps existing callers
    (which all call ``db()``) behaving exactly as before.
    """
    return psycopg2.connect("dbname=%s user=readonly password=readonly host=localhost" % database_name)
#Get the database values and store them into dictionary.
def query_db(query, args=(), one=False):
    """Run *query* and return rows as a list of column-name -> value dicts.

    When *one* is True, return only the first row (or None for no rows).
    """
    cursor = db().cursor()
    cursor.execute(query, args)
    column_names = [desc[0] for desc in cursor.description]
    rows = [dict(zip(column_names, row)) for row in cursor.fetchall()]
    cursor.connection.close()
    if one:
        return rows[0] if rows else None
    return rows
def getUserStoryRefByName(name):
    """Return the cached ref for user story *name*; exit(1) on a miss.

    Reads the module-level story_ref cache populated as stories are created.
    """
    debug = 0
    if debug:
        sys.stdout.write("debug mode userstoryrefbyname\n")
        sys.stdout.write("searching for user story name %s\n" % name)
    ref = False
    if story_ref.get(name):
        ref = story_ref[name]
    if ref == False:
        pprint(story_ref)
        sys.stderr.write("Error finding user story reference... something has gone wrong")
        sys.exit(1)
    if debug:
        sys.stdout.write("found user story ref %s\n" % ref)
    return ref
def getTestCaseRefByName(name):
    """Return the cached ref for test case *name*; exit(1) on a miss.

    Reads the module-level testcase_ref cache.
    """
    debug = 0
    if debug:
        sys.stdout.write("debug mode test case refbyname\n")
        sys.stdout.write("searching for user story name %s\n" % name)
    ref = False
    if testcase_ref.get(name):
        ref = testcase_ref[name]
    if ref == False:
        pprint(testcase_ref)
        sys.stderr.write("Error finding test case reference... something has gone wrong")
        sys.exit(1)
    if debug:
        sys.stdout.write("found test case ref\n")
    return ref
# When creating a task, the project needs to be filled in. We get that from the user story.
def getProjectRefFromUserStoryName(name):
    """Return the cached project ref of user story *name*; exit(1) on a miss.

    Reads the module-level story_project_ref cache.
    """
    debug = 0
    if debug:
        sys.stdout.write("debug mode projectreffromstoryname\n")
        sys.stdout.write("searching for user story name %s\n" % name)
    ref = False
    if story_project_ref.get(name):
        ref = story_project_ref[name]
    if ref == False:  # Let's check the defect log before we error out
        pprint(story_project_ref)
        sys.stderr.write("Error finding project ref from user story reference... something has gone wrong")
        sys.exit(1)
    if debug:
        sys.stdout.write("found test case ref\n")
    return ref
"""
This determines which fields need to be translated due to being a reference value.
If found, substitute the text with the real value
"""
def replace_values(val, object_type):
    """Translate name-based reference fields of *val* into Rally refs, in place.

    val         -- dict of column -> value for one DB row (already scrub()ed)
    object_type -- the Rally object name being imported (e.g. "Task", "Story")

    NOTE(review): branch order matters.  The Task branch runs first and the
    generic 'project'/'workproduct' branches below explicitly skip Tasks.
    """
    debug = 0
    #print "replace values for "
    #print val
    #print " item type: " + object_type
    if debug:
        print val
    # Tasks carry no project of their own: derive it from the cached project
    # of the parent user story, then swap the story name for its ref.
    if object_type == "Task":
        if debug:
            print "we need to get a project in the task object from the story object"
        val['project'] = getProjectRefFromUserStoryName(val['workproduct'])
        val['workproduct'] = getUserStoryRefByName(val['workproduct'])
        #print "Workproduct %s project %s" % (val['workproduct'], val['project'])
    if val.get("testcase"):
        if debug:
            print "checking for testcase"
        # NOTE(review): resolves via the user-story cache, not a test-case
        # lookup -- presumably intentional for this dataset; confirm.
        val["testcase"] = getUserStoryRefByName(val["testcase"])
    if val.get("testfolder"):
        # NOTE(review): debug is switched on here and stays 1 for every
        # branch after this one.
        debug = 1
        if debug:
            print "checking for testfolder is %s" % val["testfolder"]
        val["testfolder"] = getTestFolderRef(val["testfolder"])
        #print "TestFolder is %s" % (val['testfolder'])
    if val.get("requirement"):
        if debug:
            print "checking for requirement"
        val["requirement"] = getUserStoryRefByName(val["requirement"])
    if val.get("iteration"):
        if debug:
            print "checking for iteration"
        val["iteration"] = getIterationRef(val["iteration"])
    if val.get("portfolioitem"):
        if debug:
            print "checking for parent on user story"
        # Only Stories link to a portfolio item (their parent Feature).
        if object_type == "Story":
            val["portfolioitem"] = getPortfolioItemFeatureRef(val["portfolioitem"])
    if val.get("release"):
        if debug:
            print "checking for release"
        val["release"] = getReleaseRef(val["release"])
    if val.get("project") and (object_type != "Task"):
        if debug:
            print "checking for project"
        val["project"] = getProjectRef(val["project"])
    if val.get("workproduct") and (object_type != "Task"):		#Work product on non tasks can be a user story, so let's look that up.
        if debug:
            print "checking for workproduct"
        val["workproduct"] = getUserStoryRefByName(val["workproduct"])
    if val.get("owner"):
        if debug:
            print "getting user"
        val["owner"] = getUserRef(val["owner"])
    if val.get("preliminaryestimate"):
        if debug:
            print "getting prelim estimate"
        val["preliminaryestimate"] = getPreliminaryEstimateRef(val["preliminaryestimate"])
    if val.get("parent"):		# Parent can mean different objects, depending on where it is referenced. So we determine the type and replace it.
        if debug:
            print "getting parent for " + object_type + "\n"
        # An Initiative's parent is a Theme; a Feature's parent is an Initiative.
        if object_type == "Initiative":
            val["parent"] = getPortfolioItemThemeRef(val["parent"])
        if object_type == "Feature":
            val["parent"] = getPortfolioItemInitiativeRef(val["parent"])
    if val.get("state") :
        if debug:
            print "getting state"
        # State refs are type-specific; resolve against the matching PI type.
        if object_type == "Initiative":
            val["state"] = rally.getState('Initiative', val["state"]).ref
        if object_type == "Feature":
            val["state"] = rally.getState('Feature', val["state"]).ref
        if object_type == "Theme":
            val["state"] = rally.getState('Theme', val["state"]).ref
"""
We need to clear out none/nulls from the json output.
"""
def scrub(x):
    """Return a deep copy of *x* with every None replaced by ''.

    Recurses into dicts and sequences; scalars other than None pass through
    unchanged.
    """
    if x is None:
        return ''
    cleaned = copy.deepcopy(x)
    if isinstance(x, dict):
        for key in cleaned:
            cleaned[key] = scrub(cleaned[key])
    elif isinstance(x, (list, tuple)):
        for index, element in enumerate(cleaned):
            cleaned[index] = scrub(element)
    return cleaned
"""
Empty keys cause a problem with Rally, so let's clean them out.
"""
def remove_empty_keys(x):
    """Strip falsy-valued keys plus bookkeeping columns from dict *x* in place.

    Rally rejects empty values, so any key whose value is falsy ('', None,
    0, ...) is deleted.  The 'listing_order' and 'dataset' columns exist only
    to drive import ordering / dataset selection in the database and must not
    be sent to Rally either.

    Fix: use items() instead of the Python-2-only iteritems(); identical
    behavior on Python 2 and also runs under Python 3.  The snapshot list
    lets us delete keys while iterating.
    """
    empty_keys = [k for k, v in x.items() if not v]
    for k in empty_keys:
        del x[k]
    # this was added in to make user story numbers (and other objects) import in a specific order.
    if 'listing_order' in x:
        del x['listing_order']
    # this is to remove the dataset column from import.
    if 'dataset' in x:
        del x['dataset']
def main(args):
    """Load one named dataset from the local Postgres DB into a Rally workspace.

    args -- CLI argument list (sys.argv[1:]); note argparse re-reads sys.argv
            itself below, so the parameter is effectively unused.

    Reads server credentials from config.ini, connects to Rally (API key
    preferred over username/password), then imports every supported object
    type in dependency order, resolving name-based references as it goes.
    Exits non-zero on login or creation failure.
    """
    global rally
    global server_name
    global debug
    login_name = ""
    api_key = ""
    #Parse Command line options
    parser = argparse.ArgumentParser("create_data")
    parser.add_argument("--server", "-s", "--server", required=True, help="Server options = sales, integrations or partner", type=str)
    parser.add_argument("--workspace_name", "-n", "--name", required=True, help="Name of the workspace to update")
    parser.add_argument("--dataset_name", "-d", "--dataset", required=True, help="Name of the dataset to load")
    args = parser.parse_args()
    workspace_name = args.workspace_name
    server_name = args.server
    dataset = args.dataset_name
    # Credentials and server URL come from the per-server section of config.ini.
    config = SafeConfigParser()
    config.read('config.ini')
    if config.has_option(server_name,'server'):
        rally_server = config.get(server_name,'server')
    if config.has_option(server_name,'username'):
        login_name = config.get(server_name,'username')
    if config.has_option(server_name,'password'):
        password = config.get(server_name,'password')
    if config.has_option(server_name,'api_key'):
        api_key = config.get(server_name,'api_key')
    #print api_key + login_name + password + rally_server + server_name
    #login_name = "thomas.mcquitty@acme.com"
    if debug:
        print "server name is %s" % args.server
        print "workspace name is %s" % args.workspace_name
    valid_servers = ["integrations", "sales", "partners"]
    if server_name.lower() not in valid_servers:
        print "You have selected an invalid server. Please use a valid option."
        sys.exit(1)
    # Prefer an API key when one is configured; otherwise fall back to
    # username/password login.
    try:
        if api_key == "":
            if debug:
                print "Login/password connection"
            rally = Rally(rally_server, login_name, password, workspace=workspace_name, project='Online Store')
        if api_key != "":
            if debug:
                print "Api connection"
            rally = Rally(rally_server, apikey=api_key, workspace=workspace_name, project='Online Store')
    except Exception, details:
        print traceback.print_exc()
        print "details %s " % details
        print ("Error logging in")
        sys.exit(1)
    if debug:
        rally.enableLogging('output.log')
    # Import order matters: parents (Theme/Initiative/Feature, folders,
    # releases) must exist before the children that reference them.
    objects = ["Release", "Iteration", "Theme", "Initiative", "Feature", "Story", "Defect", "TestFolder", "TestSet", "TestCase", "TestCaseStep", "TestCaseResult", "Task", "FundingIncrement"]
    for item_type in objects:
        item_text = "%s" % item_type
        print "Processing " + item_text + "..."
        # NOTE(review): table and dataset names are concatenated straight into
        # the SQL string -- acceptable only because both come from trusted
        # CLI/config input, not from end users.
        query_text = "select * from " + item_text + " where dataset = '" + dataset + "';"
        my_query = query_db(query_text)
        #process each item. We will have to do substitution for values that are references in the data, like projects and user names
        for item in my_query:
            item = scrub(item)
            replace_values(item, item_text)
            remove_empty_keys(item) 	#remove empty keys, they cause an issue when loading and Rally expects a value.
            # Round-trip through json to normalize datetimes (myconverter).
            output = json.dumps(item, default = myconverter)
            output = json.loads(output)
            try:
                if debug:
                    print output
                    print "creating object " + item_text + "\n\n"
                record = rally.create(item_text, output)
                #pprint(record)
                debug = 0
                #build array of stories, defects, testsets and test cases for quick reference
                if (item_text == "Story") or (item_text == "Defect") or (item_text == "TestSet") or (item_text == "TestCase"):
                    if debug:
                        print "Debugging???"
                        print "Name %s story ref %s Project %s " % (record.Name, record.ref, record.Project.ref)
                    story_project_ref[record.Name] = record.Project.ref
                    story_ref[record.Name] = record.ref
                # Build array of feature, themes and initiatives added to the workspace
                if(item_text == "Theme") or (item_text == "Initiative") or (item_text == "Feature"):
                    if debug:
                        print "adding to theme list"
                        print "Name %s portfolio ref %s" % (record.Name, record.ref)
                    portfolio_item_ref[record.Name] = record.ref
            except Exception, details:
                sys.stderr.write("error creating %s\n\n" % item_text)
                sys.stderr.write("ERROR: %s \n" % details)
                sys.exit(1)
            debug = 0
# Script entry point: run the importer with the CLI args, exit 0 on success.
if __name__ == '__main__':
    main(sys.argv[1:])
    sys.exit(0)
|
normal
|
{
"blob_id": "be0afa5184f753ed5f9a483379a4d81cd7af4886",
"index": 6845,
"step-1": "#!/usr/bin/python2.7\nimport sys\nimport datetime\nimport psycopg2\nimport json\nimport collections\nfrom pprint import pprint\nfrom pyral import Rally, rallyWorkset\nimport copy\nimport os\nimport argparse\nfrom ConfigParser import SafeConfigParser\nimport traceback\nglobal rally\nglobal server_name\n\n\"\"\"\n\nWARNING: This was hacked together and set up to do some quick and \n dirty work. Please make no judgements on the quality of the \n code.\n\n\"\"\"\n\n## TODO: Convert all the lookups to use the cached information\n## DONE: Adjust the command line so the environment can be passed\n## DONE: Import the data again, this time using an order field, then order the query\n## \n\n## Since we are creating all the objects, perhaps if we log them it will be faster. The information will be cached and we won't need to make additional queries\n## This will save on server performance and make the script faster\n\nstory_project_ref = {}\nstory_ref = {}\ntestcase_ref = {}\ndefect_project_ref = {}\ndefect_ref = {}\nportfolio_item_ref = {}\nworkspace_name = \"\"\n\n\nuser_names = {}\nproject_names = {}\ndebug = 1\n\n# get the first instance of a user\n## Get's a user ref for Rally\n## First time, it will query the system and add it to the dictionary\n## Subsequent calls will have cached user information, speeding up the system\ndef getUserRef(user_name):\n global rally\n global server_name\n global debug\n\n # If we need to work on another instance, say integration or partners, we need to change the email address of the users\n if server_name == \"integrations\" or server_name == \"partners\":\n\tuser_name = user_name.replace(\"@acme.com\", \"@\" + server_name + \".acme.com\")\n\n if debug:\n print(user_names.items())\n \n if user_name in user_names:\n if debug:\n print(\"Found %s\" % user_name)\n value = user_names[user_name]\n else:\n if debug:\n print(\"Adding name %s \" %user_name)\n value = rally.getUserInfo(username=user_name).pop(0).ref\n 
user_names[user_name] = value\n \n return value\n\n## Get's a project ref for Rally\n## First time, it will query the system and add it to the dictionary\n## Subsequent calls will have cached information, speeding up the system\ndef getProjectRef(project_name):\n global rally\n global project_names\n\n if debug:\n print(\"Items:\\n\")\n print(project_names.items())\n\n #let's build a list of project names and reference ids, so we don't have to query the system each time.\n if project_name in project_names:\n if debug:\n print(\"Found %s\" % project_name)\n value = project_names[project_name]\n else:\n if debug:\n print(\"Adding name %s \" %project_name)\n try:\n\t\tvalue = rally.getProject(project_name).ref\n \tproject_names[project_name] = value\n\texcept Exception, details:\n\t sys.stderr.write(\"ERROR: %s \\n\" % details)\n \t\tsys.exit(1)\n\n return value\n\ndef getIterationRef(piName):\n global rally\n\n if debug:\n print \"Getting Iteration\" \n\n collection = rally.get('Iteration')\n assert collection.__class__.__name__ == 'RallyRESTResponse'\n if not collection.errors:\n for pe in collection:\n name = '%s' % pe.Name\n if debug:\n print pe.Name\n if(name == piName):\n #print pe.oid, pe.Name\n return pe.oid\n\ndef getPortfolioItemInitiativeRef(piName):\n global rally\n \n if debug:\n print \"Getting Initiative Ref\"\n\n collection = rally.getCollection(\"https://us1.rallydev.com/slm/webservice/v2.0/portfolioitem/initiative?\")\n assert collection.__class__.__name__ == 'RallyRESTResponse'\n if not collection.errors:\n for pe in collection:\n name = '%s' % pe.Name\n if(name == piName):\n #print pe.oid, pe.Name\n return pe.oid\n\ndef getPortfolioItemThemeRef(piName):\n global rally\n \n if debug:\n print \"Getting Theme Ref\"\n\n collection = rally.getCollection(\"https://us1.rallydev.com/slm/webservice/v2.0/portfolioitem/theme?\")\n assert collection.__class__.__name__ == 'RallyRESTResponse'\n if not collection.errors:\n for pe in collection:\n name = '%s' % 
pe.Name\n if(name == piName):\n #print pe.oid, pe.Name\n return pe.oid\n\ndef getPortfolioItemFeatureRef(piName):\n global rally\n \n if debug:\n print \"Getting Feature Ref\"\n\n collection = rally.getCollection(\"https://us1.rallydev.com/slm/webservice/v2.0/portfolioitem/feature?\")\n #pprint(collection)\n assert collection.__class__.__name__ == 'RallyRESTResponse'\n if not collection.errors:\n for pe in collection:\n name = '%s' % pe.Name\n if debug:\n print pe.Name\n if(name == piName):\n if debug:\n print \"Feature Found\"\n #print pe.oid, pe.Name\n return pe.oid\n\ndef getPreliminaryEstimateRef(object_value):\n global rally\n if debug:\n print \"Getting Prelim Estiamte\"\n\n collection = rally.getCollection(\"https://us1.rallydev.com/slm/webservice/v2.0/preliminaryestimate?\")\n #pprint(collection)\n assert collection.__class__.__name__ == 'RallyRESTResponse'\n if not collection.errors:\n for pe in collection:\n name = '%s' % pe.Name\n if(name == object_value):\n #print pe.ref, pe.Name, pe.oid\n return pe.ref\n\ndef getReleaseRef(object_value):\n global rally\n\n if debug:\n print \"Getting Release Data\"\n collection = rally.get('Release')\n #pprint(collection)\n assert collection.__class__.__name__ == 'RallyRESTResponse'\n if not collection.errors:\n for pe in collection:\n name = '%s' % pe.Name\n if(name == object_value):\n #print pe.ref, pe.Name, pe.Project\n return pe.ref\n\ndef getUserStoryRef(object_value):\n global rally\n if debug:\n print \"Getting User Story Data\"\n #print \"Scope is : \" + rally.getProject().Name\n args = {\"projectScopeDown\" : \"True\", \"Project\" : \"Online Store\"}\n collection = rally.get('UserStory', kwargs=args)\n #pprint(collection)\n assert collection.__class__.__name__ == 'RallyRESTResponse'\n if not collection.errors:\n for pe in collection:\n name = '%s' % pe.Name\n if debug:\n print pe.Name, pe.Project\n if(name == object_value):\n #print pe.ref, pe.Name, pe.Project\n return pe.ref\n\ndef 
getTestFolderRef(object_value):\n global rally\n debug = 0\n if debug:\n print \"Getting TestFolder Data\"\n collection = rally.getCollection('https://us1.rallydev.com/slm/webservice/v2.0/testfolder?')\n pprint(collection)\n assert collection.__class__.__name__ == 'RallyRESTResponse'\n if not collection.errors:\n for pe in collection:\n name = '%s' % pe.Name\n if debug:\n print \"peName is %s\" % pe.Name\n if(name == object_value):\n #print pe.ref, pe.Name, pe.Project\n return pe.ref\n\ndef getTestCaseRef(object_value):\n global rally\n debug = 0\n if debug:\n print \"Getting TestCase Data\"\n collection = rally.getCollection('https://us1.rallydev.com/slm/webservice/v2.0/testcase?')\n #pprint(collection)\n assert collection.__class__.__name__ == 'RallyRESTResponse'\n if not collection.errors:\n for pe in collection:\n name = '%s' % pe.Name\n if debug:\n print pe.Name\n if(name == object_value):\n #print pe.ref, pe.Name,\n return pe.ref\n\ndef myconverter(o):\n if isinstance(o, datetime.datetime):\n\tout = o.__str__()\n\tout = out.replace(\" \", \"T\")\n return out\n\ndef db(database_name='rally_data'):\n return psycopg2.connect(\"dbname=rally_data user=readonly password=readonly host=localhost\")\n\n#Get the database values and store them into dictionary.\ndef query_db(query, args=(), one=False):\n cur = db().cursor()\n cur.execute(query, args)\n r = [dict((cur.description[i][0], value) \\\n for i, value in enumerate(row)) for row in cur.fetchall()]\n cur.connection.close()\n return (r[0] if r else None) if one else r\n\ndef getUserStoryRefByName(name):\n\tdebug = 0\n\tif debug:\n\t\tprint \"debug mode userstoryrefbyname\"\n print \"searching for user story name %s\" % name\n\tref = story_ref[name] if story_ref.get(name) else False\n\tif ref == False:\n\t\tpprint(story_ref)\n\t\tsys.stderr.write(\"Error finding user story reference... 
something has gone wrong\")\n\t\tsys.exit(1)\n\tif debug:\n\t\tprint \"found user story ref %s\" % ref\n\treturn ref\n\ndef getTestCaseRefByName(name):\n debug = 0\n\tif debug:\n print \"debug mode test case refbyname\"\n print \"searching for user story name %s\" % name\n ref = testcase_ref[name] if testcase_ref.get(name) else False\n if ref == False:\n pprint(testcase_ref)\n sys.stderr.write(\"Error finding test case reference... something has gone wrong\")\n sys.exit(1)\n if debug:\n print \"found test case ref\"\n return ref\n\n# When creating a task, the project needs to be filled in. We get that from the user story.\ndef getProjectRefFromUserStoryName(name):\n debug = 0\n\tif debug:\n print \"debug mode projectreffromstoryname\"\n\t\tprint \"searching for user story name %s\" % name\n ref = story_project_ref[name] if story_project_ref.get(name) else False\n if ref == False: #Let's check the defect log before we error out\n\t pprint(story_project_ref)\n\t sys.stderr.write(\"Error finding project ref from user story reference... something has gone wrong\")\n\t sys.exit(1)\n if debug:\n print \"found test case ref\"\n return ref\n\n\"\"\"\nThis determines which fields need to be translated due to being a reference value. 
\nIf found, substitute the text with the real value\n\"\"\"\ndef replace_values(val, object_type):\n\tdebug = 0\n\t#print \"replace values for \"\n\t#print val\n\t#print \" item type: \" + object_type\n\tif debug:\n\t\tprint val\n\n\tif object_type == \"Task\":\n\t\tif debug:\n\t\t\tprint \"we need to get a project in the task object from the story object\"\n val['project'] = getProjectRefFromUserStoryName(val['workproduct'])\n\t\tval['workproduct'] = getUserStoryRefByName(val['workproduct'])\n\t\t#print \"Workproduct %s project %s\" % (val['workproduct'], val['project'])\n\tif val.get(\"testcase\"):\n\t\tif debug:\n\t\t\tprint \"checking for testcase\"\n\t\tval[\"testcase\"] = getUserStoryRefByName(val[\"testcase\"])\n if val.get(\"testfolder\"):\n debug = 1\n if debug:\n print \"checking for testfolder is %s\" % val[\"testfolder\"]\n val[\"testfolder\"] = getTestFolderRef(val[\"testfolder\"])\n #print \"TestFolder is %s\" % (val['testfolder'])\n\n if val.get(\"requirement\"):\n if debug:\n print \"checking for requirement\"\n val[\"requirement\"] = getUserStoryRefByName(val[\"requirement\"])\n if val.get(\"iteration\"):\n if debug:\n print \"checking for iteration\"\n val[\"iteration\"] = getIterationRef(val[\"iteration\"])\n\tif val.get(\"portfolioitem\"):\n\t\tif debug:\n\t\t\tprint \"checking for parent on user story\"\n\t\tif object_type == \"Story\":\n\t\t\tval[\"portfolioitem\"] = getPortfolioItemFeatureRef(val[\"portfolioitem\"])\n if val.get(\"release\"):\n if debug:\n print \"checking for release\"\n val[\"release\"] = getReleaseRef(val[\"release\"])\n\tif val.get(\"project\") and (object_type != \"Task\"):\n\t\tif debug:\n print \"checking for project\"\n val[\"project\"] = getProjectRef(val[\"project\"])\n\tif val.get(\"workproduct\") and (object_type != \"Task\"):\t\t#Work product on non tasks can be a user story, so let's look that up.\n\t\tif debug:\n print \"checking for workproduct\"\n val[\"workproduct\"] = 
getUserStoryRefByName(val[\"workproduct\"]) \n\tif val.get(\"owner\"):\n\t\tif debug:\n\t\t\tprint \"getting user\"\n\t \tval[\"owner\"] = getUserRef(val[\"owner\"])\n if val.get(\"preliminaryestimate\"):\n if debug:\n print \"getting prelim estimate\"\n val[\"preliminaryestimate\"] = getPreliminaryEstimateRef(val[\"preliminaryestimate\"])\n if val.get(\"parent\"):\t\t# Parent can mean different objects, depending on where it is referenced. So we determine the type and replace it.\n if debug:\n print \"getting parent for \" + object_type + \"\\n\"\n\t\tif object_type == \"Initiative\":\n\t\t\tval[\"parent\"] = getPortfolioItemThemeRef(val[\"parent\"]) \n\t\tif object_type == \"Feature\":\n\t\t\tval[\"parent\"] = getPortfolioItemInitiativeRef(val[\"parent\"]) \n if val.get(\"state\") :\n if debug:\n print \"getting state\"\n\t\tif object_type == \"Initiative\":\n\t\t\tval[\"state\"] = rally.getState('Initiative', val[\"state\"]).ref\n\t\tif object_type == \"Feature\":\n\t\t\tval[\"state\"] = rally.getState('Feature', val[\"state\"]).ref\n if object_type == \"Theme\":\n val[\"state\"] = rally.getState('Theme', val[\"state\"]).ref\n\n\n\"\"\"\nWe need to clear out none/nulls from the json output.\n\"\"\"\ndef scrub(x):\n # Converts None to empty string\n ret = copy.deepcopy(x)\n # Handle dictionaries, lits & tuples. 
Scrub all values\n if isinstance(x, dict):\n for k, v in ret.items():\n ret[k] = scrub(v)\n if isinstance(x, (list, tuple)):\n for k, v in enumerate(ret):\n ret[k] = scrub(v)\n # Handle None\n if x is None:\n ret = ''\n # Finished scrubbing\n # Scrub out listing_order from dataset\n return ret\n\n\"\"\"\nEmpty keys cause a problem with Rally, so let's clean them out.\n\"\"\"\ndef remove_empty_keys(x):\n\tempty_keys = [k for k,v in x.iteritems() if not v]\n\tfor k in empty_keys:\n \t\tdel x[k]\n\t#this was added in to make user story numbers (and other objects) import in a specific order.\n\tif 'listing_order' in x:\n\t\tdel x['listing_order']\n\t#this is to remove the dataset column from import. \n\tif 'dataset' in x:\n\t\tdel x['dataset']\n\ndef main(args):\n\tglobal rally\n\tglobal server_name\n\tglobal debug\n\tlogin_name \t= \"\"\n\tapi_key \t= \"\"\t\n\t#Parse Command line options\n parser = argparse.ArgumentParser(\"create_data\")\n parser.add_argument(\"--server\", \"-s\", \"--server\", required=True, help=\"Server options = sales, integrations or partner\", type=str)\n parser.add_argument(\"--workspace_name\", \"-n\", \"--name\", required=True, help=\"Name of the workspace to update\")\n parser.add_argument(\"--dataset_name\", \"-d\", \"--dataset\", required=True, help=\"Name of the dataset to load\")\n args = parser.parse_args()\n workspace_name = args.workspace_name\n server_name = args.server\n\tdataset = args.dataset_name\n\n\n\tconfig = SafeConfigParser()\n\tconfig.read('config.ini')\n\tif config.has_option(server_name,'server'):\n\t\trally_server \t= config.get(server_name,'server')\n\tif config.has_option(server_name,'username'):\n\t\tlogin_name \t= config.get(server_name,'username')\n\tif config.has_option(server_name,'password'):\n\t\tpassword\t= config.get(server_name,'password')\n\tif config.has_option(server_name,'api_key'):\n\t\tapi_key\t\t= config.get(server_name,'api_key')\n\t\n\t\n\t#print api_key + login_name + password + rally_server + 
server_name\n\t#login_name = \"thomas.mcquitty@acme.com\"\n\n if debug:\n\t\tprint \"server name is %s\" % args.server\n \tprint \"workspace name is %s\" % args.workspace_name\n\n valid_servers = [\"integrations\", \"sales\", \"partners\"]\n\tif server_name.lower() not in valid_servers:\n\t\tprint \"You have selected an invalid server. Please use a valid option.\"\n\t\tsys.exit(1)\n\n\ttry:\n\t\tif api_key == \"\":\n\t\t\tif debug:\n\t\t\t\tprint \"Login/password connection\"\n\t\t\trally = Rally(rally_server, login_name, password, workspace=workspace_name, project='Online Store')\n\t\tif api_key != \"\":\n\t\t\tif debug:\n\t\t\t\tprint \"Api connection\"\n\t\t\trally = Rally(rally_server, apikey=api_key, workspace=workspace_name, project='Online Store')\n except Exception, details:\n\t\tprint traceback.print_exc()\n\t\tprint \"details %s \" % details\n print (\"Error logging in\")\n sys.exit(1)\n\n\tif debug:\n\t\trally.enableLogging('output.log')\n\n\tobjects = [\"Release\", \"Iteration\", \"Theme\", \"Initiative\", \"Feature\", \"Story\", \"Defect\", \"TestFolder\", \"TestSet\", \"TestCase\", \"TestCaseStep\", \"TestCaseResult\", \"Task\", \"FundingIncrement\"]\n\n\tfor item_type in objects:\n\t\titem_text = \"%s\" % item_type\n\t\tprint \"Processing \" + item_text + \"...\"\n\t\tquery_text = \"select * from \" + item_text + \" where dataset = '\" + dataset + \"';\"\n\t\tmy_query = query_db(query_text)\n\t\t#process each item. 
We will have to do substitution for values that are references in the data, like projects and user names\n\t\tfor item in my_query:\n\t\t\titem = scrub(item)\n\t\t\treplace_values(item, item_text)\t\n\t\t\tremove_empty_keys(item) \t#remove empty keys, they cause an issue when loading and Rally expects a value.\n\t\t\toutput = json.dumps(item, default = myconverter)\n\t\t\toutput = json.loads(output)\n\t\t\ttry:\n\t\t\t\tif debug:\n\t\t\t\t\tprint output\n\t\t\t\t\tprint \"creating object \" + item_text + \"\\n\\n\"\n\t\t\t\trecord = rally.create(item_text, output)\n\t\t\t\t#pprint(record)\n\t\t\t\tdebug = 0\n\t\t\t\t#build array of stories, defects, testsets and test cases for quick reference\n\t\t\t\tif (item_text == \"Story\") or (item_text == \"Defect\") or (item_text == \"TestSet\") or (item_text == \"TestCase\"):\n\t\t\t\t\tif debug:\n\t\t\t\t\t\tprint \"Debugging???\"\n\t\t\t\t\t\tprint \"Name %s story ref %s Project %s \" % (record.Name, record.ref, record.Project.ref)\n\t\t\t\t\tstory_project_ref[record.Name] = record.Project.ref\n\t\t\t\t\tstory_ref[record.Name] = record.ref\n\t\t\t\t# Build array of feature, themes and initiatives added to the workspace\n\t\t\t\tif(item_text == \"Theme\") or (item_text == \"Initiative\") or (item_text == \"Feature\"):\n\t\t\t\t\tif debug:\n\t\t\t\t\t\tprint \"adding to theme list\"\n\t\t\t\t\t\tprint \"Name %s portfolio ref %s\" % (record.Name, record.ref)\n\t\t\t\t\tportfolio_item_ref[record.Name] = record.ref\n\t\t\texcept Exception, details:\n\t\t\t\tsys.stderr.write(\"error creating %s\\n\\n\" % item_text)\n\t\t\t\tsys.stderr.write(\"ERROR: %s \\n\" % details)\n\t\t\t\tsys.exit(1)\n\t\t\tdebug = 0\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n sys.exit(0)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python
"""Emit every 3-way population combination (plus the Mbuti.DG outgroup).

Reads a tab-separated .ind-style file (population name in column 3) given as
argv[1], collects unique population names in first-seen order, and prints one
tab-separated line per 3-combination, ending in 'Mbuti.DG'.
"""
import sys
import itertools as it

pop_list = []

#with open("/Users/dashazhernakova/Documents/Doby/GenomeRussia/ancientDNA/GR+Lazaridis.ind") as f:
with open(sys.argv[1]) as f:
    for line in f:
        population = line.strip().split("\t")[2]
        if population not in pop_list:
            pop_list.append(population)

for a, b, c in it.combinations(pop_list, 3):
    sys.stdout.write(a + "\t" + b + "\t" + c + "\tMbuti.DG\n")
normal
|
{
"blob_id": "de7cd231aceb2700acb3ecafe36d1ba1f5c1643b",
"index": 6191,
"step-1": "#!/usr/bin/python\nimport sys\nimport itertools as it\n\npop_list = []\n\n#with open(\"/Users/dashazhernakova/Documents/Doby/GenomeRussia/ancientDNA/GR+Lazaridis.ind\") as f:\nwith open(sys.argv[1]) as f:\n\t[pop_list.append(l.strip().split(\"\\t\")[2]) for l in f if l.strip().split(\"\\t\")[2] not in pop_list]\n\ntriplets = it.combinations(pop_list, 3)\nfor a,b,c in triplets:\n\tprint a + \"\\t\" + b + \"\\t\" + c + \"\\tMbuti.DG\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from flask import Flask, request, render_template, redirect
from stories import Story, stories
# from flask_debugtoolbar import DebugToolbarExtension
app = Flask(__name__)
# app.config['SECRET_KEY'] = "secret"
# debug = DebugToolbarExtension(app)
# my original approach involved using a global story variable to store the instances which were in this file
# After looking at the answer code, storing this data in the instance maskes more sense
# story_global = None
@app.route('/')
def home_page():
    """Landing page: let the user pick one of the available Madlib stories."""
    available_stories = stories.values()
    return render_template('index.html', stories=available_stories)
@app.route('/form')
def show_form():
    """Show the answer form for the madlib chosen on the home page.

    Expects a 'madlib' query parameter holding the story title.
    """
    story_title = request.args["madlib"]
    for story in stories.values():
        if story.title == story_title:
            # Return on first match (the original kept scanning).
            return render_template('form.html', s=story, story_title=story_title)
    # Bug fix: an unknown title previously raised NameError (story_for_form
    # was never bound) -> HTTP 500.  Send the user back to the chooser instead.
    return redirect('/')
@app.route("/story")
def show_story():
    """Render the completed madlib using the submitted answers.

    Expects a 'story_title' query parameter plus one parameter per prompt.
    """
    answers = request.args
    story_title = request.args["story_title"]
    for story in stories.values():
        if story.title == story_title:
            # Return on first match (the original kept scanning).
            return render_template("story.html", story_to_gen=story, user_answers=answers)
    # Bug fix: an unknown title previously raised NameError (story_to_gen
    # was never bound) -> HTTP 500.  Send the user back to the chooser instead.
    return redirect('/')
@app.route('/play-again')
def play_again():
    """Send the player back to the home page to choose another story."""
    return redirect('/')
|
normal
|
{
"blob_id": "08ed57ffb7a83973059d62f686f77b1bea136fbd",
"index": 3828,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef home_page():\n \"\"\"Offer user choice of Madlib Games\"\"\"\n return render_template('index.html', stories=stories.values())\n\n\n<mask token>\n\n\n@app.route('/story')\ndef show_story():\n \"\"\"Display Madlib Story\"\"\"\n answers = request.args\n story_title = request.args['story_title']\n for story in stories.values():\n if story.title == story_title:\n story_to_gen = story\n return render_template('story.html', story_to_gen=story_to_gen,\n user_answers=answers)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef home_page():\n \"\"\"Offer user choice of Madlib Games\"\"\"\n return render_template('index.html', stories=stories.values())\n\n\n@app.route('/form')\ndef show_form():\n \"\"\"Show Form for User Input\"\"\"\n story_title = request.args['madlib']\n for story in stories.values():\n if story.title == story_title:\n story_for_form = story\n return render_template('form.html', s=story_for_form, story_title=\n story_title)\n\n\n@app.route('/story')\ndef show_story():\n \"\"\"Display Madlib Story\"\"\"\n answers = request.args\n story_title = request.args['story_title']\n for story in stories.values():\n if story.title == story_title:\n story_to_gen = story\n return render_template('story.html', story_to_gen=story_to_gen,\n user_answers=answers)\n\n\n@app.route('/play-again')\ndef play_again():\n \"\"\"Redirect Home\"\"\"\n return redirect('/')\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home_page():\n \"\"\"Offer user choice of Madlib Games\"\"\"\n return render_template('index.html', stories=stories.values())\n\n\n@app.route('/form')\ndef show_form():\n \"\"\"Show Form for User Input\"\"\"\n story_title = request.args['madlib']\n for story in stories.values():\n if story.title == story_title:\n story_for_form = story\n return render_template('form.html', s=story_for_form, story_title=\n story_title)\n\n\n@app.route('/story')\ndef show_story():\n \"\"\"Display Madlib Story\"\"\"\n answers = request.args\n story_title = request.args['story_title']\n for story in stories.values():\n if story.title == story_title:\n story_to_gen = story\n return render_template('story.html', story_to_gen=story_to_gen,\n user_answers=answers)\n\n\n@app.route('/play-again')\ndef play_again():\n \"\"\"Redirect Home\"\"\"\n return redirect('/')\n",
"step-4": "from flask import Flask, request, render_template, redirect\nfrom stories import Story, stories\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home_page():\n \"\"\"Offer user choice of Madlib Games\"\"\"\n return render_template('index.html', stories=stories.values())\n\n\n@app.route('/form')\ndef show_form():\n \"\"\"Show Form for User Input\"\"\"\n story_title = request.args['madlib']\n for story in stories.values():\n if story.title == story_title:\n story_for_form = story\n return render_template('form.html', s=story_for_form, story_title=\n story_title)\n\n\n@app.route('/story')\ndef show_story():\n \"\"\"Display Madlib Story\"\"\"\n answers = request.args\n story_title = request.args['story_title']\n for story in stories.values():\n if story.title == story_title:\n story_to_gen = story\n return render_template('story.html', story_to_gen=story_to_gen,\n user_answers=answers)\n\n\n@app.route('/play-again')\ndef play_again():\n \"\"\"Redirect Home\"\"\"\n return redirect('/')\n",
"step-5": "from flask import Flask, request, render_template, redirect\nfrom stories import Story, stories\n# from flask_debugtoolbar import DebugToolbarExtension\n\napp = Flask(__name__)\t\n# app.config['SECRET_KEY'] = \"secret\"\n\n# debug = DebugToolbarExtension(app)\n\n\n# my original approach involved using a global story variable to store the instances which were in this file\n# After looking at the answer code, storing this data in the instance maskes more sense\n# story_global = None\n\n@app.route('/')\ndef home_page():\n \"\"\"Offer user choice of Madlib Games\"\"\"\n\n return render_template('index.html', stories=stories.values())\n\n\n@app.route('/form')\ndef show_form():\n \"\"\"Show Form for User Input\"\"\"\n\n story_title = request.args[\"madlib\"]\n for story in stories.values():\n if story.title == story_title:\n story_for_form = story\n \n return render_template('form.html', s=story_for_form, story_title=story_title)\n\n\n@app.route(\"/story\")\ndef show_story():\n \"\"\"Display Madlib Story\"\"\"\n\n answers = request.args\n story_title = request.args[\"story_title\"]\n for story in stories.values():\n if story.title == story_title:\n story_to_gen = story\n \n return render_template(\"story.html\", story_to_gen=story_to_gen, user_answers=answers)\n\n\n@app.route('/play-again')\ndef play_again():\n \"\"\"Redirect Home\"\"\"\n\n return redirect('/')\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cv.imshow('Threshold Trunc', th3)
cv.imshow('Threshold2', th2)
cv.imshow('Threshold', th1)
cv.imshow('Image', img)
cv.imshow('th4', th4)
cv.imshow('th5', th5)
cv.waitKey(0)
cv.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
img = cv.imread('images/gradient.png', 0)
_, th1 = cv.threshold(img, 127, 255, cv.THRESH_BINARY)
_, th2 = cv.threshold(img, 127, 255, cv.THRESH_BINARY_INV)
_, th3 = cv.threshold(img, 127, 255, cv.THRESH_TRUNC)
_, th4 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO)
_, th5 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO_INV)
cv.imshow('Threshold Trunc', th3)
cv.imshow('Threshold2', th2)
cv.imshow('Threshold', th1)
cv.imshow('Image', img)
cv.imshow('th4', th4)
cv.imshow('th5', th5)
cv.waitKey(0)
cv.destroyAllWindows()
<|reserved_special_token_1|>
import cv2 as cv
img = cv.imread('images/gradient.png', 0)
_, th1 = cv.threshold(img, 127, 255, cv.THRESH_BINARY)
_, th2 = cv.threshold(img, 127, 255, cv.THRESH_BINARY_INV)
_, th3 = cv.threshold(img, 127, 255, cv.THRESH_TRUNC)
_, th4 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO)
_, th5 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO_INV)
cv.imshow('Threshold Trunc', th3)
cv.imshow('Threshold2', th2)
cv.imshow('Threshold', th1)
cv.imshow('Image', img)
cv.imshow('th4', th4)
cv.imshow('th5', th5)
cv.waitKey(0)
cv.destroyAllWindows()
<|reserved_special_token_1|>
import cv2 as cv
img = cv.imread('images/gradient.png', 0)
_,th1 = cv.threshold(img, 127,255, cv.THRESH_BINARY)
_,th2 = cv.threshold(img, 127, 255, cv.THRESH_BINARY_INV)
_,th3 = cv.threshold(img, 127, 255, cv.THRESH_TRUNC) #freeze the pixel color after the threshold
_,th4 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO) #less to threshold will be zero
_,th5 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO_INV) #if the value of the pixel is greater than threshold it will be zero
cv.imshow("Threshold Trunc", th3)
cv.imshow("Threshold2", th2)
cv.imshow("Threshold", th1)
cv.imshow("Image",img)
cv.imshow("th4", th4)
cv.imshow("th5", th5)
cv.waitKey(0)
cv.destroyAllWindows()
|
flexible
|
{
"blob_id": "6f356840944e11f52a280262697d7e33b3cca650",
"index": 2319,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv.imshow('Threshold Trunc', th3)\ncv.imshow('Threshold2', th2)\ncv.imshow('Threshold', th1)\ncv.imshow('Image', img)\ncv.imshow('th4', th4)\ncv.imshow('th5', th5)\ncv.waitKey(0)\ncv.destroyAllWindows()\n",
"step-3": "<mask token>\nimg = cv.imread('images/gradient.png', 0)\n_, th1 = cv.threshold(img, 127, 255, cv.THRESH_BINARY)\n_, th2 = cv.threshold(img, 127, 255, cv.THRESH_BINARY_INV)\n_, th3 = cv.threshold(img, 127, 255, cv.THRESH_TRUNC)\n_, th4 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO)\n_, th5 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO_INV)\ncv.imshow('Threshold Trunc', th3)\ncv.imshow('Threshold2', th2)\ncv.imshow('Threshold', th1)\ncv.imshow('Image', img)\ncv.imshow('th4', th4)\ncv.imshow('th5', th5)\ncv.waitKey(0)\ncv.destroyAllWindows()\n",
"step-4": "import cv2 as cv\nimg = cv.imread('images/gradient.png', 0)\n_, th1 = cv.threshold(img, 127, 255, cv.THRESH_BINARY)\n_, th2 = cv.threshold(img, 127, 255, cv.THRESH_BINARY_INV)\n_, th3 = cv.threshold(img, 127, 255, cv.THRESH_TRUNC)\n_, th4 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO)\n_, th5 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO_INV)\ncv.imshow('Threshold Trunc', th3)\ncv.imshow('Threshold2', th2)\ncv.imshow('Threshold', th1)\ncv.imshow('Image', img)\ncv.imshow('th4', th4)\ncv.imshow('th5', th5)\ncv.waitKey(0)\ncv.destroyAllWindows()\n",
"step-5": "import cv2 as cv\n\nimg = cv.imread('images/gradient.png', 0)\n_,th1 = cv.threshold(img, 127,255, cv.THRESH_BINARY)\n_,th2 = cv.threshold(img, 127, 255, cv.THRESH_BINARY_INV)\n_,th3 = cv.threshold(img, 127, 255, cv.THRESH_TRUNC) #freeze the pixel color after the threshold\n_,th4 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO) #less to threshold will be zero\n_,th5 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO_INV) #if the value of the pixel is greater than threshold it will be zero\n\ncv.imshow(\"Threshold Trunc\", th3)\ncv.imshow(\"Threshold2\", th2)\ncv.imshow(\"Threshold\", th1)\ncv.imshow(\"Image\",img)\ncv.imshow(\"th4\", th4)\ncv.imshow(\"th5\", th5)\n\ncv.waitKey(0)\ncv.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class BannerAdmin(object):
list_display = 'title', 'url', 'index'
class BaseSetting(object):
enable_themes = True
user_bootswatch = True
class GlobalSetting(object):
site_title = '西游记'
site_footer = '咨询在线'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EmailVerifyRecordAdmin(object):
pass
class BannerAdmin(object):
list_display = 'title', 'url', 'index'
class BaseSetting(object):
enable_themes = True
user_bootswatch = True
class GlobalSetting(object):
site_title = '西游记'
site_footer = '咨询在线'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EmailVerifyRecordAdmin(object):
pass
class BannerAdmin(object):
list_display = 'title', 'url', 'index'
class BaseSetting(object):
enable_themes = True
user_bootswatch = True
class GlobalSetting(object):
site_title = '西游记'
site_footer = '咨询在线'
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(Banner, BannerAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSetting)
<|reserved_special_token_1|>
import xadmin
from .models import EmailVerifyRecord, Banner
from xadmin import views
class EmailVerifyRecordAdmin(object):
pass
class BannerAdmin(object):
list_display = 'title', 'url', 'index'
class BaseSetting(object):
enable_themes = True
user_bootswatch = True
class GlobalSetting(object):
site_title = '西游记'
site_footer = '咨询在线'
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(Banner, BannerAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSetting)
<|reserved_special_token_1|>
import xadmin
from .models import EmailVerifyRecord,Banner
from xadmin import views
class EmailVerifyRecordAdmin(object):
pass
class BannerAdmin(object):
list_display=('title','url','index')
class BaseSetting(object):
enable_themes=True
user_bootswatch=True
#设置xadmin页面标题和页脚
class GlobalSetting(object):
site_title='西游记'
site_footer='咨询在线'
xadmin.site.register(EmailVerifyRecord,EmailVerifyRecordAdmin)
xadmin.site.register(Banner,BannerAdmin)
xadmin.site.register(views.BaseAdminView,BaseSetting)
xadmin.site.register(views.CommAdminView,GlobalSetting)
|
flexible
|
{
"blob_id": "263a853f33eb9724101ca87f12b914282dea9981",
"index": 1441,
"step-1": "<mask token>\n\n\nclass BannerAdmin(object):\n list_display = 'title', 'url', 'index'\n\n\nclass BaseSetting(object):\n enable_themes = True\n user_bootswatch = True\n\n\nclass GlobalSetting(object):\n site_title = '西游记'\n site_footer = '咨询在线'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass EmailVerifyRecordAdmin(object):\n pass\n\n\nclass BannerAdmin(object):\n list_display = 'title', 'url', 'index'\n\n\nclass BaseSetting(object):\n enable_themes = True\n user_bootswatch = True\n\n\nclass GlobalSetting(object):\n site_title = '西游记'\n site_footer = '咨询在线'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass EmailVerifyRecordAdmin(object):\n pass\n\n\nclass BannerAdmin(object):\n list_display = 'title', 'url', 'index'\n\n\nclass BaseSetting(object):\n enable_themes = True\n user_bootswatch = True\n\n\nclass GlobalSetting(object):\n site_title = '西游记'\n site_footer = '咨询在线'\n\n\nxadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)\nxadmin.site.register(Banner, BannerAdmin)\nxadmin.site.register(views.BaseAdminView, BaseSetting)\nxadmin.site.register(views.CommAdminView, GlobalSetting)\n",
"step-4": "import xadmin\nfrom .models import EmailVerifyRecord, Banner\nfrom xadmin import views\n\n\nclass EmailVerifyRecordAdmin(object):\n pass\n\n\nclass BannerAdmin(object):\n list_display = 'title', 'url', 'index'\n\n\nclass BaseSetting(object):\n enable_themes = True\n user_bootswatch = True\n\n\nclass GlobalSetting(object):\n site_title = '西游记'\n site_footer = '咨询在线'\n\n\nxadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)\nxadmin.site.register(Banner, BannerAdmin)\nxadmin.site.register(views.BaseAdminView, BaseSetting)\nxadmin.site.register(views.CommAdminView, GlobalSetting)\n",
"step-5": "import xadmin\nfrom .models import EmailVerifyRecord,Banner\nfrom xadmin import views\n\nclass EmailVerifyRecordAdmin(object):\n pass\n\n\nclass BannerAdmin(object):\n list_display=('title','url','index')\n\nclass BaseSetting(object):\n enable_themes=True\n user_bootswatch=True\n#设置xadmin页面标题和页脚\nclass GlobalSetting(object):\n site_title='西游记'\n site_footer='咨询在线'\nxadmin.site.register(EmailVerifyRecord,EmailVerifyRecordAdmin)\nxadmin.site.register(Banner,BannerAdmin)\nxadmin.site.register(views.BaseAdminView,BaseSetting)\nxadmin.site.register(views.CommAdminView,GlobalSetting)\n\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
# coding:utf-8
'''
对称二叉树
实现一个函数,用来判断一个二叉树是不是对称的
如果一颗二叉树和它的镜像是一样的,就是对称的
'''
class BinaryTreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def isSymmetryBonaryTree(self, pRoot):
if pRoot is None:
return False
pRoot.right, pRoot.left = pRoot.left, pRoot.right
self.isSymmetryBonaryTree(pRoot.left)
self.isSymmetryBonaryTree(pRoot.right)
return pRoot
if __name__ == '__main__':
# The first Binary Tree
pRoot1 = BinaryTreeNode(10)
pRoot2 = BinaryTreeNode(12)
pRoot3 = BinaryTreeNode(12)
pRoot1.left = pRoot2
pRoot1.right = pRoot3
# The second Binary Tree
pRoot4 = BinaryTreeNode(13)
pRoot5 = BinaryTreeNode(14)
pRoot6 = BinaryTreeNode(14)
pRoot7 = BinaryTreeNode(16)
pRoot8 = BinaryTreeNode(17)
pRoot9 = BinaryTreeNode(17)
pRoot10 = BinaryTreeNode(16)
pRoot4.left = pRoot5
pRoot4.right = pRoot6
pRoot5.left = pRoot7
pRoot5.right = pRoot8
pRoot6.left = pRoot9
pRoot6.right = pRoot10
# The third Binary Tree
pRootx1 = BinaryTreeNode(100)
pRootx2 = BinaryTreeNode(102)
pRootx3 = BinaryTreeNode(103)
pRootx1.left = pRootx2
pRootx1.right = pRootx3
s = Solution()
pRootCopy1 = s.isSymmetryBonaryTree(pRoot1)
print pRootCopy1 == pRoot1
pRootCopy4 = s.isSymmetryBonaryTree(pRoot4)
print pRootCopy4 == pRoot4
pRootCopyx1 = s.isSymmetryBonaryTree(pRootx1)
print pRootCopyx1 == pRootx1
|
normal
|
{
"blob_id": "c1a9c220b9100a927076753d6483ad7c069dea8c",
"index": 4271,
"step-1": "# coding:utf-8\n\n'''\n对称二叉树\n实现一个函数,用来判断一个二叉树是不是对称的\n如果一颗二叉树和它的镜像是一样的,就是对称的\n'''\n\n\nclass BinaryTreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def isSymmetryBonaryTree(self, pRoot):\n if pRoot is None:\n return False\n pRoot.right, pRoot.left = pRoot.left, pRoot.right\n self.isSymmetryBonaryTree(pRoot.left)\n self.isSymmetryBonaryTree(pRoot.right)\n return pRoot\n\n\nif __name__ == '__main__':\n # The first Binary Tree\n pRoot1 = BinaryTreeNode(10)\n pRoot2 = BinaryTreeNode(12)\n pRoot3 = BinaryTreeNode(12)\n\n pRoot1.left = pRoot2\n pRoot1.right = pRoot3\n\n # The second Binary Tree\n pRoot4 = BinaryTreeNode(13)\n pRoot5 = BinaryTreeNode(14)\n pRoot6 = BinaryTreeNode(14)\n pRoot7 = BinaryTreeNode(16)\n pRoot8 = BinaryTreeNode(17)\n pRoot9 = BinaryTreeNode(17)\n pRoot10 = BinaryTreeNode(16)\n\n pRoot4.left = pRoot5\n pRoot4.right = pRoot6\n pRoot5.left = pRoot7\n pRoot5.right = pRoot8\n pRoot6.left = pRoot9\n pRoot6.right = pRoot10\n\n # The third Binary Tree\n pRootx1 = BinaryTreeNode(100)\n pRootx2 = BinaryTreeNode(102)\n pRootx3 = BinaryTreeNode(103)\n\n pRootx1.left = pRootx2\n pRootx1.right = pRootx3\n\n s = Solution()\n pRootCopy1 = s.isSymmetryBonaryTree(pRoot1)\n print pRootCopy1 == pRoot1\n\n pRootCopy4 = s.isSymmetryBonaryTree(pRoot4)\n print pRootCopy4 == pRoot4\n\n pRootCopyx1 = s.isSymmetryBonaryTree(pRootx1)\n print pRootCopyx1 == pRootx1\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#! /usr/bin/env python3
import common, os, shutil, sys
def main():
os.chdir(common.root)
shutil.rmtree('shared/target', ignore_errors = True)
shutil.rmtree('platform/build', ignore_errors = True)
shutil.rmtree('platform/target', ignore_errors = True)
shutil.rmtree('tests/target', ignore_errors = True)
shutil.rmtree('examples/lwjgl/target', ignore_errors = True)
shutil.rmtree('examples/kwinit/target', ignore_errors = True)
shutil.rmtree('examples/jwm/target', ignore_errors = True)
shutil.rmtree('examples/swt/target', ignore_errors = True)
return 0
if __name__ == '__main__':
sys.exit(main())
|
normal
|
{
"blob_id": "2305d0b7ec0d9e08e3f1c0cedaafa6ed60786e50",
"index": 7359,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n os.chdir(common.root)\n shutil.rmtree('shared/target', ignore_errors=True)\n shutil.rmtree('platform/build', ignore_errors=True)\n shutil.rmtree('platform/target', ignore_errors=True)\n shutil.rmtree('tests/target', ignore_errors=True)\n shutil.rmtree('examples/lwjgl/target', ignore_errors=True)\n shutil.rmtree('examples/kwinit/target', ignore_errors=True)\n shutil.rmtree('examples/jwm/target', ignore_errors=True)\n shutil.rmtree('examples/swt/target', ignore_errors=True)\n return 0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n os.chdir(common.root)\n shutil.rmtree('shared/target', ignore_errors=True)\n shutil.rmtree('platform/build', ignore_errors=True)\n shutil.rmtree('platform/target', ignore_errors=True)\n shutil.rmtree('tests/target', ignore_errors=True)\n shutil.rmtree('examples/lwjgl/target', ignore_errors=True)\n shutil.rmtree('examples/kwinit/target', ignore_errors=True)\n shutil.rmtree('examples/jwm/target', ignore_errors=True)\n shutil.rmtree('examples/swt/target', ignore_errors=True)\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-4": "import common, os, shutil, sys\n\n\ndef main():\n os.chdir(common.root)\n shutil.rmtree('shared/target', ignore_errors=True)\n shutil.rmtree('platform/build', ignore_errors=True)\n shutil.rmtree('platform/target', ignore_errors=True)\n shutil.rmtree('tests/target', ignore_errors=True)\n shutil.rmtree('examples/lwjgl/target', ignore_errors=True)\n shutil.rmtree('examples/kwinit/target', ignore_errors=True)\n shutil.rmtree('examples/jwm/target', ignore_errors=True)\n shutil.rmtree('examples/swt/target', ignore_errors=True)\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-5": "#! /usr/bin/env python3\nimport common, os, shutil, sys\n\ndef main():\n os.chdir(common.root)\n shutil.rmtree('shared/target', ignore_errors = True)\n shutil.rmtree('platform/build', ignore_errors = True)\n shutil.rmtree('platform/target', ignore_errors = True)\n shutil.rmtree('tests/target', ignore_errors = True)\n shutil.rmtree('examples/lwjgl/target', ignore_errors = True)\n shutil.rmtree('examples/kwinit/target', ignore_errors = True)\n shutil.rmtree('examples/jwm/target', ignore_errors = True)\n shutil.rmtree('examples/swt/target', ignore_errors = True)\n\n return 0\n\nif __name__ == '__main__':\n sys.exit(main())",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
n_points = 100
n_sims = 1000
def simulate_one_realisation():
return np.random.normal(1, 2, size=n_points)
def infer(sample):
return {'mean': np.mean(sample), 'std': np.std(sample)}
inference = [infer(simulate_one_realisation()) for _ in range(n_sims)]
means = np.percentile([x['mean'] for x in inference], [25, 50, 75])
print(means)
plt.hist([x['mean'] for x in inference], bins=25)
plt.show()
standard_error = np.percentile([x['std'] for x in inference], [25, 50, 75])
print(standard_error)
plt.hist([x['std'] for x in inference], bins=25)
plt.show()
# The sample standard deviation and estimated standard error of the sample mean both have a slight right skew. But the skewness of the sample standard deviation is much more than the sample mean.
# The sample standar deviation histogram has many modes signifying a random distributionwhereas the sample mean histogram has a more uniform distribution shape with only one mode.
|
normal
|
{
"blob_id": "6e8ef901fc614ecbba25df01f84a43c429f25cf6",
"index": 4919,
"step-1": "<mask token>\n\n\ndef infer(sample):\n return {'mean': np.mean(sample), 'std': np.std(sample)}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef simulate_one_realisation():\n return np.random.normal(1, 2, size=n_points)\n\n\ndef infer(sample):\n return {'mean': np.mean(sample), 'std': np.std(sample)}\n\n\n<mask token>\nprint(means)\nplt.hist([x['mean'] for x in inference], bins=25)\nplt.show()\n<mask token>\nprint(standard_error)\nplt.hist([x['std'] for x in inference], bins=25)\nplt.show()\n",
"step-3": "<mask token>\nn_points = 100\nn_sims = 1000\n\n\ndef simulate_one_realisation():\n return np.random.normal(1, 2, size=n_points)\n\n\ndef infer(sample):\n return {'mean': np.mean(sample), 'std': np.std(sample)}\n\n\ninference = [infer(simulate_one_realisation()) for _ in range(n_sims)]\nmeans = np.percentile([x['mean'] for x in inference], [25, 50, 75])\nprint(means)\nplt.hist([x['mean'] for x in inference], bins=25)\nplt.show()\nstandard_error = np.percentile([x['std'] for x in inference], [25, 50, 75])\nprint(standard_error)\nplt.hist([x['std'] for x in inference], bins=25)\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nn_points = 100\nn_sims = 1000\n\n\ndef simulate_one_realisation():\n return np.random.normal(1, 2, size=n_points)\n\n\ndef infer(sample):\n return {'mean': np.mean(sample), 'std': np.std(sample)}\n\n\ninference = [infer(simulate_one_realisation()) for _ in range(n_sims)]\nmeans = np.percentile([x['mean'] for x in inference], [25, 50, 75])\nprint(means)\nplt.hist([x['mean'] for x in inference], bins=25)\nplt.show()\nstandard_error = np.percentile([x['std'] for x in inference], [25, 50, 75])\nprint(standard_error)\nplt.hist([x['std'] for x in inference], bins=25)\nplt.show()\n",
"step-5": "#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nn_points = 100\nn_sims = 1000\n\ndef simulate_one_realisation():\n return np.random.normal(1, 2, size=n_points)\n\ndef infer(sample):\n return {'mean': np.mean(sample), 'std': np.std(sample)}\n\ninference = [infer(simulate_one_realisation()) for _ in range(n_sims)]\n\nmeans = np.percentile([x['mean'] for x in inference], [25, 50, 75])\nprint(means)\n\nplt.hist([x['mean'] for x in inference], bins=25)\nplt.show()\n\nstandard_error = np.percentile([x['std'] for x in inference], [25, 50, 75])\nprint(standard_error)\n\nplt.hist([x['std'] for x in inference], bins=25)\nplt.show()\n\n# The sample standard deviation and estimated standard error of the sample mean both have a slight right skew. But the skewness of the sample standard deviation is much more than the sample mean.\n# The sample standar deviation histogram has many modes signifying a random distributionwhereas the sample mean histogram has a more uniform distribution shape with only one mode. \n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from PIL import Image
from scipy.misc import imsave, imread
def plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error, test_error,filename):
plt.style.use('bmh')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_acc, 'r', epochs,test_acc, 'g')
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train_acc', 'test_acc'], loc='upper left')
fig.savefig(filename + '_accuracy.png')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_loss, 'r', epochs,test_loss, 'g')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train_loss', 'test_loss'], loc='upper left')
fig.savefig(filename + '_loss.png')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_error, 'r', epochs,test_error, 'g')
plt.title('model error rate')
plt.ylabel('error rate')
plt.xlabel('epoch')
plt.legend(['train_error', 'test_error'], loc='upper left')
fig.savefig(filename + '_error.png')
plt.close('all')
def write_csv(filename, train_acc,test_acc,train_loss,test_loss,train_error,test_error,epoch):
if epoch==0:
with open(filename, 'w') as f:
f.write('train_acc,test_acc,train_loss, test_loss, train_error, test_error\n')
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],\
test_acc[-1],\
train_loss[-1],\
test_loss[-1],\
train_error[-1],\
test_error[-1]))
else:
with open(filename, 'a') as f:
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],\
test_acc[-1],\
train_loss[-1],\
test_loss[-1],\
train_error[-1],\
test_error[-1]))
|
normal
|
{
"blob_id": "93150eb1c6746e2b1967eb5305fa526ae36968fd",
"index": 2003,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef write_csv(filename, train_acc, test_acc, train_loss, test_loss,\n train_error, test_error, epoch):\n if epoch == 0:\n with open(filename, 'w') as f:\n f.write(\n 'train_acc,test_acc,train_loss, test_loss, train_error, test_error\\n'\n )\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n else:\n with open(filename, 'a') as f:\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n",
"step-3": "<mask token>\n\n\ndef plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error,\n test_error, filename):\n plt.style.use('bmh')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_acc, 'r', epochs, test_acc, 'g')\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train_acc', 'test_acc'], loc='upper left')\n fig.savefig(filename + '_accuracy.png')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_loss, 'r', epochs, test_loss, 'g')\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train_loss', 'test_loss'], loc='upper left')\n fig.savefig(filename + '_loss.png')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_error, 'r', epochs, test_error, 'g')\n plt.title('model error rate')\n plt.ylabel('error rate')\n plt.xlabel('epoch')\n plt.legend(['train_error', 'test_error'], loc='upper left')\n fig.savefig(filename + '_error.png')\n plt.close('all')\n\n\ndef write_csv(filename, train_acc, test_acc, train_loss, test_loss,\n train_error, test_error, epoch):\n if epoch == 0:\n with open(filename, 'w') as f:\n f.write(\n 'train_acc,test_acc,train_loss, test_loss, train_error, test_error\\n'\n )\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n else:\n with open(filename, 'a') as f:\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n",
"step-4": "import matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nfrom PIL import Image\nfrom scipy.misc import imsave, imread\n\n\ndef plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error,\n test_error, filename):\n plt.style.use('bmh')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_acc, 'r', epochs, test_acc, 'g')\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train_acc', 'test_acc'], loc='upper left')\n fig.savefig(filename + '_accuracy.png')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_loss, 'r', epochs, test_loss, 'g')\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train_loss', 'test_loss'], loc='upper left')\n fig.savefig(filename + '_loss.png')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_error, 'r', epochs, test_error, 'g')\n plt.title('model error rate')\n plt.ylabel('error rate')\n plt.xlabel('epoch')\n plt.legend(['train_error', 'test_error'], loc='upper left')\n fig.savefig(filename + '_error.png')\n plt.close('all')\n\n\ndef write_csv(filename, train_acc, test_acc, train_loss, test_loss,\n train_error, test_error, epoch):\n if epoch == 0:\n with open(filename, 'w') as f:\n f.write(\n 'train_acc,test_acc,train_loss, test_loss, train_error, test_error\\n'\n )\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n else:\n with open(filename, 'a') as f:\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n",
"step-5": "import matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nfrom PIL import Image\nfrom scipy.misc import imsave, imread\n\n\ndef plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error, test_error,filename):\n plt.style.use('bmh')\n\n fig=plt.figure(figsize=(8,6))\n plt.plot(epochs,train_acc, 'r', epochs,test_acc, 'g')\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train_acc', 'test_acc'], loc='upper left')\n fig.savefig(filename + '_accuracy.png')\n\n fig=plt.figure(figsize=(8,6))\n plt.plot(epochs,train_loss, 'r', epochs,test_loss, 'g')\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train_loss', 'test_loss'], loc='upper left')\n fig.savefig(filename + '_loss.png')\n \n fig=plt.figure(figsize=(8,6))\n plt.plot(epochs,train_error, 'r', epochs,test_error, 'g')\n plt.title('model error rate')\n plt.ylabel('error rate')\n plt.xlabel('epoch')\n plt.legend(['train_error', 'test_error'], loc='upper left')\n fig.savefig(filename + '_error.png')\n\n plt.close('all')\n\n\n\ndef write_csv(filename, train_acc,test_acc,train_loss,test_loss,train_error,test_error,epoch):\n if epoch==0:\n \n with open(filename, 'w') as f:\n f.write('train_acc,test_acc,train_loss, test_loss, train_error, test_error\\n') \n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\\\n test_acc[-1],\\\n train_loss[-1],\\\n test_loss[-1],\\\n train_error[-1],\\\n test_error[-1]))\n \n else:\n with open(filename, 'a') as f:\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\\\n test_acc[-1],\\\n train_loss[-1],\\\n test_loss[-1],\\\n train_error[-1],\\\n test_error[-1]))\n \n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TicketSetSchema(RebaseSchema):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TicketSetSchema(RebaseSchema):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@post_load
def make_ticket_set(self, data):
from rebase.models import TicketSet
return self._get_or_make_object(TicketSet, data)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TicketSetSchema(RebaseSchema):
id = fields.Integer()
bid_limits = SecureNestedField(BidLimitSchema, exclude=('ticket_set',),
only=('id', 'price', 'ticket_snapshot'), many=True)
auction = SecureNestedField('AuctionSchema', only=('id',))
nominations = SecureNestedField('NominationSchema', only=('contractor',
'ticket_set', 'job_fit', 'auction', 'hide'), many=True)
@post_load
def make_ticket_set(self, data):
from rebase.models import TicketSet
return self._get_or_make_object(TicketSet, data)
serializer = TicketSetSchema()
deserializer = TicketSetSchema(strict=True)
update_deserializer = TicketSetSchema(context={'raw': True})
<|reserved_special_token_1|>
from marshmallow import fields, post_load
from rebase.common.schema import RebaseSchema, SecureNestedField
from rebase.views.bid_limit import BidLimitSchema
class TicketSetSchema(RebaseSchema):
id = fields.Integer()
bid_limits = SecureNestedField(BidLimitSchema, exclude=('ticket_set',),
only=('id', 'price', 'ticket_snapshot'), many=True)
auction = SecureNestedField('AuctionSchema', only=('id',))
nominations = SecureNestedField('NominationSchema', only=('contractor',
'ticket_set', 'job_fit', 'auction', 'hide'), many=True)
@post_load
def make_ticket_set(self, data):
from rebase.models import TicketSet
return self._get_or_make_object(TicketSet, data)
serializer = TicketSetSchema()
deserializer = TicketSetSchema(strict=True)
update_deserializer = TicketSetSchema(context={'raw': True})
|
flexible
|
{
"blob_id": "5ebc4f61810f007fd345b52531f7f4318820b9c8",
"index": 6333,
"step-1": "<mask token>\n\n\nclass TicketSetSchema(RebaseSchema):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TicketSetSchema(RebaseSchema):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @post_load\n def make_ticket_set(self, data):\n from rebase.models import TicketSet\n return self._get_or_make_object(TicketSet, data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TicketSetSchema(RebaseSchema):\n id = fields.Integer()\n bid_limits = SecureNestedField(BidLimitSchema, exclude=('ticket_set',),\n only=('id', 'price', 'ticket_snapshot'), many=True)\n auction = SecureNestedField('AuctionSchema', only=('id',))\n nominations = SecureNestedField('NominationSchema', only=('contractor',\n 'ticket_set', 'job_fit', 'auction', 'hide'), many=True)\n\n @post_load\n def make_ticket_set(self, data):\n from rebase.models import TicketSet\n return self._get_or_make_object(TicketSet, data)\n\n\nserializer = TicketSetSchema()\ndeserializer = TicketSetSchema(strict=True)\nupdate_deserializer = TicketSetSchema(context={'raw': True})\n",
"step-4": "from marshmallow import fields, post_load\nfrom rebase.common.schema import RebaseSchema, SecureNestedField\nfrom rebase.views.bid_limit import BidLimitSchema\n\n\nclass TicketSetSchema(RebaseSchema):\n id = fields.Integer()\n bid_limits = SecureNestedField(BidLimitSchema, exclude=('ticket_set',),\n only=('id', 'price', 'ticket_snapshot'), many=True)\n auction = SecureNestedField('AuctionSchema', only=('id',))\n nominations = SecureNestedField('NominationSchema', only=('contractor',\n 'ticket_set', 'job_fit', 'auction', 'hide'), many=True)\n\n @post_load\n def make_ticket_set(self, data):\n from rebase.models import TicketSet\n return self._get_or_make_object(TicketSet, data)\n\n\nserializer = TicketSetSchema()\ndeserializer = TicketSetSchema(strict=True)\nupdate_deserializer = TicketSetSchema(context={'raw': True})\n",
"step-5": null,
"step-ids": [
1,
2,
4,
5
]
}
|
[
1,
2,
4,
5
] |
import dlib
import cv2
import imageio
import torch
from PIL import Image
from model import AgeGenderModel
from mix_model import MixModel
from torchvision.transforms import transforms
from tqdm import tqdm
from retinaface.pre_trained_models import get_model
transform = transforms.Compose([
transforms.Resize((112, 112)),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
# Load model age gender
model = MixModel()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ckpt = torch.load("outputs_w_free/model_epoch_50.pth")
model.load_state_dict(ckpt['model_state_dict'])
model.eval()
model.to(device)
model_face = get_model("resnet50_2020-07-20", max_size=512, device='cuda:1')
model_face.eval()
# load the detector
detector = dlib.get_frontal_face_detector()
FPS = 30
# read the video
out_video = imageio.get_writer("/home/cybercore/haimd/w_freeze_osaka.mp4", format='mp4', mode='I', fps=FPS)
video = imageio.get_reader("/home/cybercore/haimd/osaka.mp4")
for img in tqdm(video):
if img is not None:
# gray = cv2.cvtColor(src=img, code=cv2.COLOR_BGR2GRAY)
# faces = detector(gray)
annotation = model_face.predict_jsons(img)
max_thresh = annotation[0]['score']
bbox = annotation[0]['bbox']
if max_thresh > 0.3:
max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]
# for face in faces:
# print(face)
x1 = bbox[0]
y1 = bbox[1]
x2 = bbox[2]
y2 = bbox[3]
x1_face = bbox[0]-20
y1_face = bbox[1]-20
x2_face = bbox[2]+20
y2_face = bbox[3]+20
if x1_face > 0 and y1_face > 0:
img_face = img[y1_face:y2_face, x1_face:x2_face]
imageio.imwrite('face.jpg', img_face)
img_face = Image.fromarray(img_face)
img_face = transform(img_face)
img_face = torch.unsqueeze(img_face, 0)
img_face = img_face.to(device)
gen_pred, age_cls_pred, age_reg_pred = model(img_face)
_, gen_preds = torch.max(gen_pred, 1)
_, age_cls_pred = torch.max(age_cls_pred, 1)
if gen_preds.item() == 1:
text = f'M:{int(age_reg_pred.item()*100)}'
cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2), color=(255,0,0), thickness=4)
cv2.putText(img, text, org=(x1, y1), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1, color=(255, 0, 0), thickness=2, lineType=cv2.LINE_AA)
elif gen_preds.item() == 0:
text = f'F:{int(age_reg_pred.item()*100)}'
cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2), color=(0,0,255), thickness=4)
cv2.putText(img, text, org=(x1, y1), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1, color=(0, 0, 255), thickness=2, lineType=cv2.LINE_AA)
out_video.append_data(img)
out_video.close()
print('Done')
|
normal
|
{
"blob_id": "1cc14836808d70c1e53a9ca948a52776ebc89f4a",
"index": 4624,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmodel.load_state_dict(ckpt['model_state_dict'])\nmodel.eval()\nmodel.to(device)\n<mask token>\nmodel_face.eval()\n<mask token>\nfor img in tqdm(video):\n if img is not None:\n annotation = model_face.predict_jsons(img)\n max_thresh = annotation[0]['score']\n bbox = annotation[0]['bbox']\n if max_thresh > 0.3:\n max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]\n x1 = bbox[0]\n y1 = bbox[1]\n x2 = bbox[2]\n y2 = bbox[3]\n x1_face = bbox[0] - 20\n y1_face = bbox[1] - 20\n x2_face = bbox[2] + 20\n y2_face = bbox[3] + 20\n if x1_face > 0 and y1_face > 0:\n img_face = img[y1_face:y2_face, x1_face:x2_face]\n imageio.imwrite('face.jpg', img_face)\n img_face = Image.fromarray(img_face)\n img_face = transform(img_face)\n img_face = torch.unsqueeze(img_face, 0)\n img_face = img_face.to(device)\n gen_pred, age_cls_pred, age_reg_pred = model(img_face)\n _, gen_preds = torch.max(gen_pred, 1)\n _, age_cls_pred = torch.max(age_cls_pred, 1)\n if gen_preds.item() == 1:\n text = f'M:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(255, 0, 0), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0\n ), thickness=2, lineType=cv2.LINE_AA)\n elif gen_preds.item() == 0:\n text = f'F:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(0, 0, 255), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 0, 255\n ), thickness=2, lineType=cv2.LINE_AA)\n out_video.append_data(img)\nout_video.close()\nprint('Done')\n",
"step-3": "<mask token>\ntransform = transforms.Compose([transforms.Resize((112, 112)), transforms.\n ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, \n 0.1994, 0.201))])\nmodel = MixModel()\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nckpt = torch.load('outputs_w_free/model_epoch_50.pth')\nmodel.load_state_dict(ckpt['model_state_dict'])\nmodel.eval()\nmodel.to(device)\nmodel_face = get_model('resnet50_2020-07-20', max_size=512, device='cuda:1')\nmodel_face.eval()\ndetector = dlib.get_frontal_face_detector()\nFPS = 30\nout_video = imageio.get_writer('/home/cybercore/haimd/w_freeze_osaka.mp4',\n format='mp4', mode='I', fps=FPS)\nvideo = imageio.get_reader('/home/cybercore/haimd/osaka.mp4')\nfor img in tqdm(video):\n if img is not None:\n annotation = model_face.predict_jsons(img)\n max_thresh = annotation[0]['score']\n bbox = annotation[0]['bbox']\n if max_thresh > 0.3:\n max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]\n x1 = bbox[0]\n y1 = bbox[1]\n x2 = bbox[2]\n y2 = bbox[3]\n x1_face = bbox[0] - 20\n y1_face = bbox[1] - 20\n x2_face = bbox[2] + 20\n y2_face = bbox[3] + 20\n if x1_face > 0 and y1_face > 0:\n img_face = img[y1_face:y2_face, x1_face:x2_face]\n imageio.imwrite('face.jpg', img_face)\n img_face = Image.fromarray(img_face)\n img_face = transform(img_face)\n img_face = torch.unsqueeze(img_face, 0)\n img_face = img_face.to(device)\n gen_pred, age_cls_pred, age_reg_pred = model(img_face)\n _, gen_preds = torch.max(gen_pred, 1)\n _, age_cls_pred = torch.max(age_cls_pred, 1)\n if gen_preds.item() == 1:\n text = f'M:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(255, 0, 0), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0\n ), thickness=2, lineType=cv2.LINE_AA)\n elif gen_preds.item() == 0:\n text = f'F:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, 
y2),\n color=(0, 0, 255), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 0, 255\n ), thickness=2, lineType=cv2.LINE_AA)\n out_video.append_data(img)\nout_video.close()\nprint('Done')\n",
"step-4": "import dlib\nimport cv2\nimport imageio\nimport torch\nfrom PIL import Image\nfrom model import AgeGenderModel\nfrom mix_model import MixModel\nfrom torchvision.transforms import transforms\nfrom tqdm import tqdm\nfrom retinaface.pre_trained_models import get_model\ntransform = transforms.Compose([transforms.Resize((112, 112)), transforms.\n ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, \n 0.1994, 0.201))])\nmodel = MixModel()\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nckpt = torch.load('outputs_w_free/model_epoch_50.pth')\nmodel.load_state_dict(ckpt['model_state_dict'])\nmodel.eval()\nmodel.to(device)\nmodel_face = get_model('resnet50_2020-07-20', max_size=512, device='cuda:1')\nmodel_face.eval()\ndetector = dlib.get_frontal_face_detector()\nFPS = 30\nout_video = imageio.get_writer('/home/cybercore/haimd/w_freeze_osaka.mp4',\n format='mp4', mode='I', fps=FPS)\nvideo = imageio.get_reader('/home/cybercore/haimd/osaka.mp4')\nfor img in tqdm(video):\n if img is not None:\n annotation = model_face.predict_jsons(img)\n max_thresh = annotation[0]['score']\n bbox = annotation[0]['bbox']\n if max_thresh > 0.3:\n max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]\n x1 = bbox[0]\n y1 = bbox[1]\n x2 = bbox[2]\n y2 = bbox[3]\n x1_face = bbox[0] - 20\n y1_face = bbox[1] - 20\n x2_face = bbox[2] + 20\n y2_face = bbox[3] + 20\n if x1_face > 0 and y1_face > 0:\n img_face = img[y1_face:y2_face, x1_face:x2_face]\n imageio.imwrite('face.jpg', img_face)\n img_face = Image.fromarray(img_face)\n img_face = transform(img_face)\n img_face = torch.unsqueeze(img_face, 0)\n img_face = img_face.to(device)\n gen_pred, age_cls_pred, age_reg_pred = model(img_face)\n _, gen_preds = torch.max(gen_pred, 1)\n _, age_cls_pred = torch.max(age_cls_pred, 1)\n if gen_preds.item() == 1:\n text = f'M:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(255, 0, 0), thickness=4)\n cv2.putText(img, text, 
org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0\n ), thickness=2, lineType=cv2.LINE_AA)\n elif gen_preds.item() == 0:\n text = f'F:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(0, 0, 255), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 0, 255\n ), thickness=2, lineType=cv2.LINE_AA)\n out_video.append_data(img)\nout_video.close()\nprint('Done')\n",
"step-5": "import dlib\nimport cv2\nimport imageio\nimport torch\nfrom PIL import Image \nfrom model import AgeGenderModel\nfrom mix_model import MixModel\nfrom torchvision.transforms import transforms\nfrom tqdm import tqdm\nfrom retinaface.pre_trained_models import get_model\n\n\ntransform = transforms.Compose([\n transforms.Resize((112, 112)),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465),\n (0.2023, 0.1994, 0.2010)),\n])\n\n# Load model age gender\nmodel = MixModel()\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nckpt = torch.load(\"outputs_w_free/model_epoch_50.pth\")\n\nmodel.load_state_dict(ckpt['model_state_dict'])\nmodel.eval()\nmodel.to(device)\n\nmodel_face = get_model(\"resnet50_2020-07-20\", max_size=512, device='cuda:1')\nmodel_face.eval()\n\n# load the detector\ndetector = dlib.get_frontal_face_detector()\nFPS = 30\n# read the video\nout_video = imageio.get_writer(\"/home/cybercore/haimd/w_freeze_osaka.mp4\", format='mp4', mode='I', fps=FPS)\nvideo = imageio.get_reader(\"/home/cybercore/haimd/osaka.mp4\")\nfor img in tqdm(video):\n if img is not None:\n # gray = cv2.cvtColor(src=img, code=cv2.COLOR_BGR2GRAY)\n \n # faces = detector(gray)\n \n annotation = model_face.predict_jsons(img)\n max_thresh = annotation[0]['score']\n bbox = annotation[0]['bbox']\n if max_thresh > 0.3:\n max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]\n \n \n # for face in faces:\n # print(face)\n x1 = bbox[0]\n y1 = bbox[1]\n x2 = bbox[2]\n y2 = bbox[3]\n \n x1_face = bbox[0]-20\n y1_face = bbox[1]-20\n x2_face = bbox[2]+20\n y2_face = bbox[3]+20\n if x1_face > 0 and y1_face > 0:\n \n img_face = img[y1_face:y2_face, x1_face:x2_face]\n \n imageio.imwrite('face.jpg', img_face)\n img_face = Image.fromarray(img_face)\n img_face = transform(img_face)\n\n img_face = torch.unsqueeze(img_face, 0)\n img_face = img_face.to(device) \n\n gen_pred, age_cls_pred, age_reg_pred = model(img_face)\n _, gen_preds = torch.max(gen_pred, 1)\n 
_, age_cls_pred = torch.max(age_cls_pred, 1)\n\n if gen_preds.item() == 1:\n text = f'M:{int(age_reg_pred.item()*100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2), color=(255,0,0), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=1, color=(255, 0, 0), thickness=2, lineType=cv2.LINE_AA)\n elif gen_preds.item() == 0:\n text = f'F:{int(age_reg_pred.item()*100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2), color=(0,0,255), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=1, color=(0, 0, 255), thickness=2, lineType=cv2.LINE_AA)\n out_video.append_data(img)\nout_video.close()\nprint('Done')\n \n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
'''
THROW with or without parameters
Which of the following is true about the THROW statement?
Answer the question
50XP
Possible Answers
- The THROW statement without parameters should be placed within a CATCH block.
- The THROW statement with parameters can only be placed within a CATCH block.
- The THROW statement without parameters can't re-throw an original error.
Answer : The THROW statement without parameters should be placed within a CATCH block.
'''
|
flexible
|
{
"blob_id": "75023c7600fcceda0dc225992e7c433291b1a190",
"index": 7254,
"step-1": "<mask token>\n",
"step-2": "'''\nTHROW with or without parameters\n\n\nWhich of the following is true about the THROW statement?\n\nAnswer the question\n50XP\n\nPossible Answers\n\n - The THROW statement without parameters should be placed within a CATCH block.\n\n - The THROW statement with parameters can only be placed within a CATCH block.\n\n - The THROW statement without parameters can't re-throw an original error.\n\nAnswer : The THROW statement without parameters should be placed within a CATCH block.\n\n'''\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.